#Import necessary packages
import numpy as np
import numpy.ma as ma
from obspy import read, Stream
from obspy.clients.nrl import NRL
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas
import stretching
from obspy import UTCDateTime
import pandas as pd
import prepro as pp
import glob
from matplotlib import cm
import pickle
import ast
############################
############################
## FIND XCORR PKL FILES ####
############################
############################
station=[]
xcorr_pkldata=glob.glob("*daily-xcorr*.pickle")
locs=pd.read_csv('stationdvv.txt')
nsta=len(locs)
#climate_data=pd.read_csv("envcanada_rainfall.csv")
######################
######################
### PARAM SETTING ####
######################
######################
with open('prepro_para.txt') as f:
contents = f.read()
prepro_para = ast.literal_eval(contents)
segwin = prepro_para['segwin'] # length of segment to stack over (in seconds)
corrwin=prepro_para['corrwin'] # length of correlation window (in seconds)
freqmin=prepro_para['freqmin'] # minimum bandpass filter frequency used in prepro_pkl.py [Hz]
freqmax=prepro_para['freqmax'] # maximum bandpass filter frequency used in prepro_pkl.py [Hz]
trimtime=prepro_para['trimtime'] # trimtime ("noisy" or "quiet" set in prepro_pkl.py)
sps=500 # Sampling rate (in Hertz)
corrlen=sps*corrwin # length of correlation window (in number of samples)
seglen=sps*segwin # length of window to stack correlations over (in number of samples)
ncorr=int(np.floor(segwin/corrwin)) # number of correlations per segment (stack)
f.close()
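# Illustrative only: prepro_para.txt is assumed to contain a Python dict literal that
# ast.literal_eval can parse. The keys below match the ones read above; the values are
# hypothetical, not the ones actually used:
# {'segwin': 3600, 'corrwin': 20, 'freqmin': 2.0, 'freqmax': 20.0, 'trimtime': 'quiet'}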
#################################
#################################
### STRETCHING PARAM SETTING ####
#################################
#################################
Epsilon = .10 # STRETCHING LIMITS: dv/v is searched between -Epsilon and +Epsilon (multiply by 100 to get dv/v in %)
#FOR CAUSAL t_ini=0.5 seconds
t_ini = 0.5 # Time to start computing the dv/v (in seconds). Zero is at the zero-lag time
t_length = 3 # Length of the signal over which the dv/v is computed (in seconds)
delta=500 # sampling rate used to convert times to sample indices (same as sps)
t_ini_d = t_ini*delta # start dv/v computation at t_ini_d/delta seconds from the signal beginning
t_length_d = int(t_length*delta) # dv/v computation over t_length_d/delta seconds after t_ini_d/delta
limit=corrwin/2. # set window length in plot (-10 to 10)
timevec = np.arange(-limit, limit, 1/sps)
isrc=0 # setting source index at 0
nsrc=25 # total number of sources (stations) = 25
nrec=nsrc-1 # total number of receivers = nsrc-1 = 24
nfile=nsrc*nrec # total number of station-receiver pairs
maxdays=60 # maximum number of days collecting data over all stations (estimate)
dv_arr_unmasked=np.zeros((nfile,maxdays)) # initialize dv/v array
dv_arr=ma.masked_equal(dv_arr_unmasked,0) # mask zero values
dv_avg_unmasked=np.zeros((nsrc,maxdays)) # initialize average dv/v array. stacks dv/v for 1 source + all receivers
dv_avg=ma.masked_equal(dv_avg_unmasked,0) # mask zero values
cc_arr_unmasked=np.zeros((nfile,maxdays))
cc_arr=ma.masked_equal(cc_arr_unmasked,0)
error_arr_unmasked=np.zeros((nfile,maxdays))
error_arr=ma.masked_equal(error_arr_unmasked,0)
date = np.array('2020-06-18', dtype=np.datetime64) # Based on trimming, the start date is June 18, 2020 (matching the value set here)
datevec=date+np.arange(maxdays) # one calendar day per column of the dv/v arrays
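# Minimal illustration (values are made up) of why the zero-masking above matters:
# days with no data stay at 0 in the unmasked arrays, and masked-array reductions
# such as mean() then skip them instead of biasing the result towards zero.
_demo = ma.masked_equal(np.array([0.0, 1.0, 0.0, 3.0]), 0.0)
print(_demo.mean()) # 2.0, the two masked zeros are ignored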
#!/usr/bin/env python
# projectS and projectC were written by <NAME>.
import time
start = time.time()
import argparse
import cv2
import os
import dlib
import numpy as np
np.set_printoptions(precision=2)
import openface
from matplotlib import cm
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
def getRep(bgrImg):
start = time.time()
if bgrImg is None:
raise Exception("Unable to load image/frame")
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
# Get all bounding boxes
bb = align.getAllFaceBoundingBoxes(rgbImg)
if bb is None or len(bb) == 0: # also treat an empty detection list as "no face found"
# raise Exception("Unable to find a face: {}".format(imgPath))
return None
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFaces = []
for box in bb:
alignedFaces.append(
align.align(
args.imgDim,
rgbImg,
box,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
if len(alignedFaces) == 0: # an empty list (not None) indicates alignment failed
raise Exception("Unable to align the frame")
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
reps = []
for alignedFace in alignedFaces:
reps.append(net.forward(alignedFace))
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
return reps
def projectS(rho, theta, z):
p = np.array([np.sqrt(3.) * rho * (np.cos(theta) + np.sin(theta)) / 2.,
z + 1. + rho * (np.cos(theta) - np.sin(theta)) / 2.])
p += np.array([1.5, 0.5])
p /= 3.
return p
def projectC(x, y, z):
rho = np.sqrt(x**2 + y**2)
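# Hypothetical completion (not part of the original snippet): projectC presumably
# converts Cartesian coordinates to cylindrical ones and delegates to projectS, e.g.
# theta = np.arctan2(y, x)
# return projectS(rho, theta, z)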
'''Libraries for Prototype selection'''
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_iris
from sklearn.datasets import load_digits
import cvxpy as cvx
import math as mt
from sklearn.model_selection import KFold
import sklearn.metrics
from scipy import stats
# lambda = 1/n in original paper
class classifier():
"""Contains functions for prototype selection"""
def __init__(self, X, y, epsilon_, lambda_):
"""
Store data points as unique indexes, and initialize
the required member variables eg. epsilon, lambda,
interpoint distances, points in neighborhood
:param X: training data
:param y: training class
:param epsilon_: radius, constant
:param lambda_: weight, constant
"""
self.epsilon_ = epsilon_
self.lambda_ = lambda_
self.X_train = np.asarray(X)
self.y_train = np.asarray(y)
self.nbr_mask = None
self.alpha = np.empty(len(X))
self.xi = np.empty(len(X))
# For testing
self.probe = None
# How many dimensions are there in train data?
self.dim_num = len(self.X_train.shape)
# Calculate interpoint distances
self.dist = self.compute_dist(self.X_train)
# Create points in neighborhood
self.nbr_mask = self.dist < self.epsilon_
"""Implement modules which will be useful in the train_lp() function
for example
1) operations such as intersection, union etc of sets of datapoints
2) check for feasibility for the randomized algorithm approach
3) compute the objective value with current prototype set
4) fill in the train_lp() module which will make
use of the above modules and member variables defined by you
5) any other module that you deem fit and useful for the purpose."""
def train_lp(self, verbose=False):
"""
Implement the linear programming formulation and solve using cvxpy for prototype selection
Input:
verbose:
"""
def _union_size(self, train_sets, idx):
count = 0
idx_list = [tmp for tmp, x in enumerate(self.nbr_mask[idx] == True) if x]
for j in idx_list:
if not (j in train_sets):
count += 1
return count, idx_list
# Separate into L prize-collecting set cover problems
train_sets = {}
cls_count = np.unique(self.y_train)
# cls_num = len(cls_count)
for cls in cls_count:
# train_sets[cls] holds the indices of the training samples belonging to class cls
train_sets[cls] = [i for i, x in enumerate(self.y_train == cls) if x]
# Initialize variables/constants
alpha_ = cvx.Variable(len(self.X_train)) # one alpha per training point (self.dim_num is the array rank, which does not match the per-sample indexing below)
xi_ = cvx.Variable(len(self.X_train)) # one slack variable per training point
Cl = np.zeros(alpha_.shape)
# Set up obj and constraints
cons = [0 <= alpha_, alpha_ <= 1, 0 <= xi_]
for true_idx, ref_idx in enumerate(train_sets[cls]):
tmp, circle_idx = _union_size(self, train_sets[cls], ref_idx)
Cl[ref_idx] = self.lambda_ + tmp
nbr_list = np.zeros(alpha_.shape)
nbr_list[circle_idx] = 1
cons += [1 - xi_[ref_idx] <= sum(np.asarray(nbr_list) * alpha_)]
obj = cvx.Minimize(sum(Cl * alpha_) + sum(xi_))
# Solve for class "cls"
prob_cls = cvx.Problem(obj, cons)
prob_cls.solve()
alpha_.value[alpha_.value > 1] = 1
alpha_.value[alpha_.value < 0] = 0 # clip small negative LP round-off so np.random.binomial gets valid probabilities
xi_.value[xi_.value < 0] = 0
# Bernoulli rounding
A_l = np.zeros(alpha_.shape)
S_i = np.zeros(xi_.shape)
flag = False
while not flag:
for t in range(int(2 * np.log(len(train_sets[cls])))):
A_tilt = np.random.binomial(1, alpha_.value)
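# Illustrative note: np.random.binomial(1, p) draws an independent 0/1 sample for every
# entry of p, so fractional LP values close to 1 are almost always rounded to selected
# prototypes and values close to 0 almost never, e.g. p = [0.05, 0.9] typically gives [0, 1].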
import numpy as np
import matplotlib.pyplot as plt
from mutations import current_to_best_mutation, rand_mutation
# TODO: Visualize the steps of Differential Evolution
# TODO: Implement variants of the algorithm (have separate mutation and recombination methods in the class)
class DifferentialEvolution:
def __init__(self, f, limits, seed=None):
"""
Expects limits as list of intervals
"""
self.f = f
self.dimensions = len(limits)
self.limits = np.asarray(limits).T
self.seed = seed
self.fitness_history = None
self.generation_history = None
self.best_in_generation = None
def _rescale(self, arr):
lower, upper = self.limits
diff = np.abs(upper - lower)
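# Hypothetical continuation (the snippet is cut off here): _rescale presumably maps
# unit-cube samples arr in [0, 1]^d into the search box defined by self.limits, i.e.
# return lower + arr * diff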
from __future__ import print_function, division
import os, sys, warnings, platform
from time import time
import numpy as np
if "PyPy" not in platform.python_implementation():
from scipy.io import loadmat, savemat
from Florence.Tensor import makezero, itemfreq, unique2d, in2d
from Florence.Utils import insensitive
from .vtk_writer import write_vtu
try:
import meshpy.triangle as triangle
has_meshpy = True
except ImportError:
has_meshpy = False
from .HigherOrderMeshing import *
from .NodeArrangement import *
from .GeometricPath import *
from warnings import warn
from copy import deepcopy
"""
Mesh class providing most of the pre-processing functionalities of the Core module
<NAME> - 13/06/2015
"""
class Mesh(object):
"""Mesh class provides the following functionalities:
1. Generating higher order meshes based on a linear mesh, for tris, tets, quads and hexes
2. Generating linear tri and tet meshes based on meshpy back-end
3. Generating linear tri meshes based on distmesh back-end
4. Finding boundary edges and faces for tris and tets, in case they are not provided by the mesh generator
5. Reading Salome meshes in binary (.dat/.txt/etc) format
6. Reading gmsh files .msh
7. Checking for node numbering order of elements and fixing it if desired
8. Writing meshes to unstructured vtk file format (.vtu) in xml and binary formats,
including high order elements
"""
def __init__(self, element_type=None):
super(Mesh, self).__init__()
# self.faces and self.edges ARE BOUNDARY FACES
# AND BOUNDARY EDGES, RESPECTIVELY
self.degree = None
self.ndim = None
self.edim = None
self.nelem = None
self.nnode = None
self.elements = None
self.points = None
self.corners = None
self.edges = None
self.faces = None
self.element_type = element_type
self.face_to_element = None
self.edge_to_element = None
self.boundary_edge_to_element = None
self.boundary_face_to_element = None
self.all_faces = None
self.all_edges = None
self.interior_faces = None
self.interior_edges = None
# TYPE OF BOUNDARY FACES/EDGES
self.boundary_element_type = None
# FOR GEOMETRICAL CURVES/SURFACES
self.edge_to_curve = None
self.face_to_surface = None
self.spatial_dimension = None
self.reader_type = None
self.reader_type_format = None
self.reader_type_version = None
self.writer_type = None
self.filename = None
# self.has_meshpy = has_meshpy
def SetElements(self,arr):
self.elements = arr
def SetPoints(self,arr):
self.points = arr
def SetEdges(self,arr):
self.edges = arr
def SetFaces(self,arr):
self.faces = arr
def GetElements(self):
return self.elements
def GetPoints(self):
return self.points
def GetEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetEdgesTri()
elif self.element_type == "quad":
self.GetEdgesQuad()
elif self.element_type == "pent":
self.GetEdgesPent()
elif self.element_type == "tet":
self.GetEdgesTet()
elif self.element_type == "hex":
self.GetEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.all_edges
def GetBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetBoundaryEdgesTri()
elif self.element_type == "quad":
self.GetBoundaryEdgesQuad()
elif self.element_type == "pent":
self.GetBoundaryEdgesPent()
elif self.element_type == "tet":
self.GetBoundaryEdgesTet()
elif self.element_type == "hex":
self.GetBoundaryEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.edges
def GetInteriorEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetInteriorEdgesTri()
elif self.element_type == "quad":
self.GetInteriorEdgesQuad()
elif self.element_type == "pent":
self.GetInteriorEdgesPent()
elif self.element_type == "tet":
self.GetInteriorEdgesTet()
elif self.element_type == "hex":
self.GetInteriorEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.interior_edges
def GetFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetFacesTet()
elif self.element_type == "hex":
self.GetFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.all_faces
def GetBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetBoundaryFacesTet()
elif self.element_type == "hex":
self.GetBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.faces
def GetInteriorFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetInteriorFacesTet()
elif self.element_type == "hex":
self.GetInteriorFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.interior_faces
def GetElementsEdgeNumbering(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsEdgeNumberingTri()
elif self.element_type == "quad":
return self.GetElementsEdgeNumberingQuad()
else:
raise ValueError('Type of element not understood')
return self.edge_to_element
def GetElementsWithBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
return self.GetElementsWithBoundaryEdgesTri()
elif self.element_type == "quad":
return self.GetElementsWithBoundaryEdgesQuad()
else:
raise ValueError('Type of element not understood')
return self.boundary_edge_to_element
def GetElementsFaceNumbering(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsFaceNumberingTet()
elif self.element_type == "hex":
return self.GetElementsFaceNumberingHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.face_to_element
def GetElementsWithBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
return self.GetElementsWithBoundaryFacesTet()
elif self.element_type == "hex":
return self.GetElementsWithBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.boundary_face_to_element
@property
def Bounds(self):
"""Returns bounds of a mesh i.e. the minimum and maximum coordinate values
in every direction
"""
assert self.points is not None
if self.points.shape[1] == 3:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1]),
np.min(self.points[:,2])],
[np.max(self.points[:,0]),
np.max(self.points[:,1]),
np.max(self.points[:,2])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 2:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1])],
[np.max(self.points[:,0]),
np.max(self.points[:,1])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 1:
bounds = np.array([[np.min(self.points[:,0])],
[np.max(self.points[:,0])]])
makezero(bounds)
return bounds
else:
raise ValueError("Invalid dimension for mesh coordinates")
def GetEdgesTri(self):
"""Find all edges of a triangular mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementTri(p-1)[0]
# CHECK IF FACES ARE ALREADY AVAILABLE
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1 and self.all_edges.shape[1] == p+1:
warn("Mesh edges seem to be already computed. I am going to recompute them")
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.zeros((3*self.elements.shape[0],p+1),dtype=np.uint64)
edges[:self.elements.shape[0],:] = self.elements[:,node_arranger[0,:]]
edges[self.elements.shape[0]:2*self.elements.shape[0],:] = self.elements[:,node_arranger[1,:]]
edges[2*self.elements.shape[0]:,:] = self.elements[:,node_arranger[2,:]]
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesTet
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesTet":
self.all_edges = edges
return edges
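# Worked example (illustrative): the three edge blocks are stacked element by element
# above, so a unique edge found at flattened row idx originates from element
# idx % nelem and is that element's local edge number idx // nelem (0, 1 or 2 for a triangle).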
def GetBoundaryEdgesTri(self):
"""Find boundary edges (lines) of triangular mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementTri(p-1)[0]
# CONCATENATE ALL THE EDGES MADE FROM ELEMENTS
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]]),axis=0)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
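# Worked note (illustrative): in a conforming triangulation an interior edge is shared
# by two elements and therefore appears twice in all_edges, while a boundary edge
# appears exactly once, which is why rows with frequency 1 in itemfreq(inv) are boundary edges.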
def GetInteriorEdgesTri(self):
"""Computes interior edges of a triangular mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesTri()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesTri()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetFacesTet(self):
"""Find all faces (surfaces) in the tetrahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 3 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementTet(p-1)[0]
fsize = int((p+1.)*(p+2.)/2.)
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.zeros((4*self.elements.shape[0],fsize),dtype=np.uint64)
faces[:self.elements.shape[0],:] = self.elements[:,node_arranger[0,:]]
faces[self.elements.shape[0]:2*self.elements.shape[0],:] = self.elements[:,node_arranger[1,:]]
faces[2*self.elements.shape[0]:3*self.elements.shape[0],:] = self.elements[:,node_arranger[2,:]]
faces[3*self.elements.shape[0]:,:] = self.elements[:,node_arranger[3,:]]
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
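# Worked example (illustrative): each tetrahedral face is a degree-p triangle, so
# fsize = (p+1)(p+2)/2 nodes per face, i.e. 3 nodes for p=1 and 6 nodes for p=2.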
def GetEdgesTet(self):
"""Find all edges (lines) of tetrahedral mesh (boundary & interior)"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1] == 2 and p > 1:
pass
else:
return self.all_edges
# FIRST GET BOUNDARY FACES
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 3 and p > 1:
self.GetFacesTet()
else:
self.GetFacesTet()
# BUILD A 2D MESH
tmesh = Mesh()
# tmesh = deepcopy(self)
tmesh.element_type = "tri"
tmesh.elements = self.all_faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# COMPUTE ALL EDGES
self.all_edges = tmesh.GetEdgesTri()
return self.all_edges
def GetBoundaryFacesTet(self):
"""Find boundary faces (surfaces) of a tetrahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.faces.shape[1] == 3 and p > 1:
pass
else:
return
node_arranger = NodeArrangementTet(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
# THE FOLLOWING WILL COMPUTE FACES BASED ON SORTING AND NOT TAKING INTO ACCOUNT
# THE ELEMENT CONNECTIVITY
# boundary_face_to_element[:,0] = np.remainder(idx[faces_ext_flags],self.elements.shape[0])
# boundary_face_to_element[:,1] = np.floor_divide(idx[faces_ext_flags],self.elements.shape[0])
# OR EQUIVALENTLY
# boundary_face_to_element[:,0] = idx[faces_ext_flags] % self.elements.shape[0]
# boundary_face_to_element[:,1] = idx[faces_ext_flags] // self.elements.shape[0]
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetBoundaryEdgesTet(self):
"""Find boundary edges (lines) of tetrahedral mesh.
Note that for tetrahedrals this function is more robust than Salome's default edge generator
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesTet()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "tri"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesTri()
def GetInteriorFacesTet(self):
"""Computes interior faces of a tetrahedral mesh
returns:
interior_faces ndarray of interior faces
face_flags 1D array of face flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesTet()
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesTet()
face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)
face_flags[face_flags==True] = 1
face_flags[face_flags==False] = 0
interior_faces = self.all_faces[face_flags==False,:]
return interior_faces, face_flags
def GetInteriorEdgesTet(self):
"""Computes interior faces of a tetrahedral mesh
returns:
interior_edges ndarray of interior edges
edge_flags 1D array of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesTet()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesTet()
edge_flags = in2d(self.all_edges.astype(self.edges.dtype),self.edges,consider_sort=True)
edge_flags[edge_flags==True] = 1
edge_flags[edge_flags==False] = 0
interior_edges = self.all_edges[edge_flags==False,:]
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetEdgesQuad(self):
"""Find the all edges of a quadrilateral mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesHex
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesHex":
self.all_edges = edges
return edges
def GetBoundaryEdgesQuad(self):
"""Find boundary edges (lines) of a quadrilateral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesQuad(self):
"""Computes interior edges of a quadrilateral mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesQuad()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesQuad()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetFacesHex(self):
"""Find all faces (surfaces) in the hexahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_faces.shape[1] == 4 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementHex(p-1)[0]
fsize = int((p+1)**2) # number of nodes on a degree-p quadrilateral face
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
def GetEdgesHex(self):
"""Find all edges (lines) of tetrahedral mesh (boundary & interior)"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1] == 2 and p > 1:
pass
else:
return self.all_edges
# FIRST GET BOUNDARY FACES
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
# tmesh = deepcopy(self)
tmesh.element_type = "quad"
tmesh.elements = self.all_faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# COMPUTE ALL EDGES
self.all_edges = tmesh.GetEdgesQuad()
return self.all_edges
def GetBoundaryFacesHex(self):
"""Find boundary faces (surfaces) of a hexahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.faces.shape[1] == 4 and p > 1:
pass
else:
return
node_arranger = NodeArrangementHex(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetBoundaryEdgesHex(self):
"""Find boundary edges (lines) of hexahedral mesh.
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "quad"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesQuad()
def GetInteriorFacesHex(self):
"""Computes interior faces of a hexahedral mesh
returns:
interior_faces ndarray of interior faces
face_flags 1D array of face flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_faces,np.ndarray):
self.GetFacesHex()
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)
face_flags[face_flags==True] = 1
face_flags[face_flags==False] = 0
interior_faces = self.all_faces[face_flags==False,:]
return interior_faces, face_flags
def GetInteriorEdgesHex(self):
"""Computes interior faces of a hexahedral mesh
returns:
interior_edges ndarray of interior edges
edge_flags 1D array of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesHex()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesHex()
edge_flags = in2d(self.all_edges.astype(self.edges.dtype),self.edges,consider_sort=True)
edge_flags[edge_flags==True] = 1
edge_flags[edge_flags==False] = 0
interior_edges = self.all_edges[edge_flags==False,:]
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetEdgesPent(self):
"""Find the all edges of a pentagonal mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = np.array([
[0,1],
[1,2],
[2,3],
[3,4],
[4,0],
])
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],
self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
self.all_edges = edges
return edges
def GetBoundaryEdgesPent(self):
"""Find boundary edges (lines) of a pentagonal mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = np.array([
[0,1],
[1,2],
[2,3],
[3,4],
[4,0],
])
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],
self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetInteriorEdgesPent(self):
"""Computes interior edges of a pentagonal mesh
returns:
interior_edges ndarray of interior edges
edge_flags ndarray of edge flags: 0 for interior and 1 for boundary
"""
if not isinstance(self.all_edges,np.ndarray):
self.GetEdgesPent()
if not isinstance(self.edges,np.ndarray):
self.GetBoundaryEdgesPent()
sorted_all_edges = np.sort(self.all_edges,axis=1)
sorted_boundary_edges = np.sort(self.edges,axis=1)
x = []
for i in range(self.edges.shape[0]):
current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],
self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])
interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)
pos_interior_edges = np.where(interior_edges==0)[0]
if pos_interior_edges.shape[0] != 0:
x.append(pos_interior_edges)
edge_aranger = np.arange(self.all_edges.shape[0])
edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])
interior_edges = self.all_edges[edge_aranger,:]
# GET FLAGS FOR BOUNDARY AND INTERIOR
edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)
edge_flags[edge_aranger] = 0
self.interior_edges = interior_edges
return interior_edges, edge_flags
def GetHighOrderMesh(self,p=1, silent=True, **kwargs):
"""Given a linear tri, tet, quad or hex mesh compute high order mesh based on it.
This is a static method linked to the HigherOrderMeshing module"""
if not isinstance(p,int):
raise ValueError("p must be an integer")
else:
if p < 1:
raise ValueError("Value of p={} is not acceptable. Provide p>=1.".format(p))
if self.degree is None:
self.InferPolynomialDegree()
C = p-1
if 'C' in kwargs.keys():
if kwargs['C'] != p - 1:
raise ValueError("Did not understand the specified interpolation degree of the mesh")
del kwargs['C']
# DO NOT COMPUTE IF ALREADY COMPUTED FOR THE SAME ORDER
if self.degree == None:
self.degree = self.InferPolynomialDegree()
if self.degree == p:
return
# SITUATIONS WHEN ANOTHER HIGH ORDER MESH IS REQUIRED, WITH ONE HIGH
# ORDER MESH ALREADY AVAILABLE
if self.degree != 1 and self.degree - 1 != C:
dum = self.GetLinearMesh(remap=True)
self.__dict__.update(dum.__dict__)
if not silent:
print('Generating p = '+str(C+1)+' mesh based on the linear mesh...')
t_mesh = time()
# BUILD A NEW MESH BASED ON THE LINEAR MESH
if self.element_type == 'line':
nmesh = HighOrderMeshLine(C,self,**kwargs)
if self.element_type == 'tri':
if self.edges is None:
self.GetBoundaryEdgesTri()
# nmesh = HighOrderMeshTri(C,self,**kwargs)
nmesh = HighOrderMeshTri_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'tet':
# nmesh = HighOrderMeshTet(C,self,**kwargs)
nmesh = HighOrderMeshTet_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'quad':
if self.edges is None:
self.GetBoundaryEdgesQuad()
nmesh = HighOrderMeshQuad(C,self,**kwargs)
elif self.element_type == 'hex':
nmesh = HighOrderMeshHex(C,self,**kwargs)
self.points = nmesh.points
self.elements = nmesh.elements.astype(np.uint64)
if isinstance(self.corners,np.ndarray):
# NOT NECESSARY BUT GENERIC
self.corners = nmesh.corners.astype(np.uint64)
if isinstance(self.edges,np.ndarray):
self.edges = nmesh.edges.astype(np.uint64)
if isinstance(self.faces,np.ndarray):
if isinstance(nmesh.faces,np.ndarray):
self.faces = nmesh.faces.astype(np.uint64)
self.nelem = nmesh.nelem
self.nnode = self.points.shape[0]
self.element_type = nmesh.info
self.degree = C+1
self.ChangeType()
if not silent:
print('Finished generating the high order mesh. Time taken', time()-t_mesh,'sec')
def EdgeLengths(self,which_edges='boundary'):
"""Computes length of edges, for 2D and 3D meshes
which_edges: [str] 'boundary' for boundary edges only
and 'all' for all edges
"""
assert self.points is not None
assert self.element_type is not None
lengths = None
if which_edges == 'boundary':
if self.edges is None:
self.GetBoundaryEdges()
edge_coords = self.points[self.edges[:,:2],:]
lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
elif which_edges == 'all':
if self.all_edges is None:
self.GetEdges()
edge_coords = self.points[self.all_edges[:,:2],:]
lengths = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
return lengths
def Lengths(self,):
"""Computes length of all types of elements
"""
self.__do_essential_memebers_exist__()
if self.element_type == "line":
coords = self.points[self.elements[:,:2],:]
lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)
else:
self.GetEdges()
# measure the lengths of all edges, using the edge connectivity computed above
coords = self.points[self.all_edges[:,:2],:]
lengths = np.linalg.norm(coords[:,1,:] - coords[:,0,:],axis=1)
return lengths
def Areas(self, with_sign=False, gpoints=None):
"""Find areas of all 2D elements [tris, quads].
For 3D elements returns surface areas of all faces
input:
with_sign: [str] compute with/without sign
gpoints: [ndarray] given coordinates to use instead of
self.points
returns: 1D array of nelem x 1 containing areas
"""
assert self.elements is not None
assert self.element_type is not None
if gpoints is None:
assert self.points is not None
gpoints = self.points
if self.element_type == "tri":
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2] = gpoints
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*np.linalg.det(points[self.elements[:,:3],:])
elif self.element_type == "quad":
# NODE ORDERING IS IRRELEVANT, AS IT IS THESE AREAS
# WHICH DETERMINE NODE ORDERING
# AREA OF QUAD ABCD = AREA OF ABC + AREA OF ACD
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2] = gpoints
# FIND AREAS ABC
area0 = np.linalg.det(points[self.elements[:,:3],:])
# FIND AREAS ACD
area1 = np.linalg.det(points[self.elements[:,[0,2,3]],:])
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*(area0+area1)
elif self.element_type == "tet":
# GET ALL THE FACES
faces = self.GetFacesTet()
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
points[:,:2]=gpoints[:,:2]
area0 = np.linalg.det(points[faces[:,:3],:])
points[:,:2]=gpoints[:,[2,0]]
area1 = np.linalg.det(points[faces[:,:3],:])
points[:,:2]=gpoints[:,[1,2]]
area2 = np.linalg.det(points[faces[:,:3],:])
area = 0.5*np.linalg.norm(area0+area1+area2)
elif self.element_type == "hex":
from Florence.Tensor import unique2d
C = self.InferPolynomialDegree() - 1
area = 0
node_arranger = NodeArrangementHex(C)[0]
for i in range(node_arranger.shape[0]):
# print node_arranger[i,:]
# AREA OF FACES
points = np.ones((gpoints.shape[0],3),dtype=np.float64)
if i==0 or i==1:
points[:,:2] = gpoints[:,:2]
elif i==2 or i==3:
points[:,:2] = gpoints[:,[0,2]]
elif i==4 or i==5:
points[:,:2] = gpoints[:,1:]
# FIND AREAS ABC
area0 = np.linalg.det(points[self.elements[:,node_arranger[i,:3]],:])
# FIND AREAS ACD
area1 = np.linalg.det(points[self.elements[:,node_arranger[i,1:]],:])
# FIND AREAS OF ALL THE ELEMENTS
area += 0.5*np.linalg.norm(area0+area1)
# print area
raise ValueError('Hex areas implementation requires further checks')
else:
raise NotImplementedError("Computing areas for", self.element_type, "elements not implemented yet")
if with_sign is False:
if self.element_type == "tri" or self.element_type == "quad":
area = np.abs(area)
elif self.element_type == "tet":
raise NotImplementedError('Numbering order of tetrahedral faces could not be determined')
return area
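# Worked example (illustrative): for a "tri" mesh the element (0,0), (1,0), (0,1) gives a
# homogeneous-coordinate determinant of magnitude 1, so Areas() returns 0.5, the area of
# the unit right triangle; the sign is only kept when with_sign is True.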
def Volumes(self, with_sign=False, gpoints=None):
"""Find Volumes of all 3D elements [tets, hexes]
input:
with_sign: [str] compute with/without sign
gpoints: [ndarray] given coordinates to use instead of
self.points
returns: 1D array of nelem x 1 containing volumes
"""
assert self.elements is not None
assert self.element_type is not None
if self.points.shape[1] == 2:
raise ValueError("2D mesh does not have volume")
if gpoints is None:
assert self.points is not None
gpoints = self.points
if self.element_type == "tet":
a = gpoints[self.elements[:,0],:]
b = gpoints[self.elements[:,1],:]
c = gpoints[self.elements[:,2],:]
d = gpoints[self.elements[:,3],:]
det_array = np.dstack((a-d,b-d,c-d))
# FIND VOLUME OF ALL THE ELEMENTS
volume = 1./6.*np.linalg.det(det_array)
elif self.element_type == "hex":
# Refer: https://en.wikipedia.org/wiki/Parallelepiped
a = gpoints[self.elements[:,0],:]
b = gpoints[self.elements[:,1],:]
c = gpoints[self.elements[:,3],:]
d = gpoints[self.elements[:,4],:]
det_array = np.dstack((b-a,c-a,d-a))
# FIND VOLUME OF ALL THE ELEMENTS
volume = np.linalg.det(det_array)
else:
raise NotImplementedError("Computing volumes for", self.element_type, "elements not implemented yet")
if with_sign is False:
volume = np.abs(volume)
return volume
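# Worked example (illustrative): for a "tet" element with vertices (0,0,0), (1,0,0),
# (0,1,0), (0,0,1) the determinant above has magnitude 1, so Volumes() returns 1/6;
# the sign of the determinant is only kept when with_sign is True.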
def Sizes(self, with_sign=False):
"""Computes the size of elements for all element types.
This is a generic method that returns lengths for 1D, areas for 2D and volumes for 3D elements.
It works for planar and curved elements
"""
self.__do_essential_memebers_exist__()
try:
from Florence import DisplacementFormulation
except ImportError:
raise ValueError("This functionality requires Florence's support")
if self.element_type != "line":
# FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED
formulation = DisplacementFormulation(self)
sizes = np.zeros(self.nelem)
if not with_sign:
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetVolume(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
else:
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetSignedVolume(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
return sizes
else:
warn("Sizes of line elements could be incorrect if the mesh is curvilinear")
return self.Lengths()
def AspectRatios(self,algorithm='edge_based'):
"""Compute aspect ratio of the mesh element-by-element.
For 2D meshes the aspect ratio is defined as
the ratio of maximum edge length to minimum edge length.
For 3D meshes aspect ratio can be either length or area based.
input:
algorithm: [str] 'edge_based' or 'face_based'
returns:
aspect_ratio: [1D array] of size (self.nelem) containing aspect ratio of elements
"""
assert self.points is not None
assert self.element_type is not None
aspect_ratio = None
if algorithm == 'edge_based':
if self.element_type == "tri":
edge_coords = self.points[self.elements[:,:3],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
AC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
minimum = np.minimum(np.minimum(AB,AC),BC)
maximum = np.maximum(np.maximum(AB,AC),BC)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "quad":
edge_coords = self.points[self.elements[:,:4],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,0,:] - edge_coords[:,3,:],axis=1)
minimum = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "tet":
edge_coords = self.points[self.elements[:,:4],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
AC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,0,:],axis=1)
AD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
BD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
minimum = np.minimum(np.minimum(np.minimum(np.minimum(np.minimum(AB,AC),AD),BC),BD),CD)
maximum = np.maximum(np.maximum(np.maximum(np.maximum(np.maximum(AB,AC),AD),BC),BD),CD)
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "hex":
edge_coords = self.points[self.elements[:,:8],:]
AB = np.linalg.norm(edge_coords[:,1,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,2,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,3,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,0,:] - edge_coords[:,3,:],axis=1)
minimum0 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum0 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
AB = np.linalg.norm(edge_coords[:,5,:] - edge_coords[:,4,:],axis=1)
BC = np.linalg.norm(edge_coords[:,6,:] - edge_coords[:,5,:],axis=1)
CD = np.linalg.norm(edge_coords[:,7,:] - edge_coords[:,6,:],axis=1)
DA = np.linalg.norm(edge_coords[:,4,:] - edge_coords[:,7,:],axis=1)
minimum1 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum1 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
AB = np.linalg.norm(edge_coords[:,4,:] - edge_coords[:,0,:],axis=1)
BC = np.linalg.norm(edge_coords[:,5,:] - edge_coords[:,1,:],axis=1)
CD = np.linalg.norm(edge_coords[:,6,:] - edge_coords[:,2,:],axis=1)
DA = np.linalg.norm(edge_coords[:,7,:] - edge_coords[:,3,:],axis=1)
minimum2 = np.minimum(np.minimum(np.minimum(AB,BC),CD),DA)
maximum2 = np.maximum(np.maximum(np.maximum(AB,BC),CD),DA)
minimum = np.minimum(minimum0,np.minimum(minimum1,minimum2))
maximum = np.maximum(maximum0,np.maximum(maximum1,maximum2))
aspect_ratio = 1.0*maximum/minimum
elif self.element_type == "line":
raise ValueError("Line elments do no have aspect ratio")
elif algorithm == 'face_based':
raise NotImplementedError("Face/area based aspect ratio is not implemented yet")
return aspect_ratio
def FaceNormals(self):
"""Computes outward unit normals on faces.
This is a generic method for all element types apart from lines. If the mesh is in 2D plane
then the unit outward normals will point in Z direction. If the mesh is quad or tri type but
in 3D plane, this will still compute the correct unit outward normals. outwardness can only
be guaranteed for volume meshes.
This method is different from the method self.Normals() as the latter can compute normals
for 1D/2D elements in-plane
"""
self.__do_memebers_exist__()
points = np.copy(self.points)
if points.shape[1] < 3:
dum = np.zeros((points.shape[0],3))
dum[:,:points.shape[1]] = points
points = dum
if self.element_type == "tet" or self.element_type == "hex":
faces = self.faces
elif self.element_type == "tri" or self.element_type == "quad":
faces = self.elements
else:
raise ValueError("Cannot compute face normals on {}".format(self.element_type))
face_coords = self.points[faces[:,:3],:]
p1p0 = face_coords[:,1,:] - face_coords[:,0,:]
p2p0 = face_coords[:,2,:] - face_coords[:,0,:]
normals = np.cross(p1p0,p2p0)
norm_normals = np.linalg.norm(normals,axis=1)
normals[:,0] /= norm_normals
normals[:,1] /= norm_normals
normals[:,2] /= norm_normals
# CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER
if self.element_type == "tet" or self.element_type == "hex":
self.GetElementsWithBoundaryFaces()
meds = self.Medians()
face_element_meds = meds[self.boundary_face_to_element[:,0],:]
p1pm = face_coords[:,1,:] - face_element_meds
# IF THE DOT PRODUCT OF NORMALS AND FACE-MED NODE VECTOR IS NEGATIVE THEN FLIP
_check = np.einsum("ij,ij->i",normals,p1pm)
normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]
return normals
def Normals(self, show_plot=False):
"""Computes unit outward normals to the boundary for all element types.
Unity and outwardness are guaranteed
"""
self.__do_memebers_exist__()
ndim = self.InferSpatialDimension()
if self.element_type == "tet" or self.element_type == "hex":
normals = self.FaceNormals()
elif self.element_type == "tri" or self.element_type == "quad" or self.element_type == "line":
if self.points.shape[1] == 3:
normals = self.FaceNormals()
else:
if self.element_type == "tri" or self.element_type == "quad":
edges = self.edges
elif self.element_type == "line":
edges = self.elements
edge_coords = self.points[edges[:,:2],:]
p1p0 = edge_coords[:,1,:] - edge_coords[:,0,:]
normals = np.zeros_like(p1p0)
normals[:,0] = -p1p0[:,1]
normals[:,1] = p1p0[:,0]
norm_normals = np.linalg.norm(normals,axis=1)
normals[:,0] /= norm_normals
normals[:,1] /= norm_normals
# CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER
if self.element_type == "tri" or self.element_type == "quad":
self.GetElementsWithBoundaryEdges()
meds = self.Medians()
edge_element_meds = meds[self.boundary_edge_to_element[:,0],:]
p1pm = edge_coords[:,1,:] - edge_element_meds
# IF THE DOT PRODUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP
_check = np.einsum("ij,ij->i",normals,p1pm)
normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]
if show_plot:
if ndim == 2:
mid_edge_coords = 0.5*(edge_coords[:,1,:] + edge_coords[:,0,:])
import matplotlib.pyplot as plt
figure = plt.figure()
self.SimplePlot(figure=figure, show_plot=False)
q = plt.quiver(mid_edge_coords[:,0], mid_edge_coords[:,1],
normals[:,0], normals[:,1],
color='Teal', headlength=5, width=0.004)
plt.axis('equal')
plt.axis('off')
plt.tight_layout()
plt.show()
elif ndim == 3:
mid_face_coords = np.sum(self.points[self.faces,:3],axis=1)/self.faces.shape[1]
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
self.SimplePlot(figure=figure, show_plot=False)
mlab.quiver3d(mid_face_coords[:,0], mid_face_coords[:,1], mid_face_coords[:,2],
normals[:,0], normals[:,1], normals[:,2],
color=(0.,128./255,128./255),line_width=2)
mlab.show()
return normals
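# Hedged usage sketch (assumed call pattern, not from the original source): for a planar
# 2D mesh the boundary normals can be sanity-checked for unit length:
#   n = mesh.Normals()                                    # one outward unit normal per boundary edge/face
#   assert np.allclose(np.linalg.norm(n, axis=1), 1.0)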
def Angles(self, degrees=True):
"""Compute angles of 2D meshes. Strictly 2D meshes and linear elements.
If the mesh is curved the angles would be inaccurate
input:
degrees [bool] if True returns angles in degrees
otherwise in radians
returns:
angles [2D array] of angles per element. Angles are
computed per element so every element will
have as many angles as its nodes
"""
self.__do_essential_memebers_exist__()
if self.InferElementalDimension() != 2:
raise ValueError("Angles can be computed only for 2D elements")
if self.InferSpatialDimension() != 2:
raise ValueError("Angles can be computed only in 2-dimensional plane")
nodeperelem = self.InferNumberOfNodesPerLinearElement()
angles = np.zeros((self.nelem, nodeperelem))
norm = lambda x: np.linalg.norm(x,axis=1)
edge_coords = self.points[self.elements[:,:],:]
if self.element_type == "tri":
AB = edge_coords[:,1,:] - edge_coords[:,0,:]
AC = edge_coords[:,2,:] - edge_coords[:,0,:]
BC = edge_coords[:,2,:] - edge_coords[:,1,:]
angles[:,0] = np.einsum("ij,ij->i",AB,AC) / (norm(AB)*norm(AC))
angles[:,1] = np.einsum("ij,ij->i",AC,BC) / (norm(AC)*norm(BC))
angles[:,2] = np.einsum("ij,ij->i",BC,-AB)/ (norm(BC)*norm(AB))
angles = np.arccos(angles)
elif self.element_type == "quad":
AB = edge_coords[:,1,:] - edge_coords[:,0,:]
BC = edge_coords[:,2,:] - edge_coords[:,1,:]
CD = edge_coords[:,3,:] - edge_coords[:,2,:]
DA = edge_coords[:,0,:] - edge_coords[:,3,:]
angles[:,0] = np.einsum("ij,ij->i",AB,BC) / (norm(AB)*norm(BC))
angles[:,1] = np.einsum("ij,ij->i",BC,CD) / (norm(BC)*norm(CD))
angles[:,2] = np.einsum("ij,ij->i",CD,DA) / (norm(CD)*norm(DA))
angles[:,3] = np.einsum("ij,ij->i",DA,-AB)/ (norm(DA)*norm(AB))
angles = np.arccos(angles)
if degrees:
angles *= 180/np.pi
return angles
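# Hedged usage sketch: the minimum interior angle is a common 2D mesh-quality indicator.
#   angles = mesh.Angles(degrees=True)    # nelem x nodes-per-element array
#   print("worst angle:", angles.min())   # values near 0 or 180 flag sliver elements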
def BoundingBoxes(self, show_plot=False, figure=None):
"""Computes a bounding box for every element.
This method complements the Bounds method/property in that it computes
the bounds for every individual element
returns:
bboxes [3D array] of nelem x ndim x ndim of bounding
boxes for every element
"""
self.__do_essential_memebers_exist__()
ndim = self.InferSpatialDimension()
all_elem_coords = self.points[self.elements]
mins = all_elem_coords.min(axis=1)
maxs = all_elem_coords.max(axis=1)
bboxes = np.zeros((2*self.nelem,self.points.shape[1]))
bboxes[::2] = mins
bboxes[1::2] = maxs
bboxes = bboxes.reshape(self.nelem,2,self.points.shape[1])
if show_plot:
if ndim == 3:
point_generator = lambda bbox: np.array([
[ bbox[0,0], bbox[0,1], bbox[0,2] ],
[ bbox[1,0], bbox[0,1], bbox[0,2] ],
[ bbox[1,0], bbox[1,1], bbox[0,2] ],
[ bbox[0,0], bbox[1,1], bbox[0,2] ],
[ bbox[0,0], bbox[0,1], bbox[1,2] ],
[ bbox[1,0], bbox[0,1], bbox[1,2] ],
[ bbox[1,0], bbox[1,1], bbox[1,2] ],
[ bbox[0,0], bbox[1,1], bbox[1,2] ]
])
elif ndim == 2:
point_generator = lambda bbox: np.array([
[ bbox[0,0], bbox[0,1] ],
[ bbox[1,0], bbox[0,1] ],
[ bbox[1,0], bbox[1,1] ],
[ bbox[0,0], bbox[1,1] ]
])
nsize = 4 if ndim ==2 else 8
ranger = np.arange(nsize)
bmesh = Mesh()
bmesh.element_type = "quad" if ndim ==2 else "hex"
bmesh.elements = np.arange(self.nelem*nsize).reshape(self.nelem,nsize)
bmesh.points = np.zeros((self.nelem*nsize,ndim))
bmesh.nelem = self.nelem
bmesh.nnode = bmesh.points.shape[0]
for i in range(0,self.nelem):
bmesh.points[i*nsize:(i+1)*nsize,:] = point_generator(bboxes[i])
if ndim == 2:
import matplotlib.pyplot as plt
if figure is None:
figure = plt.figure()
self.SimplePlot(figure=figure, show_plot=False)
bmesh.SimplePlot(figure=figure, show_plot=False, edge_color='r')
plt.show()
else:
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
self.SimplePlot(figure=figure, show_plot=False)
bmesh.SimplePlot(figure=figure, show_plot=False, plot_faces=False, edge_color='r')
mlab.show()
return bboxes
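# Hedged usage sketch: per-element axis-aligned boxes can serve as a cheap broad phase
# before exact point-in-element tests.
#   bboxes = mesh.BoundingBoxes()                  # nelem x 2 x ndim: [min corner, max corner]
#   extents = bboxes[:, 1, :] - bboxes[:, 0, :]    # axis-aligned size of every element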
def Medians(self, geometric=True):
"""Computes median of the elements tri, tet, quad, hex based on the interpolation function
input:
geometric [Bool] geometrically computes median without relying on FEM bases
returns:
median: [ndarray] of median of elements
bases_at_median: [1D array] of (p=1) bases at median
"""
self.__do_essential_memebers_exist__()
median = None
if geometric == True:
median = np.sum(self.points[self.elements,:],axis=1)/self.elements.shape[1]
return median
else:
try:
from Florence.FunctionSpace import Tri, Tet
from Florence.QuadratureRules import FeketePointsTri, FeketePointsTet
except ImportError:
raise ImportError("This functionality requires florence's support")
if self.element_type == "tri":
eps = FeketePointsTri(2)
middle_point_isoparametric = eps[6,:]
if not np.isclose(sum(middle_point_isoparametric),-0.6666666):
raise ValueError("Median of triangle does not match [-0.3333,-0.3333]. "
"Did you change your nodal spacing or interpolation functions?")
hpBases = Tri.hpNodal.hpBases
bases_for_middle_point = hpBases(0,middle_point_isoparametric[0],
middle_point_isoparametric[1])[0]
median = np.einsum('ijk,j',self.points[self.elements[:,:3],:],bases_for_middle_point)
elif self.element_type == "tet":
middle_point_isoparametric = FeketePointsTet(3)[21]
if not np.isclose(sum(middle_point_isoparametric),-1.5):
raise ValueError("Median of tetrahedral does not match [-0.5,-0.5,-0.5]. "
"Did you change your nodal spacing or interpolation functions?")
# C = self.InferPolynomialDegree() - 1
hpBases = Tet.hpNodal.hpBases
bases_for_middle_point = hpBases(0,middle_point_isoparametric[0],
middle_point_isoparametric[1],middle_point_isoparametric[2])[0]
median = np.einsum('ijk,j',self.points[self.elements[:,:4],:],bases_for_middle_point)
else:
raise NotImplementedError('Median for {} elements not implemented yet'.format(self.element_type))
return median, bases_for_middle_point
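# Hedged usage sketch: for straight-sided elements the geometric and FEM-based medians
# should coincide (the latter requires Florence's FunctionSpace support).
#   m_geo = mesh.Medians(geometric=True)           # nelem x ndim array of element centroids
#   m_fem, bases = mesh.Medians(geometric=False)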
def FindElementContainingPoint(self, point, algorithm="fem", find_parametric_coordinate=True,
scaling_factor=5., tolerance=1.0e-7, maxiter=20, use_simple_bases=False, return_on_geometric_finds=False,
initial_guess=None, initial_guesses=None, restart=False):
"""Find which element does a point lie in using specificed algorithm.
The FEM isoparametric coordinate of the point is returned as well.
If the isoparametric coordinate of the point is not required, issue find_parametric_coordinate=False
input:
point: [tuple] XYZ of enquiry point
algorithm: [str] either 'fem' or 'geometric'. The 'fem' algorithm uses k-d tree
search to get the right bounding box around as few elements as possible.
The size of the box can be specified by the user through the keyword scaling_factor.
The geometric algorithm is a lot more stable and converges much quicker.
The geometric algorithm first identifies the right element using a volume check,
then tries all possible combinations of initial guesses to get the FEM
isoparametric point. Trying all possible combinations with the FEM algorithm can be
potentially more costly since the bounding box size can be large.
return_on_geometric_finds:
[bool] if geometric algorithm is chosen and this option is on, then it returns
the indices of elements as soon as the volume check is done, and no further checks are
performed. This is useful for situations when searching for points that are meant to
be in the interior of the elements rather than at the boundaries or nodes
otherwise the number of elements returned by geometric algorithm is going to be
more than one
return:
element_index [int/1D array of ints] element(s) containing the point.
If the point is shared between many elements a 1D array is returned
iso_parametric_point [1D array] the parametric coordinate of the point within the element.
return only if find_parametric_coordinate=True
"""
if restart:
if initial_guesses is None:
if self.element_type == "pent":
initial_guesses = np.array([
[0.,0.],
[1.,0.],
[1.,0.5],
[0.5,1.],
[0.,1.],
])
else:
raise ValueError("restart option for this element type is only supported if initial_guesses are available")
for i in range(initial_guesses.shape[0]):
ret_val = self.FindElementContainingPoint(point, algorithm=algorithm,
find_parametric_coordinate=find_parametric_coordinate,
scaling_factor=scaling_factor, tolerance=tolerance, maxiter=maxiter,
use_simple_bases=use_simple_bases, return_on_geometric_finds=return_on_geometric_finds,
initial_guess=initial_guesses[i,:], restart=False)
if ret_val[1] is not None:
break
return ret_val
self.__do_essential_memebers_exist__()
C = self.InferPolynomialDegree() - 1
if C > 0:
warn("Note that finding a point within higher order curved mesh is not supported yet")
if C > 0 and algorithm == "geometric":
warn("High order meshes are not supported using geometric algorithim. I am going to operate on linear mesh")
if use_simple_bases:
raise ValueError("Simple bases for high order elements are not available")
return
ndim = self.InferSpatialDimension()
assert len(point) == ndim
from Florence.FunctionSpace import PointInversionIsoparametricFEM
candidate_element, candidate_piso = None, None
if self.element_type == "tet" and algorithm == "fem":
algorithm = "geometric"
if algorithm == "fem":
scaling_factor = float(scaling_factor)
max_h = self.EdgeLengths().max()
# max_h=1.
# FOR CURVED ELEMENTS
# max_h = self.LargestSegment().max()
# GET A BOUNDING BOX AROUND THE POINT, n TIMES LARGER THAN MAXIMUM h, WHERE n is the SCALING FACTOR
if ndim==3:
bounding_box = (point[0]-scaling_factor*max_h,
point[1]-scaling_factor*max_h,
point[2]-scaling_factor*max_h,
point[0]+scaling_factor*max_h,
point[1]+scaling_factor*max_h,
point[2]+scaling_factor*max_h)
elif ndim==2:
bounding_box = (point[0]-scaling_factor*max_h,
point[1]-scaling_factor*max_h,
point[0]+scaling_factor*max_h,
point[1]+scaling_factor*max_h)
# SELECT ELEMENTS ONLY WITHIN THE BOUNDING BOX
mesh = deepcopy(self)
idx_kept_element = self.RemoveElements(bounding_box)[1]
if ndim==3:
for i in range(self.nelem):
coord = self.points[self.elements[i,:],:]
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True, use_simple_bases=use_simple_bases,
initial_guess=initial_guess)
if converged:
# if p_iso[0] >= -1. and p_iso[0] <=1. and \
# p_iso[1] >= -1. and p_iso[1] <=1. and \
# p_iso[2] >= -1. and p_iso[2] <=1. :
if (p_iso[0] > -1. or np.isclose(p_iso[0],-1.,rtol=tolerance)) and \
(p_iso[0] < 1. or np.isclose(p_iso[0], 1.,rtol=tolerance)) and \
(p_iso[1] > -1. or np.isclose(p_iso[1],-1.,rtol=tolerance)) and \
(p_iso[1] < 1. or np.isclose(p_iso[1], 1.,rtol=tolerance)) and \
(p_iso[2] > -1. or np.isclose(p_iso[2],-1.,rtol=tolerance)) and \
(p_iso[2] < 1. or np.isclose(p_iso[2], 1.,rtol=tolerance)) :
candidate_element, candidate_piso = i, p_iso
break
elif ndim==2:
for i in range(self.nelem):
coord = self.points[self.elements[i,:],:]
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True, use_simple_bases=use_simple_bases,
initial_guess=initial_guess)
# if p_iso[0] >= -1. and p_iso[0] <=1. and \
# p_iso[1] >= -1. and p_iso[1] <=1.:
# candidate_element, candidate_piso = i, p_iso
# break
if (p_iso[0] > -1. or np.isclose(p_iso[0],-1.,rtol=tolerance)) and \
(p_iso[0] < 1. or np.isclose(p_iso[0], 1.,rtol=tolerance)) and \
(p_iso[1] > -1. or np.isclose(p_iso[1],-1.,rtol=tolerance)) and \
(p_iso[1] < 1. or np.isclose(p_iso[1], 1.,rtol=tolerance)) :
candidate_element, candidate_piso = i, p_iso
break
self.__update__(mesh)
# print(candidate_element)
if candidate_element is not None:
candidate_element = idx_kept_element[candidate_element]
if find_parametric_coordinate:
return candidate_element, candidate_piso
else:
return candidate_element
else:
if self.element_type == "tet":
from Florence.QuadratureRules.FeketePointsTet import FeketePointsTet
initial_guesses = FeketePointsTet(C)
def GetVolTet(a0,b0,c0,d0):
det_array = np.dstack((a0-d0,b0-d0,c0-d0))
# FIND VOLUME OF ALL THE ELEMENTS
volume = 1./6.*np.abs(np.linalg.det(det_array))
return volume
a = self.points[self.elements[:,0],:]
b = self.points[self.elements[:,1],:]
c = self.points[self.elements[:,2],:]
d = self.points[self.elements[:,3],:]
o = np.tile(point,self.nelem).reshape(self.nelem,a.shape[1])
# TOTAL VOLUME
vol = self.Volumes()
# PARTS' VOLUMES
vol0 = GetVolTet(a,b,c,o)
vol1 = GetVolTet(a,b,o,d)
vol2 = GetVolTet(a,o,c,d)
vol3 = GetVolTet(o,b,c,d)
criterion_check = vol0+vol1+vol2+vol3-vol
elems = np.isclose(criterion_check,0.,rtol=tolerance)
elems_idx = np.where(elems==True)[0]
elif self.element_type == "quad":
from Florence.QuadratureRules.GaussLobattoPoints import GaussLobattoPointsQuad
initial_guesses = GaussLobattoPointsQuad(C)
def GetAreaQuad(a0,b0,c0,d0):
# AREA OF QUAD ABCD = AREA OF ABC + AREA OF ACD
a00 = np.ones((a0.shape[0],3),dtype=np.float64); a00[:,:2] = a0
b00 = np.ones((b0.shape[0],3),dtype=np.float64); b00[:,:2] = b0
c00 = np.ones((c0.shape[0],3),dtype=np.float64); c00[:,:2] = c0
d00 = np.ones((d0.shape[0],3),dtype=np.float64); d00[:,:2] = d0
# FIND AREAS ABC
area0 = np.abs(np.linalg.det(np.dstack((a00,b00,c00))))
# FIND AREAS ACD
area1 = np.abs(np.linalg.det(np.dstack((a00,c00,d00))))
# FIND AREAS OF ALL THE ELEMENTS
area = 0.5*(area0+area1)
return area
a = self.points[self.elements[:,0],:]
b = self.points[self.elements[:,1],:]
c = self.points[self.elements[:,2],:]
d = self.points[self.elements[:,3],:]
o = np.tile(point,self.nelem).reshape(self.nelem,a.shape[1])
# TOTAL VOLUME
vol = self.Areas()
# PARTS' VOLUMES - DONT CHANGE THE ORDERING OF SPECIALLY vol1
vol0 = GetAreaQuad(o,c,b,a)
vol1 = GetAreaQuad(o,a,d,c)
criterion_check = vol0+vol1-vol
elems = np.isclose(criterion_check,0.,rtol=tolerance)
elems_idx = np.where(elems==True)[0]
else:
raise NotImplementedError("Geometric algorithm for {} elements not implemented yet".format(self.element_type))
if return_on_geometric_finds:
return elems_idx
for i in range(len(elems_idx)):
coord = self.points[self.elements[elems_idx[i],:],:]
# TRY ALL POSSIBLE INITIAL GUESSES - THIS IS CHEAP AS THE SEARCH SPACE CONTAINS ONLY A
# FEW ELEMENTS
for guess in initial_guesses:
p_iso, converged = PointInversionIsoparametricFEM(self.element_type, C, coord, point,
tolerance=tolerance, maxiter=maxiter, verbose=True,
use_simple_bases=use_simple_bases, initial_guess=guess)
if converged:
break
if converged:
candidate_element, candidate_piso = elems_idx[i], p_iso
break
if find_parametric_coordinate:
return candidate_element, candidate_piso
else:
return candidate_element
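# Hedged usage sketch (hypothetical query point): locate the element containing a point and
# fall back to the geometric algorithm if the FEM search does not converge.
#   elem, p_iso = mesh.FindElementContainingPoint((0.25, 0.4), algorithm='fem')
#   if elem is None:
#       elem, p_iso = mesh.FindElementContainingPoint((0.25, 0.4), algorithm='geometric')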
def AverageJacobian(self):
"""Computes average Jacobian of elements for all element types over a mesh
This is a generic method: for 1D elements it is based on lengths, for 2D on areas and for 3D on volumes.
It works for planar and curved elements
"""
self.__do_essential_memebers_exist__()
try:
from Florence import DisplacementFormulation
except ImportError:
raise ValueError("This functionality requires Florence's support")
if self.element_type != "line":
# FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED
formulation = DisplacementFormulation(self)
sizes = np.zeros(self.nelem)
for elem in range(self.nelem):
LagrangeElemCoords = self.points[self.elements[elem,:],:]
sizes[elem] = formulation.GetAverageJacobian(formulation.function_spaces[0],
LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)
return sizes.mean()
else:
raise ValueError("Not implemented for 1D elements")
def LargestSegment(self, smallest_element=True, nsamples=30,
plot_segment=False, plot_element=False, figure=None, save=False, filename=None):
"""Finds the largest segment that can fit in an element. For curvilinear elements
this measure can be used as (h) for h-refinement studies
input:
smallest_element [bool] if the largest segment size is to be computed in the
smallest element (i.e. element with the smallest area in 2D or
smallest volume in 3D). Default is True. If False, then the largest
segment in the largest element will be computed.
nsamples: [int] number of sample points along the curved
edges of the elements. The maximum distance between
all combinations of these points is the largest
segment
plot_segment: [bool] plots segment on top of [curved/straight] mesh
plot_element: [bool] plots the straight/curved element to which the segment
belongs
figure: [an instance of matplotlib/mayavi.mlab figure for 2D/3D]
save: [bool] whether to save the figure or not
filename: [str] file name for the figure to be saved
returns:
largest_segment_length [float] maximum segment length that can fit within either the smallest or the largest element, depending on smallest_element
"""
self.__do_memebers_exist__()
if self.element_type == "hex" or self.element_type == "tet":
quantity = self.Volumes()
elif self.element_type == "quad" or self.element_type == "tri":
quantity = self.Areas()
if smallest_element:
omesh = self.GetLocalisedMesh(quantity.argmin())
else:
omesh = self.GetLocalisedMesh(quantity.argmax())
try:
from Florence.PostProcessing import PostProcess
except:
raise ImportError('This function requires florence PostProcessing module')
return
if save:
if filename is None:
raise ValueError("No file name provided. I am going to write one the current directory")
filename = PWD(__file__) + "/output.png"
if self.element_type == "tri":
tmesh = PostProcess.TessellateTris(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "quad":
tmesh = PostProcess.TessellateQuads(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "tet":
tmesh = PostProcess.TessellateTets(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
elif self.element_type == "hex":
tmesh = PostProcess.TessellateHexes(omesh,np.zeros_like(omesh.points),
plot_edges=True, interpolation_degree=nsamples)
ndim = omesh.InferSpatialDimension()
nnode = tmesh.points.shape[0]
largest_segment_lengths = []
nodes = np.array((1,ndim))
for i in range(nnode):
tiled_points = np.tile(tmesh.points[i,:][:,None],nnode).T
segment_lengths = np.linalg.norm(tmesh.points - tiled_points, axis=1)
largest_segment_lengths.append(segment_lengths.max())
nodes = np.vstack((nodes, np.array([i,segment_lengths.argmax()])[None,:]))
largest_segment_lengths = np.array(largest_segment_lengths)
nodes = nodes[1:,:]
largest_segment_length = largest_segment_lengths.max()
corresponding_nodes = nodes[largest_segment_lengths.argmax(),:]
if plot_segment:
segment_coords = tmesh.points[corresponding_nodes,:]
if ndim==2:
import matplotlib.pyplot as plt
if figure == None:
figure = plt.figure()
if plot_element:
if omesh.element_type == "tri":
PostProcess.CurvilinearPlotTri(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
elif omesh.element_type == "quad":
PostProcess.CurvilinearPlotQuad(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
tmesh.SimplePlot(figure=figure,show_plot=False)
if save:
plt.savefig(filename,bbox_inches="tight",dpi=300)
plt.show()
elif ndim==3:
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from mayavi import mlab
if figure is None:
figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))
if plot_element:
if omesh.element_type == "tet":
PostProcess.CurvilinearPlotTet(omesh,
np.zeros_like(omesh.points),plot_points=True, point_radius=0.13,
figure=figure, interpolation_degree=nsamples, show_plot=False)
elif omesh.element_type == "hex":
PostProcess.CurvilinearPlotHex(omesh,
np.zeros_like(omesh.points),plot_points=True,
figure=figure, interpolation_degree=nsamples, show_plot=False)
tmesh.GetEdges()
edge_coords = tmesh.points[np.unique(tmesh.all_edges),:]
mlab.triangular_mesh(tmesh.points[:,0],tmesh.points[:,1],tmesh.points[:,2],
tmesh.elements, representation='wireframe', color=(0,0,0))
# # mlab.points3d(edge_coords[:,0],edge_coords[:,1],edge_coords[:,2],color=(1., 99/255., 71./255), scale_factor=0.03)
# # mlab.plot3d(segment_coords[:,0],segment_coords[:,1],segment_coords[:,2], color=(227./255, 66./255, 52./255))
mlab.points3d(edge_coords[:,0],edge_coords[:,1],edge_coords[:,2],color=(1., 99/255., 71./255), scale_factor=0.17)
mlab.plot3d(segment_coords[:,0],segment_coords[:,1],segment_coords[:,2],
color=(227./255, 66./255, 52./255), line_width=10., representation="wireframe")
if save:
mlab.savefig(filename,dpi=300)
mlab.show()
return largest_segment_length
def CheckNodeNumbering(self,change_order_to='retain', verbose=True):
"""Checks for node numbering order of the imported mesh. Mesh can be tri or tet
input:
change_order_to: [str] {'clockwise','anti-clockwise','retain'} changes the order to clockwise,
anti-clockwise or retains the numbering order - default is 'retain'
output:
original_order: [str] {'clockwise','anti-clockwise','mixed'} returns the original numbering order"""
self.__do_essential_memebers_exist__()
# CHECK IF IT IS LINEAR MESH
nodeperelem = self.InferNumberOfNodesPerLinearElement()
assert self.elements.shape[1] == nodeperelem
quantity = np.array([])
if self.element_type == "tri":
quantity = self.Areas(with_sign=True)
elif self.element_type == "quad":
quantity = self.Areas(with_sign=True)
elif self.element_type == "tet":
quantity = self.Volumes(with_sign=True)
elif self.element_type == "hex":
quantity = self.Volumes(with_sign=True)
original_order = ''
# CHECK NUMBERING
if (quantity > 0).all():
original_order = 'anti-clockwise'
if change_order_to == 'clockwise':
self.elements = np.fliplr(self.elements)
elif (quantity < 0).all():
original_order = 'clockwise'
if change_order_to == 'anti-clockwise':
self.elements = np.fliplr(self.elements)
else:
original_order = 'mixed'
if change_order_to == 'clockwise':
self.elements[quantity>0,:] = np.fliplr(self.elements[quantity>0,:])
elif change_order_to == 'anti-clockwise':
self.elements[quantity<0,:] = np.fliplr(self.elements[quantity<0,:])
if original_order == 'anti-clockwise':
    print(u'\u2713 : Imported mesh has', original_order, 'node ordering')
else:
    print(u'\u2717 : Imported mesh has', original_order, 'node ordering')
return original_order
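# Hedged usage sketch: enforce a consistent winding before assembly, since signed
# areas/volumes change sign with the node ordering.
#   original = mesh.CheckNodeNumbering(change_order_to='anti-clockwise')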
def GetElementsEdgeNumberingTri(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2].
At most a triangle can have all its three edges on the boundary.
output:
edge_elements: [2D array] array mapping each unique edge to an element containing it
[column 0] and the local edge number within that element [column 1]
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesTri()
all_edges = np.concatenate((self.elements[:,:2],self.elements[:,[1,2]],
self.elements[:,[2,0]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetElementsWithBoundaryEdgesTri(self):
"""Finds elements which have edges on the boundary.
At most an element can have all its three edges on the boundary.
output:
edge_elements: [2D array] array containing elements which have edge
on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
for i in range(self.edges.shape[0]):
x = []
for j in range(2):
x.append(np.where(self.elements[:,:3]==self.edges[i,j])[0])
# FIND WHICH ELEMENTS CONTAIN ALL FACE NODES - FOR INTERIOR ELEMENTS
# THERE CAN BE MORE THAN ONE ELEMENT CONTAINING ALL FACE NODES
z = x[0]
for k in range(1,len(x)):
z = np.intersect1d(x[k],z)
# CHOOSE ONLY ONE OF THESE ELEMENTS
edge_elements[i,0] = z[0]
# WHICH COLUMNS IN THAT ELEMENT ARE THE FACE NODES LOCATED
cols = np.array([np.where(self.elements[z[0],:]==self.edges[i,0])[0],
np.where(self.elements[z[0],:]==self.edges[i,1])[0]
])
cols = np.sort(cols.flatten())
if cols[0] == 0 and cols[1] == 1:
edge_elements[i,1] = 0
elif cols[0] == 1 and cols[1] == 2:
edge_elements[i,1] = 1
elif cols[0] == 0 and cols[1] == 2:
edge_elements[i,1] = 2
self.boundary_edge_to_element = edge_elements
return edge_elements
def GetElementsWithBoundaryFacesTet(self):
"""Finds elements which have faces on the boundary.
At most a tetrahedral element can have all its four faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
# THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK
# IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME
# EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY
# HENCE THIS MAPPING BECOMES NECESSARY
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:3],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementTet(C)[0]
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:3].astype(np.int64) - self.faces[:,:3].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Florence.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:3],self.faces[:,:3],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetElementsFaceNumberingTet(self):
"""Finds which faces belong to which elements and which faces of the elements
they are e.g. 0, 1, 2 or 3.
output:
face_elements: [2D array] nfaces x 2 array mapping each unique face to an element
containing it and the local face number within that element
Note that this method also sets the self.face_to_element to face_elements,
so the return value is not strictly necessary
"""
if isinstance(self.face_to_element,np.ndarray):
if self.face_to_element.shape[0] > 1:
return self.face_to_element
assert self.elements is not None
# GET ALL FACES FROM ELEMENT CONNECTIVITY
if self.all_faces is None:
self.GetFacesTet()
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0).astype(np.int64)
_,idx = unique2d(all_faces,consider_sort=True,order=False, return_index=True)
face_elements = np.zeros((self.all_faces.shape[0],2),dtype=np.int64)
import os
import pandas as pd
# os.environ["CUDA_VISIBLE_DEVICES"] = "5"
import sys
from datetime import datetime
import torch
from torch import save
import numpy as np
import argparse
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/models')
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/utils')
import multidcp
import datareader
import metric
import wandb
import pdb
import pickle
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
from multidcp_ae_utils import *
import random
# check cuda
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print("Use GPU: %s" % torch.cuda.is_available())
def model_training(args, model, data, ae_data, metrics_summary):
optimizer = torch.optim.Adam(model.parameters(), lr=0.0002)
best_dev_pearson = float("-inf")
for epoch in range(args.max_epoch):
print("Iteration %d:" % (epoch))
print_lr(optimizer)
model.train()
data_save = False
epoch_loss = 0
for i, (feature, label, _) in enumerate(ae_data.train_dataloader()):
optimizer.zero_grad()
#### the autoencoder step doesn't need any input other than the feature
predict, cell_hidden_ = model(input_cell_gex=feature, job_id = 'ae', epoch = epoch)
loss_t = model.loss(label, predict)
loss_t.backward()
optimizer.step()
epoch_loss += loss_t.item()
print('AE Train loss:')
print(epoch_loss/(i+1))
if USE_WANDB:
wandb.log({'AE Train loss': epoch_loss/(i+1)}, step = epoch)
model.eval()
epoch_loss = 0
lb_np = np.empty([0, 978])
predict_np = np.empty([0, 978])
with torch.no_grad():
for i, (feature, label, _) in enumerate(ae_data.val_dataloader()):
predict, _ = model(input_cell_gex=feature, job_id = 'ae', epoch = epoch)
loss = model.loss(label, predict)
epoch_loss += loss.item()
lb_np = np.concatenate((lb_np, label.cpu().numpy()), axis=0)
predict_np = np.concatenate((predict_np, predict.cpu().numpy()), axis=0)
validation_epoch_end(epoch_loss = epoch_loss, lb_np = lb_np,
predict_np = predict_np, steps_per_epoch = i+1,
epoch = epoch, metrics_summary = metrics_summary,
job = 'ae', USE_WANDB = USE_WANDB)
model.train()
epoch_loss = 0
for i, (ft, lb, _) in enumerate(data.train_dataloader()):
drug = ft['drug']
mask = ft['mask']
cell_feature = ft['cell_id']
pert_idose = ft['pert_idose']
optimizer.zero_grad()
predict, cell_hidden_ = model(input_cell_gex=cell_feature, input_drug = drug,
input_gene = data.gene, mask = mask,
input_pert_idose = pert_idose,
job_id = 'perturbed', epoch = epoch)
loss_t = model.loss(lb, predict)
loss_t.backward()
optimizer.step()
if i == 1:
print('__________________________perturbed input__________________________')
print(cell_feature)
print('__________________________perturbed hidden__________________________')
print(cell_hidden_)
print('__________________________perturbed predictions__________________________')
print(predict)
epoch_loss += loss_t.item()
print('Perturbed gene expression profile Train loss:')
print(epoch_loss/(i+1))
if USE_WANDB:
wandb.log({'Perturbed gene expression profile Train loss': epoch_loss/(i+1)}, step = epoch)
model.eval()
epoch_loss = 0
lb_np = np.empty([0, 978])
predict_np = np.empty([0, 978])
with torch.no_grad():
for i, (ft, lb, _) in enumerate(data.val_dataloader()):
drug = ft['drug']
mask = ft['mask']
cell_feature = ft['cell_id']
pert_idose = ft['pert_idose']
predict, _ = model(input_cell_gex=cell_feature, input_drug = drug,
input_gene = data.gene, mask = mask,
input_pert_idose = pert_idose,
job_id = 'perturbed', epoch = epoch)
loss = model.loss(lb, predict)
epoch_loss += loss.item()
lb_np = np.concatenate((lb_np, lb.cpu().numpy()), axis=0)
predict_np = np.concatenate((predict_np, predict.cpu().numpy()), axis=0)
validation_epoch_end(epoch_loss = epoch_loss, lb_np = lb_np,
predict_np = predict_np, steps_per_epoch = i+1,
epoch = epoch, metrics_summary = metrics_summary,
job = 'perturbed', USE_WANDB = USE_WANDB)
if best_dev_pearson < metrics_summary['pearson_list_perturbed_dev'][-1] or epoch == 1:
# data_save = True
best_dev_pearson = metrics_summary['pearson_list_perturbed_dev'][-1]
torch.save(model.state_dict(), 'best_multidcp_ae_model_1.pt')
# if not data_save or (epoch < 400 and epoch != 1):
# continue
epoch_loss = 0
lb_np = np.empty([0, 978])
predict_np = np.empty([0, 978])
### All utility functions to build the patch deletion tree
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import cv2
import numpy as np
from copy import deepcopy
import pygraphviz as pgv
import torch
from utils import *
def get_patch_boolean(mask):
boolean = []
z0, z1, h, w = mask.shape
z0, z1, rows, cols = np.where(mask == 0)
for i in range(len(rows)):
patchname = rows[i]*h + cols[i]
boolean.append(patchname)
return boolean
def get_edge_mask_red(mask, canny_param, intensity, kernel_size):
upsampled_mask_newPatch_edge = deepcopy(mask)
upsampled_mask_newPatch_edge = np.uint8(upsampled_mask_newPatch_edge * 255)
"""
Shape implementations
"""
import numpy as np
from numpy import random
import color
import constants as c
from shapely import affinity
from shapely.geometry import Point, box, Polygon
def rand_size():
return random.randint(c.SIZE_MIN, c.SIZE_MAX)
def rand_size_2():
"""Slightly bigger."""
return random.randint(c.SIZE_MIN + 2, c.SIZE_MAX + 2)
def rand_pos():
return random.randint(c.X_MIN, c.X_MAX)
class Shape:
def __init__(self,
x=None,
y=None,
relation=None,
relation_dir=None,
color_=None,
max_rotation=90):
if color_ is None:
raise NotImplementedError("Must specify color")
self.color = color_
if x is not None or y is not None:
assert x is not None and y is not None
assert relation is None and relation_dir is None
self.x = x
self.y = y
elif relation is None and relation_dir is None:
self.x = rand_pos()
self.y = rand_pos()
else:
# Generate on 3/4 of image according to relation dir
if relation == 0:
# x matters - y is totally random
self.y = rand_pos()
if relation_dir == 0:
# Place right 3/4 of screen, so second shape
# can be placed LEFT
self.x = random.randint(c.X_MIN_34, c.X_MAX)
else:
# Place left 3/4
self.x = random.randint(c.X_MIN, c.X_MAX_34)
else:
# y matters - x is totally random
self.x = rand_pos()
if relation_dir == 0:
# Place top 3/4 of screen, so second shape can be placed
# BELOW
# NOTE: Remember coords for y travel in opp dir
self.y = random.randint(c.X_MIN, c.X_MAX_34)
else:
self.y = random.randint(c.X_MIN_34, c.X_MAX)
self.rotation = random.randint(max_rotation)
self.init_shape()
def draw(self, image):
image.draw.polygon(self.coords, color.PENS[self.color])
def intersects(self, oth):
return self.shape.intersects(oth.shape)
def left(self, oth):
return self.x < oth.x
def right(self, oth):
return self.x > oth.x
def above(self, oth):
return self.y < oth.y
def below(self, oth):
return self.y > oth.y
@property
def name(self):
return type(self).__name__.lower()
def __str__(self):
return f"<{self.color} {self.name} at ({self.x}, {self.y})>"
def __repr__(self):
return self.__str__()
def json(self):
return {
'shape': self.name,
'color': self.color,
'pos': {
'x': self.x,
'y': self.y
},
'rotation': self.rotation
}
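# Hedged usage sketch (assumed colour name): concrete subclasses such as Ellipse below
# implement init_shape(); a shape can then be generated and serialised with
#   e = Ellipse(color_='red')
#   record = e.json()   # {'shape': 'ellipse', 'color': 'red', 'pos': {...}, 'rotation': ...}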
class Ellipse(Shape):
def init_shape(self, min_skew=1.5):
self.dx = rand_size()
# dy must be at least min_skew times dx, to remove ambiguity with a circle
bigger = int(self.dx * min_skew)
if bigger >= c.SIZE_MAX:
smaller = int(self.dx / min_skew)
assert smaller >= c.SIZE_MIN, ("{} {}".format(smaller, self.dx))
self.dy = random.randint(c.SIZE_MIN, smaller+1)
else:
self.dy = random.randint(bigger, c.SIZE_MAX)
if random.random()
import numpy as nu
import scipy
_SCIPY_VERSION= [int(v.split('rc')[0])
for v in scipy.__version__.split('.')]
if _SCIPY_VERSION[0] < 1 and _SCIPY_VERSION[1] < 10:
from scipy.maxentropy import logsumexp
elif _SCIPY_VERSION[0] < 1 and _SCIPY_VERSION[1] < 19:
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
from isodist.Isochrone import Isochrone
from isodist.PadovaIsochrone import PadovaIsochrone
_LOGTOLN= 1./nu.log10(nu.exp(1.))
def eval_distpdf(ds,mdict=None,mivardict=None,logg=None,logg_ivar=None,
teff=None,teff_ivar=None,logage=None,logage_ivar=None,
Z=None,Z_ivar=None,feh=None,feh_ivar=None,
afe=None,afe_ivar=None,
padova=None,padova_type=None,
normalize=False,
ageprior=None):
"""
NAME:
eval_distpdf
PURPOSE:
evaluate the distance PDF for an object
INPUT:
ds- list or ndarray of distance (or a single distance), in kpc
mdict= dictionary of apparent magnitudes (e.g., {'J':12.,'Ks':13.})
mivardict= dictionary of magnitude inverse variances (matched to mdict)
logg= observed logg
logg_ivar= inverse variance of logg measurement
teff= observed T_eff [K]
teff_ivar= inverse variance of T_eff measurement
logage= observed log_10 age [Gyr]
logage_ivar= inverse variance of log_10 age measurement
Z= observed metallicity
Z_ivar= inverse variance of Z measurement
feh= observed metallicity (alternative to Z)
feh_ivar= inverse variance of FeH measurement
afe= observed [\alpha/Fe]
afe_ivar= [\alpha/Fe] inverse variance
padova= if True, use Padova isochrones,
if set to a PadovaIsochrone objects, use this
padova_type= type of PadovaIsochrone to use (e.g., 2mass-spitzer-wise)
normalize= if True, normalize output PDF (default: False)
ageprior= - None: flat in log age
- flat: flat in age
OUTPUT:
log of probability
HISTORY:
2011-04-28 - Written - Bovy (NYU)
"""
#load isochrones
if not padova is None and isinstance(padova,PadovaIsochrone):
iso= padova
elif not padova is None and isinstance(padova,bool) and padova:
iso= PadovaIsochrone(type=padova_type)
#Parse metallicity info
if not feh is None: raise NotImplementedError("'feh' not yet implemented")
#set up output
if isinstance(ds,(list,nu.ndarray)):
scalarOut= False
if isinstance(ds,list):
_ds= nu.array(ds)
else: _ds= ds
elif isinstance(ds,float):
scalarOut= True
_ds= [ds]
#Pre-calculate all absolute magnitudes
absmagdict= {}
for key in mdict.keys():
absmagdict[key]= -_distmodulus(_ds)+mdict[key]
#loop through isochrones
ZS= iso.Zs()
logages= iso.logages()
allout= nu.zeros((len(_ds),len(ZS),len(logages)))
for zz in range(len(ZS)):
for aa in range(len(logages)):
thisiso= iso(logages[aa],Z=ZS[zz])
dmpm= nu.roll(thisiso['M_ini'],-1)-thisiso['M_ini']
loglike= nu.zeros((len(_ds),len(thisiso['M_ini'])-1))
loglike-= nu.log(thisiso['M_ini'][-1])
for ii in range(1,len(thisiso['M_ini'])-1):
if dmpm[ii] > 0.:
loglike[:,ii]+= nu.log(dmpm[ii])
else:
loglike[:,ii]= nu.finfo(nu.dtype(nu.float64)).min
continue #no use in continuing here
if not teff is None:
loglike[:,ii]-= (teff-10**thisiso['logTe'][ii])**2.*teff_ivar
if not logg is None:
loglike[:,ii]-= (logg-thisiso['logg'][ii])**2.*logg_ivar
for key in mdict.keys():
#print absmagdict[key][2], thisiso[key][ii]
loglike[:,ii]-= (absmagdict[key]-thisiso[key][ii])**2.\
*mivardict[key]
#marginalize over mass
for jj in range(len(_ds)):
allout[jj,zz,aa]= logsumexp(loglike[jj,:])
#add age constraint and prior
if not logage is None:
allout[:,zz,aa]+= -(logage-logages[aa])**2.*logage_ivar
if not ageprior is None:
if isinstance(ageprior,str) and ageprior.lower() == 'flat':
allout[:,zz,aa]+= logages[aa]*_LOGTOLN
#add Z constraint and prior
if not Z is None:
allout[:,zz,:]+= -(Z-ZS[zz])**2.*Z_ivar
#prepare final output
out= nu.zeros(len(_ds))
for jj in range(len(_ds)):
out[jj]= logsumexp(allout[jj,:,:])
if normalize and not scalarOut:
out-= logsumexp(out)+nu.log(ds[1]-ds[0])
#return
if scalarOut: return out[0]
else: return out
def _distmodulus(d):
return 5.*nu.log10(d/.01)
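# Hedged usage sketch (assumes Padova isochrones of the given type are installed):
#   ds = nu.linspace(0.1, 5., 101)   # distance grid in kpc
#   lnpdf = eval_distpdf(ds, mdict={'J': 12.}, mivardict={'J': 25.},
#                        teff=4500., teff_ivar=1e-5,
#                        padova=True, padova_type='2mass-spitzer-wise', normalize=True)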
import datetime
import numpy as np
import pandas as pd
from scipy import stats
from collections import Counter
from tqdm import tqdm
tqdm.pandas(desc="progress")
#================================================================================
#Don't change the code below!!! (Do not modify the code below lightly.)
#================================================================================
def printlog(info):
nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print('\n================================================================================ %s'%nowtime)
print(info+'\n')
# KS test for label-feature relevance (correlation)
def relativity_ks(labels,features):
assert len(labels) == len(features)
labels = np.array(labels)
features = np.array(features)
# For non-numeric features, map string values to corresponding ordinal codes
if features.dtype is np.dtype('O'):
features_notnan = set(features[~pd.isna(features)])
features_notnan = [str(x) for x in features_notnan]
dic = dict(zip(range(0,len(features_notnan)),sorted(list(features_notnan))))
features = np.array([dic.get(x,x) for x in features])
else:
features = features
if set(labels) == {0,1}: # binary classification
data_1 = features[labels > 0.5]
data_0 = features[labels < 0.5]
elif "int" in str(labels.dtype): #多分类问题
most_label = Counter(labels).most_common(1)[0][0]
data_0 = features[labels == most_label]
data_1 = features[labels != most_label]
else: # regression
mid = np.median(labels)
data_1 = features[labels > mid]
data_0 = features[labels <= mid ]
result = stats.ks_2samp(data_1,data_0)
return result[0]
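# Hedged usage sketch (hypothetical DataFrame columns): a larger statistic means the feature
# separates the label groups more strongly.
#   ks = relativity_ks(df['label'].values, df['feature_x'].values)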
# KS test for distribution stability (whether two samples come from the same distribution)
def stability_ks(data1,data2):
data1 = np.array(data1)
#!/usr/bin/env python3
import os
import sys
import re
import pandas as pd, geopandas as gpd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib import Path
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
import rasterio
from rasterio import features as riofeatures
from rasterio import plot as rioplot
from shapely.geometry import Polygon
"""
Plot Rating Curves and Compare to USGS Gages
Parameters
----------
fim_dir : str
Directory containing FIM output folders.
output_dir : str
Directory containing rating curve plots and tables.
usgs_gages_filename : str
File name of USGS rating curves.
nwm_flow_dir : str
Directory containing NWM recurrence flows files.
number_of_jobs : str
Number of jobs.
stat_groups : str
string of columns to group eval metrics.
"""
def check_file_age(file):
'''
Checks if file exists, determines the file age, and recommends
updating if older than 1 month.
Returns
-------
None.
'''
file = Path(file)
if file.is_file():
modification_time = file.stat().st_mtime
current_time = time.time()
file_age_days = (current_time - modification_time)/86400
if file_age_days > 30:
check = f'{file.name} is {int(file_age_days)} days old, consider updating.\nUpdate with rating_curve_get_usgs_curves.py'
else:
check = f'{file.name} is {int(file_age_days)} days old.'
return check
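# Hedged usage sketch (hypothetical path): warn the operator when the cached USGS rating
# curves are getting stale.
#   print(check_file_age('/data/inputs/usgs_gages/usgs_rating_curves.csv'))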
# recurr_intervals = ['recurr_1_5_cms.csv','recurr_5_0_cms.csv','recurr_10_0_cms.csv']
def generate_rating_curve_metrics(args):
elev_table_filename = args[0]
branches_folder = args[1]
usgs_gages_filename = args[2]
usgs_recurr_stats_filename = args[3]
nwm_recurr_data_filename = args[4]
rc_comparison_plot_filename = args[5]
nwm_flow_dir = args[6]
catfim_flows_filename = args[7]
huc = args[8]
alt_plot = args[9]
elev_table = pd.read_csv(elev_table_filename,dtype={'location_id': object, 'feature_id':object,'HydroID':object, 'levpa_id':object})
elev_table.dropna(subset=['location_id'], inplace=True)
usgs_gages = pd.read_csv(usgs_gages_filename,dtype={'location_id': object, 'feature_id':object})
# Aggregate FIM4 hydroTables
hydrotable = pd.DataFrame()
for branch in elev_table.levpa_id.unique():
branch_elev_table = elev_table.loc[elev_table.levpa_id == branch].copy()
branch_hydrotable = pd.read_csv(join(branches_folder, str(branch), f'hydroTable_{branch}.csv'),dtype={'HydroID':object,'feature_id':object})
# Only pull SRC for hydroids that are in this branch
branch_hydrotable = branch_hydrotable.loc[branch_hydrotable.HydroID.isin(branch_elev_table.HydroID)]
branch_hydrotable.drop(columns=['order_'], inplace=True)
# Join SRC with elevation data
branch_elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
branch_hydrotable = branch_hydrotable.merge(branch_elev_table, on="HydroID")
# Append to full rating curve dataframe
if hydrotable.empty:
hydrotable = branch_hydrotable
else:
hydrotable = hydrotable.append(branch_hydrotable)
# Join rating curves with elevation data
#elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
#hydrotable = hydrotable.merge(elev_table, on="HydroID")
relevant_gages = list(hydrotable.location_id.unique())
usgs_gages = usgs_gages[usgs_gages['location_id'].isin(relevant_gages)]
usgs_gages = usgs_gages.reset_index(drop=True)
if len(usgs_gages) > 0:
# Adjust rating curve to elevation
hydrotable['elevation_ft'] = (hydrotable.stage + hydrotable.dem_adj_elevation) * 3.28084 # convert from m to ft
# hydrotable['raw_elevation_ft'] = (hydrotable.stage + hydrotable.dem_elevation) * 3.28084 # convert from m to ft
hydrotable['discharge_cfs'] = hydrotable.discharge_cms * 35.3147
usgs_gages = usgs_gages.rename(columns={"flow": "discharge_cfs", "elevation_navd88": "elevation_ft"})
hydrotable['source'] = "FIM"
usgs_gages['source'] = "USGS"
limited_hydrotable = hydrotable.filter(items=['location_id','elevation_ft','discharge_cfs','source', 'HydroID', 'levpa_id', 'dem_adj_elevation'])
select_usgs_gages = usgs_gages.filter(items=['location_id', 'elevation_ft', 'discharge_cfs','source'])
if 'default_discharge_cms' in hydrotable.columns: # check if both "FIM" and "FIM_default" SRCs are available
hydrotable['default_discharge_cfs'] = hydrotable.default_discharge_cms * 35.3147
limited_hydrotable_default = hydrotable.filter(items=['location_id','elevation_ft', 'default_discharge_cfs'])
limited_hydrotable_default['discharge_cfs'] = limited_hydrotable_default.default_discharge_cfs
limited_hydrotable_default['source'] = "FIM_default"
rating_curves = limited_hydrotable.append(select_usgs_gages)
rating_curves = rating_curves.append(limited_hydrotable_default)
else:
rating_curves = limited_hydrotable.append(select_usgs_gages)
# Add stream order
stream_orders = hydrotable.filter(items=['location_id','order_']).drop_duplicates()
rating_curves = rating_curves.merge(stream_orders, on='location_id')
rating_curves['order_'] = rating_curves['order_'].astype('int')
# NWM recurr intervals
recurr_intervals = ("2","5","10","25","50","100")
recurr_dfs = []
for interval in recurr_intervals:
recurr_file = join(nwm_flow_dir, 'nwm21_17C_recurr_{}_0_cms.csv'.format(interval))
df = pd.read_csv(recurr_file, dtype={'feature_id': str})
# Update column names
df = df.rename(columns={"discharge": interval})
recurr_dfs.append(df)
# Merge NWM recurr intervals into a single layer
nwm_recurr_intervals_all = reduce(lambda x,y: pd.merge(x,y, on='feature_id', how='outer'), recurr_dfs)
nwm_recurr_intervals_all = pd.melt(nwm_recurr_intervals_all, id_vars=['feature_id'], value_vars=recurr_intervals, var_name='recurr_interval', value_name='discharge_cms')
# Append catfim data (already set up in format similar to nwm_recurr_intervals_all)
cat_fim = pd.read_csv(catfim_flows_filename, dtype={'feature_id':str})
nwm_recurr_intervals_all = nwm_recurr_intervals_all.append(cat_fim)
# Convert discharge to cfs and filter
nwm_recurr_intervals_all['discharge_cfs'] = nwm_recurr_intervals_all.discharge_cms * 35.3147
nwm_recurr_intervals_all = nwm_recurr_intervals_all.filter(items=['discharge_cfs', 'recurr_interval','feature_id']).drop_duplicates()
# Identify unique gages
usgs_crosswalk = hydrotable.filter(items=['location_id', 'feature_id']).drop_duplicates()
usgs_crosswalk.dropna(subset=['location_id'], inplace=True)
nwm_recurr_data_table = pd.DataFrame()
usgs_recurr_data = pd.DataFrame()
# Interpolate USGS/FIM elevation at each gage
for index, gage in usgs_crosswalk.iterrows():
# Interpolate USGS elevation at NWM recurrence intervals
usgs_rc = rating_curves.loc[(rating_curves.location_id==gage.location_id) & (rating_curves.source=="USGS")]
if len(usgs_rc) <1:
print(f"missing USGS rating curve data for usgs station {gage.location_id} in huc {huc}")
continue
str_order = np.unique(usgs_rc.order_)
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import pytest
import numpy as np
from copy import deepcopy
import warnings
import os
import sys
import shutil
from hera_sim.antpos import linear_array, hex_array
from hera_sim.vis import sim_red_data
from hera_sim.sigchain import gen_gains
from .. import redcal as om
from .. import io, abscal
from ..utils import split_pol, conj_pol, split_bl
from ..apply_cal import calibrate_in_place
from ..data import DATA_PATH
from ..datacontainer import DataContainer
np.random.seed(0)
class TestMethods(object):
def test_check_polLists_minV(self):
polLists = [['xy']]
assert not om._check_polLists_minV(polLists)
polLists = [['xx', 'xy']]
assert not om._check_polLists_minV(polLists)
polLists = [['xx', 'xy', 'yx']]
assert not om._check_polLists_minV(polLists)
polLists = [['xy', 'yx'], ['xx'], ['yy'], ['xx'], ['yx', 'xy'], ['yy']]
assert om._check_polLists_minV(polLists)
def test_parse_pol_mode(self):
reds = [[(0, 1, 'xx')]]
assert om.parse_pol_mode(reds) == '1pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '2pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy')], [(0, 1, 'yx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '4pol'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yx')], [(0, 1, 'yy')]]
assert om.parse_pol_mode(reds) == '4pol_minV'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yx')], [(0, 1, 'LR')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xy')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
reds = [[(0, 1, 'xx')], [(0, 1, 'xy'), (0, 1, 'yy')], [(0, 1, 'yx')]]
assert om.parse_pol_mode(reds) == 'unrecognized_pol_mode'
def test_get_pos_red(self):
pos = hex_array(3, sep=14.6, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) == 30
pos = hex_array(7, sep=14.6, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) == 234
for ant, r in pos.items():
pos[ant] += [0, 0, 1 * r[0] - .5 * r[1]]
assert len(om.get_pos_reds(pos)) == 234
pos = hex_array(7, sep=1, split_core=False, outriggers=0)
assert len(om.get_pos_reds(pos)) < 234
assert len(om.get_pos_reds(pos, bl_error_tol=.1)) == 234
pos = hex_array(7, sep=14.6, split_core=False, outriggers=0)
blerror = 1.0 - 1e-12
error = blerror / 4
for key, val in pos.items():
th = np.random.choice([0, np.pi / 2, np.pi])
phi = np.random.choice([0, np.pi / 2, np.pi, 3 * np.pi / 2])
pos[key] = val + error * np.array([np.sin(th) * np.cos(phi), np.sin(th) * np.sin(phi), np.cos(th)])
assert len(om.get_pos_reds(pos, bl_error_tol=1.0)) == 234
assert len(om.get_pos_reds(pos, bl_error_tol=.99)) > 234
pos = {0: np.array([0, 0, 0]), 1: np.array([20, 0, 0]), 2: np.array([10, 0, 0])}
assert om.get_pos_reds(pos) == [[(0, 2), (2, 1)], [(0, 1)]]
# test branch cut
pos = {0: np.array([-.03, 1., 0.]),
1: np.array([1., 1., 0.]),
2: np.array([0.03, 0.0, 0.]),
3: np.array([1., 0., 0.])}
assert len(om.get_pos_reds(pos, bl_error_tol=.1)) == 4
def test_filter_reds(self):
antpos = linear_array(7)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
# exclude ants
red = om.filter_reds(reds, ex_ants=[0, 4])
assert red == [[(1, 2, 'xx'), (2, 3, 'xx'), (5, 6, 'xx')], [(1, 3, 'xx'), (3, 5, 'xx')], [(2, 5, 'xx'), (3, 6, 'xx')],
[(1, 5, 'xx'), (2, 6, 'xx')], [(1, 6, 'xx')]]
# include ants
red = om.filter_reds(reds, ants=[0, 1, 4, 5, 6])
assert red == [[(0, 1, 'xx'), (4, 5, 'xx'), (5, 6, 'xx')], [(4, 6, 'xx')], [(1, 4, 'xx')], [(0, 4, 'xx'), (1, 5, 'xx')],
[(0, 5, 'xx'), (1, 6, 'xx')], [(0, 6, 'xx')]]
# exclude bls
red = om.filter_reds(reds, ex_bls=[(0, 2), (1, 2), (0, 6)])
assert red == [[(0, 1, 'xx'), (2, 3, 'xx'), (3, 4, 'xx'), (4, 5, 'xx'), (5, 6, 'xx')],
[(1, 3, 'xx'), (2, 4, 'xx'), (3, 5, 'xx'), (4, 6, 'xx')], [(0, 3, 'xx'), (1, 4, 'xx'), (2, 5, 'xx'), (3, 6, 'xx')],
[(0, 4, 'xx'), (1, 5, 'xx'), (2, 6, 'xx')], [(0, 5, 'xx'), (1, 6, 'xx')]]
# include bls
red = om.filter_reds(reds, bls=[(0, 1), (1, 2)])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx')]]
# include ubls
red = om.filter_reds(reds, ubls=[(0, 2), (1, 4)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx'), (2, 4, 'xx'), (3, 5, 'xx'), (4, 6, 'xx')],
[(0, 3, 'xx'), (1, 4, 'xx'), (2, 5, 'xx'), (3, 6, 'xx')]]
# exclude ubls
red = om.filter_reds(reds, ex_ubls=[(0, 2), (1, 4), (4, 5), (0, 5), (2, 3), (0, 6)])
assert red == [[(0, 4, 'xx'), (1, 5, 'xx'), (2, 6, 'xx')]]
# exclude crosspols
# reds = omni.filter_reds(self.info.get_reds(), ex_crosspols=()
def test_filter_reds_2pol(self):
antpos = linear_array(4)
reds = om.get_reds(antpos, pols=['xx', 'yy'], pol_mode='1pol')
# include pols
red = om.filter_reds(reds, pols=['xx'])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 3, 'xx')]]
# exclude pols
red = om.filter_reds(reds, ex_pols=['yy'])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 3, 'xx')]]
# exclude ants
red = om.filter_reds(reds, ex_ants=[0])
assert red == [[(1, 2, 'xx'), (2, 3, 'xx')], [(1, 3, 'xx')], [(1, 2, 'yy'), (2, 3, 'yy')], [(1, 3, 'yy')]]
# include ants
red = om.filter_reds(reds, ants=[1, 2, 3])
red = om.filter_reds(reds, ex_ants=[0])
# exclude bls
red = om.filter_reds(reds, ex_bls=[(1, 2), (0, 3)])
assert red == [[(0, 1, 'xx'), (2, 3, 'xx')], [(0, 2, 'xx'), (1, 3, 'xx')], [(0, 1, 'yy'), (2, 3, 'yy')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# include bls
red = om.filter_reds(reds, bls=[(0, 1), (1, 2)])
assert red == [[(0, 1, 'xx'), (1, 2, 'xx')], [(0, 1, 'yy'), (1, 2, 'yy')]]
# include ubls
red = om.filter_reds(reds, ubls=[(0, 2)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# exclude ubls
red = om.filter_reds(reds, ex_ubls=[(2, 3), (0, 3)])
assert red == [[(0, 2, 'xx'), (1, 3, 'xx')], [(0, 2, 'yy'), (1, 3, 'yy')]]
# test baseline length min and max cutoffs
antpos = hex_array(4, sep=14.6, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
assert om.filter_reds(reds, antpos=antpos, min_bl_cut=85) == reds[-3:]
assert om.filter_reds(reds, antpos=antpos, max_bl_cut=15) == reds[:3]
def test_filter_reds_max_dim(self):
# build hex array with 4 on a side and 7 total rows
antpos = hex_array(4, split_core=False, outriggers=0)
antpos[37] = np.array([np.pi, np.pi, 0]) # add one off-grid antenna
reds = om.get_reds(antpos)
# remove third, fourth, fifth, and sixth rows
reds = om.filter_reds(reds, ex_ants=list(range(9, 33)))
# Max 1 dimension means largest 1D array
new_reds = om.filter_reds(reds, max_dims=1)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == set(range(4, 9))
# Max 2 dimensions means only rows 1 and 2
new_reds = om.filter_reds(reds, max_dims=2)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == set(range(0, 9))
# Max 3 dimensions means all 3 good rows, but keeps out the off-grid antenna
new_reds = om.filter_reds(reds, max_dims=3)
ant_inds = set([ant[0] for red in new_reds for bl in red for ant in split_bl(bl)])
assert ant_inds == (set(range(0, 9)) | set(range(33, 37)))
def test_add_pol_reds(self):
reds = [[(1, 2)]]
polReds = om.add_pol_reds(reds, pols=['xx'], pol_mode='1pol')
assert polReds == [[(1, 2, 'xx')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'yy'], pol_mode='2pol')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'yy')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'xy', 'yx', 'yy'], pol_mode='4pol')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'xy')], [(1, 2, 'yx')], [(1, 2, 'yy')]]
polReds = om.add_pol_reds(reds, pols=['xx', 'xy', 'yx', 'yy'], pol_mode='4pol_minV')
assert polReds == [[(1, 2, 'xx')], [(1, 2, 'xy'), (1, 2, 'yx')], [(1, 2, 'yy')]]
def test_reds_to_antpos(self):
# Test 1D
true_antpos = linear_array(10)
reds = om.get_reds(true_antpos, pols=['xx', 'yy'], pol_mode='2pol', bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds,)
for pos in inferred_antpos.values():
assert len(pos) == 1
new_reds = om.get_reds(inferred_antpos, pols=['xx', 'yy'], pol_mode='2pol', bl_error_tol=1e-10)
for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D
true_antpos = hex_array(5, split_core=False, outriggers=0)
reds = om.get_reds(true_antpos, pols=['xx'], pol_mode='1pol', bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 2
new_reds = om.get_reds(inferred_antpos, pols=['xx'], pol_mode='1pol', bl_error_tol=1e-10)
        found_match = False
        for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D with split
true_antpos = hex_array(5, split_core=True, outriggers=0)
reds = om.get_pos_reds(true_antpos, bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 2
new_reds = om.get_pos_reds(inferred_antpos, bl_error_tol=1e-10)
        found_match = False
        for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
# Test 2D with additional degeneracy
true_antpos = {0: [0, 0], 1: [1, 0], 2: [0, 1], 3: [1, 1],
4: [100, 100], 5: [101, 100], 6: [100, 101], 7: [101, 101]}
reds = om.get_pos_reds(true_antpos, bl_error_tol=1e-10)
inferred_antpos = om.reds_to_antpos(reds)
for pos in inferred_antpos.values():
assert len(pos) == 3
new_reds = om.get_pos_reds(inferred_antpos, bl_error_tol=1e-10)
        found_match = False
        for nred in new_reds:
for red in reds:
if nred[0] in red:
found_match = True
assert len(set(nred).difference(set(red))) == 0
assert found_match
found_match = False
def test_find_polarity_flipped_ants(self):
# test normal operation
antpos = hex_array(3, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['ee'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(.1, .2, 100)
ants = [(ant, 'Jee') for ant in antpos]
gains = gen_gains(freqs, ants)
for ant in [3, 10, 11]:
gains[ant, 'Jee'] *= -1
_, true_vis, data = sim_red_data(reds, gains=gains, shape=(2, len(freqs)))
meta, g_fc = rc.firstcal(data, freqs)
for ant in antpos:
if ant in [3, 10, 11]:
assert np.all(meta['polarity_flips'][ant, 'Jee'])
else:
assert not np.any(meta['polarity_flips'][ant, 'Jee'])
# test operation where no good answer is possible, so we expect it to fail
data[(0, 1, 'ee')] *= -1
meta, g_fc = rc.firstcal(data, freqs)
for ant in meta['polarity_flips']:
assert np.all([m is None for m in meta['polarity_flips'][ant]])
# test errors
with pytest.raises(ValueError):
om._build_polarity_baseline_groups(data, reds, edge_cut=100)
with pytest.raises(ValueError):
om._build_polarity_baseline_groups(data, reds, max_rel_angle=np.pi)
class TestRedundantCalibrator(object):
def test_init(self):
# test a very small array
pos = hex_array(3, split_core=False, outriggers=0)
pos = {ant: pos[ant] for ant in range(4)}
reds = om.get_reds(pos)
rc = om.RedundantCalibrator(reds)
with pytest.raises(ValueError):
rc = om.RedundantCalibrator(reds, check_redundancy=True)
# test disconnected redundant array
pos = hex_array(5, split_core=False, outriggers=0)
pos = {ant: pos[ant] for ant in pos if ant in [0, 1, 5, 6, 54, 55, 59, 60]}
reds = om.get_reds(pos)
try:
rc = om.RedundantCalibrator(reds, check_redundancy=True)
except ValueError:
assert False, 'This array is actually redundant, so check_redundancy should not raise a ValueError.'
def test_build_eq(self):
antpos = linear_array(3)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3
assert eqs['g_0_Jxx * g_1_Jxx_ * u_0_xx'] == (0, 1, 'xx')
assert eqs['g_1_Jxx * g_2_Jxx_ * u_0_xx'] == (1, 2, 'xx')
assert eqs['g_0_Jxx * g_2_Jxx_ * u_1_xx'] == (0, 2, 'xx')
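        # Naming convention in the solver equations: a trailing underscore (e.g. g_1_Jxx_)
        # denotes the complex conjugate of that gain, and u_<n>_<pol> is the shared model
        # visibility of the n-th unique baseline for that polarization.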
reds = om.get_reds(antpos, pols=['xx', 'yy', 'xy', 'yx'], pol_mode='4pol')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3 * 4
assert eqs['g_0_Jxx * g_1_Jyy_ * u_4_xy'] == (0, 1, 'xy')
assert eqs['g_1_Jxx * g_2_Jyy_ * u_4_xy'] == (1, 2, 'xy')
assert eqs['g_0_Jxx * g_2_Jyy_ * u_5_xy'] == (0, 2, 'xy')
assert eqs['g_0_Jyy * g_1_Jxx_ * u_6_yx'] == (0, 1, 'yx')
assert eqs['g_1_Jyy * g_2_Jxx_ * u_6_yx'] == (1, 2, 'yx')
assert eqs['g_0_Jyy * g_2_Jxx_ * u_7_yx'] == (0, 2, 'yx')
reds = om.get_reds(antpos, pols=['xx', 'yy', 'xy', 'yx'], pol_mode='4pol_minV')
gains, true_vis, data = sim_red_data(reds)
info = om.RedundantCalibrator(reds)
eqs = info.build_eqs(data)
assert len(eqs) == 3 * 4
assert eqs['g_0_Jxx * g_1_Jyy_ * u_4_xy'] == (0, 1, 'xy')
assert eqs['g_1_Jxx * g_2_Jyy_ * u_4_xy'] == (1, 2, 'xy')
assert eqs['g_0_Jxx * g_2_Jyy_ * u_5_xy'] == (0, 2, 'xy')
assert eqs['g_0_Jyy * g_1_Jxx_ * u_4_xy'] == (0, 1, 'yx')
assert eqs['g_1_Jyy * g_2_Jxx_ * u_4_xy'] == (1, 2, 'yx')
assert eqs['g_0_Jyy * g_2_Jxx_ * u_5_xy'] == (0, 2, 'yx')
with pytest.raises(KeyError):
info.build_eqs({})
def test_solver(self):
antpos = linear_array(3)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds)
w = {}
w = dict([(k, 1.) for k in d.keys()])
def solver(data, wgts, **kwargs):
np.testing.assert_equal(data['g_0_Jxx * g_1_Jxx_ * u_0_xx'], d[0, 1, 'xx'])
np.testing.assert_equal(data['g_1_Jxx * g_2_Jxx_ * u_0_xx'], d[1, 2, 'xx'])
np.testing.assert_equal(data['g_0_Jxx * g_2_Jxx_ * u_1_xx'], d[0, 2, 'xx'])
if len(wgts) == 0:
return
np.testing.assert_equal(wgts['g_0_Jxx * g_1_Jxx_ * u_0_xx'], w[0, 1, 'xx'])
np.testing.assert_equal(wgts['g_1_Jxx * g_2_Jxx_ * u_0_xx'], w[1, 2, 'xx'])
np.testing.assert_equal(wgts['g_0_Jxx * g_2_Jxx_ * u_1_xx'], w[0, 2, 'xx'])
return
info._solver(solver, d)
info._solver(solver, d, w)
def test_firstcal_iteration(self):
NANTS = 18
NFREQ = 64
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
fqs = np.linspace(.1, .2, NFREQ)
g, true_vis, d = sim_red_data(reds, shape=(1, NFREQ), gain_scatter=0)
delays = {k: np.random.randn() * 30 for k in g.keys()} # in ns
fc_gains = {k: np.exp(2j * np.pi * v * fqs) for k, v in delays.items()}
delays = {k: np.array([[v]]) for k, v in delays.items()}
fc_gains = {i: v.reshape(1, NFREQ) for i, v in fc_gains.items()}
gains = {k: v * fc_gains[k] for k, v in g.items()}
gains = {k: v.astype(np.complex64) for k, v in gains.items()}
calibrate_in_place(d, gains, old_gains=g, gain_convention='multiply')
d = {k: v.astype(np.complex64) for k, v in d.items()}
dly_sol, off_sol = info._firstcal_iteration(d, df=fqs[1] - fqs[0], f0=fqs[0], medfilt=False)
sol_degen = info.remove_degen_gains(dly_sol, degen_gains=delays, mode='phase')
for i in range(NANTS):
assert dly_sol[(i, 'Jxx')].dtype == np.float64
assert dly_sol[(i, 'Jxx')].shape == (1, 1)
assert np.allclose(np.round(sol_degen[(i, 'Jxx')] - delays[(i, 'Jxx')], 0), 0)
def test_firstcal(self):
np.random.seed(21)
antpos = hex_array(2, split_core=False, outriggers=0)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
rc = om.RedundantCalibrator(reds)
freqs = np.linspace(1e8, 2e8, 1024)
# test firstcal where the degeneracies of the phases and delays have already been removed so no abscal is necessary
gains, true_vis, d = sim_red_data(reds, gain_scatter=0, shape=(2, len(freqs)))
fc_delays = {ant: [[100e-9 * np.random.randn()]] for ant in gains.keys()} # in s
fc_delays = rc.remove_degen_gains(fc_delays)
        fc_offsets = {ant: [[.49 * np.pi * (np.random.rand() > .90)]] for ant in gains.keys()}  # the .49 removes the possibility of phase wraps that need abscal
fc_offsets = rc.remove_degen_gains(fc_offsets)
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay - 1.0j * fc_offsets[ant]), (1, len(freqs)))
for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
meta, g_fc = rc.firstcal(d, freqs, conv_crit=0)
np.testing.assert_array_almost_equal(np.linalg.norm([g_fc[ant] - gains[ant] for ant in g_fc]), 0, decimal=3)
# test firstcal with only phases (no delays)
gains, true_vis, d = sim_red_data(reds, gain_scatter=0, shape=(2, len(freqs)))
fc_delays = {ant: [[0 * np.random.randn()]] for ant in gains.keys()} # in s
        fc_offsets = {ant: [[.49 * np.pi * (np.random.rand() > .90)]] for ant in gains.keys()}  # the .49 removes the possibility of phase wraps that need abscal
fc_offsets = rc.remove_degen_gains(fc_offsets)
fc_gains = {ant: np.reshape(np.exp(-2.0j * np.pi * freqs * delay - 1.0j * fc_offsets[ant]), (1, len(freqs)))
for ant, delay in fc_delays.items()}
for ant1, ant2, pol in d.keys():
d[(ant1, ant2, pol)] *= fc_gains[(ant1, split_pol(pol)[0])] * np.conj(fc_gains[(ant2, split_pol(pol)[1])])
for ant in gains.keys():
gains[ant] *= fc_gains[ant]
meta, g_fc = rc.firstcal(d, freqs, conv_crit=0)
np.testing.assert_array_almost_equal(np.linalg.norm([g_fc[ant] - gains[ant] for ant in g_fc]), 0, decimal=10) # much higher precision
def test_logcal(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, gain_scatter=.05)
w = dict([(k, 1.) for k in d.keys()])
meta, sol = info.logcal(d)
for i in range(NANTS):
assert sol[(i, 'Jxx')].shape == (10, 10)
for bls in reds:
ubl = sol[bls[0]]
assert ubl.shape == (10, 10)
for bl in bls:
d_bl = d[bl]
mdl = sol[(bl[0], 'Jxx')] * sol[(bl[1], 'Jxx')].conj() * ubl
np.testing.assert_almost_equal(np.abs(d_bl), np.abs(mdl), decimal=10)
np.testing.assert_almost_equal(np.angle(d_bl * mdl.conj()), 0, decimal=10)
for k in d.keys():
d[k] = np.zeros_like(d[k])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
meta, sol = info.logcal(d)
om.make_sol_finite(sol)
for red in reds:
np.testing.assert_array_equal(sol[red[0]], 0.0)
for ant in gains.keys():
np.testing.assert_array_equal(sol[ant], 1.0)
def test_omnical(self):
NANTS = 18
antpos = linear_array(NANTS)
reds = om.get_reds(antpos, pols=['xx'], pol_mode='1pol')
info = om.RedundantCalibrator(reds)
gains, true_vis, d = sim_red_data(reds, gain_scatter=.0099999)
w = dict([(k, 1.) for k in d.keys()])
        sol0 = dict([(k, np.ones_like(v)) for k, v in gains.items()])
import pickle
import numpy as np
import random
from sklearn.linear_model import LinearRegression
def alg():
random_normal = np.random.normal(0, 0.35, size=2000)
test = np.array(
[random_normal[i] for i in range(len(random_normal)) if -1.0 < random_normal[i] and random_normal[i] < 1.0])
grade = []
for i in range(len(test)):
if np.quantile(test, 0.1) < test[i] < np.quantile(test, 0.4):
grade.append(random.randint(50, 65))
if np.quantile(test, 0.4) < test[i] < np.quantile(test, 0.87):
grade.append(random.randint(65, 85))
if test[i] > np.quantile(test, 0.87):
grade.append(random.randint(85, 100))
if test[i] < np.quantile(test, 0.1):
grade.append(random.randint(0, 50))
grade = np.array(grade)
random_data = []
for _ in range(2000):
random_data.append([float(random.choice(grade)), float(random.choice(grade)), float(random.choice(grade))])
grade1 = []
for x in random_data:
grade1.append(float((x[0] * x[1] * x[2]) ** (1 / 3)))
x_train = np.array(random_data)
    y_train = np.array(grade1)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch import Tensor
import numpy as np
from collections import OrderedDict
# https://discuss.pytorch.org/t/torch-round-gradient/28628/6
class Round_fn(torch.autograd.function.InplaceFunction):
@staticmethod
def forward(ctx, input):
ctx.input = input
return torch.round(input)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
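# Round_fn is a straight-through estimator: values are rounded in the forward pass
# while the backward pass returns the incoming gradient unchanged, so the
# non-differentiable rounding step does not block gradient flow. Typical use:
#   x_q = Round_fn.apply(x * (n_levels - 1)) / (n_levels - 1)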
def softmax_init(bits):
degree = 4
    theta = (bits ** degree) / (bits ** degree).sum()
return theta
"""
@inproceedings{
esser2020learned,
title={LEARNED STEP SIZE QUANTIZATION},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=rkgO66VKDS}
}
"""
def grad_scale(x, scale):
yOut = x
yGrad = x * scale
return (yOut-yGrad).detach() + yGrad
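# grad_scale keeps the forward value of x but rescales its gradient by `scale`:
# (yOut - yGrad).detach() contributes nothing to the backward pass, so only the
# yGrad = x * scale term is differentiated. This is the step-size gradient scaling
# from the LSQ paper cited above.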
class Q_ReLU(nn.Module):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU, self).__init__()
self.bits = Parameter(Tensor([32]))
self.act_func = act_func
self.inplace = inplace
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if self.act_func:
x = F.relu(x, self.inplace)
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
nlvs = torch.pow(2, self.bits) # soft forward
#nlvs = torch.round(bits ** 2) # hard forward
x = F.hardtanh(x / a, 0, 1)
x_bar = Round_fn.apply(x.mul(nlvs-1)).div_(nlvs-1) * c
#x_bar = RoundQuant.apply(x, nlvs) * c
return x_bar
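# In Q_ReLU, a = softplus(self.a) sets the clipping range, c = softplus(self.c) sets
# the output scale, and the learnable bit-width enters through nlvs = 2**bits, so the
# effective number of quantization levels stays differentiable. Rough forward sketch
# for a fixed 4-bit setting:
#   a, c = F.softplus(q.a), F.softplus(q.c)
#   x_hat = F.hardtanh(x / a, 0, 1)
#   x_q = Round_fn.apply(x_hat * (2**4 - 1)) / (2**4 - 1) * c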
class Q_ReLU6(Q_ReLU):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU6, self).__init__(act_func, inplace)
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
if offset + diff > 6:
self.a.data.fill_(np.log(np.exp(6)-1))
self.c.data.fill_(np.log(np.exp(6)-1))
else:
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
#print("Q_ReLU6")
#print("self.bits", self.bits)
#print("self.a", self.a)
#print("self.c", self.c)
def initialize_qonly(self, offset, diff):
if offset + diff > 6:
self.a.data.fill_(np.log(np.exp(6)-1))
self.c.data.fill_(np.log(np.exp(6)-1))
else:
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
class Q_Sym(nn.Module):
def __init__(self):
super(Q_Sym, self).__init__()
self.bits = Parameter(Tensor([32]))
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
nlvs = torch.pow(2, self.bits)
x = F.hardtanh(x / a, -1, 1)
x_bar = Round_fn.apply(x.mul(nlvs/2-1)).div_(nlvs/2-1) * c
#x_bar = RoundQuant.apply(x, torch.round(nlvs / 2)) * c
return x_bar
################## didn't modify Q_HSwish #################
class Q_HSwish(nn.Module):
def __init__(self, act_func=True):
super(Q_HSwish, self).__init__()
self.n_lvs = [1]
self.bits = [32]
self.act_func = act_func
self.a = Parameter(Tensor(1))
self.b = 3/8
self.c = Parameter(Tensor(1))
self.d = -3/8
def initialize(self, n_lvs, offset, diff):
self.n_lvs = n_lvs
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if self.act_func:
x = x * (F.hardtanh(x + 3, 0, 6) / 6)
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
x = x + self.b
x = F.hardtanh(x / a, 0, 1)
x = Round_fn.apply(x.mul(self.n_lvs-1)).div_(self.n_lvs) * c
#x = RoundQuant.apply(x, self.n_lvs) * c
x = x + self.d
return x
##########################################################
class Q_Conv2d(nn.Conv2d):
def __init__(self, *args, **kargs):
super(Q_Conv2d, self).__init__(*args, **kargs)
self.bits = Parameter(Tensor([32]))
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
self.weight_old = None
self.computation = 0
def initialize(self, bits):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
max_val = self.weight.data.abs().max().item()
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))
        self.c.data.fill_(np.log(np.exp(max_val * 0.9)-1))
import numpy as np
from scipy.sparse import csr_matrix
import myLOBPCG_new
###########################################################################
#
# Parameters and file path
#
###########################################################################
TJfile='../../TJdata/triples_30000.dat'
prefix='../../output/' # directory contains rowA.binary, colA.binary, valA.binary; and will save outputs
numTJ=30000 # number of triple junctions
lamb=1000 # hyperparameter for the strength of the regularization
sym='Cubic' # Cubic or Hex, it changes the gbdat file header
fn= prefix+'Cub.gbdat' # the name of output gbdat file
###########################################################################
#
# Define util functions
#
###########################################################################
def read_dat(datFile, numTJ):
"""
Input: triples.dat, wrote from the fortran program Torq_gen
size=[numTJ*8,]
In each group, the data is [TJ directon, EA1, GB1, EA2, GB2, EA3, GB3]
Output: TJs, direction of the triple junctions
size = [numTJ, 3]
EAs, the EA angles of the 3 grains at a TJ
size = [numTJ, 3, 3]
norms, normal direction of the 3 GB at a TJ
size = [numTJ, 3, 3]
"""
with open(datFile) as f:
tmp = [line.split() for line in f if line.strip()]
TJs = np.zeros((numTJ, 3))
EAs = np.zeros((numTJ, 3, 3))
norms = np.zeros((numTJ, 3, 3))
for i in range(numTJ):
TJs[i,:] = np.array(tmp[i*8 + 1]).astype(float)
EAs[i,0, :] = np.array(tmp[i*8 + 2]).astype(float)
norms[i,0, :] = np.array(tmp[i*8 + 3]).astype(float)
EAs[i, 1, :] = np.array(tmp[i*8 + 4]).astype(float)
norms[i, 1, :] = np.array(tmp[i*8 + 5]).astype(float)
EAs[i, 2, :] = np.array(tmp[i*8 + 6]).astype(float)
norms[i, 2, :] = np.array(tmp[i*8 + 7]).astype(float)
return (TJs, EAs, norms)
def EulerZXZ2Mat(e):
"""
Active Euler Angle (radian) in ZXZ convention to active rotation matrix, which means newV=M*oldV
"""
x=e[0]
y=e[1]
z=e[2]
s1=np.sin(x)
s2=np.sin(y)
s3=np.sin(z)
c1=np.cos(x)
c2=np.cos(y)
c3=np.cos(z)
m=np.array([[c1*c3-c2*s1*s3,-c1*s3-c3*c2*s1,s1*s2],
[s1*c3+c2*c1*s3,c1*c2*c3-s1*s3,-c1*s2],
[s3*s2,s2*c3,c2]])
return m
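# The matrix above is the composition Rz(x) @ Rx(y) @ Rz(z) for active rotations,
# i.e. the standard ZXZ Euler-angle convention with angles in radians.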
def EAtoG(EA):
"""
Input: a set of Euler Angle
size=[3,]
Output: the corresponding orientation matrix g
size = [3, 3]
"""
g = np.zeros((3,3))
EA = np.radians(EA)
g=EulerZXZ2Mat(EA).T
return g
###########################################################################
#
# Construct and solve the minimization problem to get GB energy
#
###########################################################################
(TJs, EAs, norms) = read_dat(TJfile, numTJ)
Norm = np.empty((numTJ*3, 3))
import os
import numpy as np
import cv2
import torch
import torch.nn.functional as tnf
def compute_bpp(prob: torch.Tensor, num_pixels: int, batch_reduction='mean'):
""" bits per pixel
Args:
prob (torch.Tensor): probabilities
num_pixels (int): number of pixels
"""
assert isinstance(prob, torch.Tensor) and prob.dim() == 4
p_min = prob.detach().min()
if p_min < 0:
print(f'Error: prob: {prob.shape}, min={p_min} is less than 0.')
elif p_min == 0:
print(f'Warning: prob: {prob.shape}, min={p_min} equals to 0.')
elif torch.isnan(p_min):
num = torch.isnan(prob).sum()
print(f'Error: prob: {prob.shape}, {num} of it is nan.')
nB = prob.shape[0]
bpp = - torch.log2(prob).sum() / num_pixels
if batch_reduction == 'mean':
bpp = bpp / nB
elif batch_reduction == 'sum':
pass
else:
raise ValueError()
return bpp
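# Hedged usage sketch (shapes are assumptions, not taken from this file): for a batch
# of N images of size H x W, `prob` holds the per-symbol likelihoods from the entropy
# model and num_pixels = H * W, so compute_bpp with batch_reduction='mean' returns the
# average number of bits per pixel per image.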
def _gaussian(window_size, sigma):
gauss = torch.Tensor(
[np.exp(-(x - window_size//2)**2 / float(2*sigma**2)) for x in range(window_size)]
)
return gauss/gauss.sum()
def _create_window(window_size, sigma, channel):
_1D_window = _gaussian(window_size, sigma).unsqueeze(1)
_2D_window = _1D_window.mm(
_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
class MS_SSIM(torch.nn.Module):
''' Adapted from https://github.com/lizhengwei1992/MS_SSIM_pytorch
'''
def __init__(self, max_val=1.0, reduction='mean'):
super().__init__()
self.channel = 3
self.max_val = max_val
self.weight = torch.Tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
assert reduction in {'mean'}, 'Invalid reduction'
self.reduction = reduction
def _ssim(self, img1: torch.Tensor, img2: torch.Tensor):
_, c, w, h = img1.size()
window_size = min(w, h, 11)
sigma = 1.5 * window_size / 11
window = _create_window(window_size, sigma, self.channel)
window = window.to(device=img1.device, dtype=img1.dtype)
mu1 = tnf.conv2d(img1, window, padding=window_size //
2, groups=self.channel)
mu2 = tnf.conv2d(img2, window, padding=window_size //
2, groups=self.channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = tnf.conv2d(
img1*img1, window, padding=window_size//2, groups=self.channel) - mu1_sq
sigma2_sq = tnf.conv2d(
img2*img2, window, padding=window_size//2, groups=self.channel) - mu2_sq
sigma12 = tnf.conv2d(img1*img2, window, padding=window_size //
2, groups=self.channel) - mu1_mu2
C1 = (0.01*self.max_val)**2
C2 = (0.03*self.max_val)**2
V1 = 2.0 * sigma12 + C2
V2 = sigma1_sq + sigma2_sq + C2
ssim_map = ((2*mu1_mu2 + C1)*V1)/((mu1_sq + mu2_sq + C1)*V2)
mcs_map = V1 / V2
if self.reduction == 'mean':
ssim_map = ssim_map.mean()
mcs_map = mcs_map.mean()
return ssim_map, mcs_map
def forward(self, img1, img2):
assert img1.shape == img2.shape and img1.device == img2.device
self.weight = self.weight.to(device=img1.device)
levels = 5
if min(img1.shape[2:4]) < 2**levels:
return torch.zeros(1)
msssim = []
mcs = []
for i in range(levels):
ssim_map, mcs_map = self._ssim(img1, img2)
msssim.append(ssim_map)
mcs.append(mcs_map)
filtered_im1 = tnf.avg_pool2d(img1, kernel_size=2, stride=2)
filtered_im2 = tnf.avg_pool2d(img2, kernel_size=2, stride=2)
img1 = filtered_im1
img2 = filtered_im2
msssim = torch.stack(msssim)
mcs = torch.stack(mcs)
value = torch.prod(mcs[0:levels-1] ** self.weight[0:levels-1]) \
* (msssim[levels-1] ** self.weight[levels-1])
value: torch.Tensor
return value
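# MS-SSIM combines the contrast/structure term (mcs) of the four finer scales with the
# full SSIM of the last, coarsest scale, each raised to its standard weight:
#   value = prod_{j=0..3} mcs_j ** w_j * ssim_4 ** w_4
# with average pooling (factor 2) between consecutive scales.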
@torch.no_grad()
def get_important_channels(model, topk=16):
from mycv.paths import IMPROC_DIR
device = next(model.parameters()).device
img_dir = IMPROC_DIR / 'kodak'
img_names = os.listdir(img_dir)
# img_names = img_names[:1]
entropy_sum = None
value_max = None
for imname in img_names:
impath = img_dir / imname
im = cv2.imread(str(impath))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
input_ = torch.from_numpy(im).permute(2,0,1).float() / 255.0
input_ = input_.unsqueeze(0)
input_ = input_.to(device=device)
ih, iw = input_.shape[2:4]
# encode
outputs = model(input_)
likelihoods = outputs['likelihoods']
p_main = likelihoods['y']
z = model.g_a(input_)
zhat = torch.round(z)
# rec = rec.clamp_(min=0, max=1)
# rec = (rec.squeeze(0).cpu().permute(1,2,0) * 255).to(dtype=torch.uint8).numpy()
# plt.imshow(rec); plt.show()
# entropys
assert p_main.shape[2:4] == (ih//16, iw//16)
Hs = -torch.log2(p_main)
Hs = Hs.sum(dim=3).sum(dim=2).squeeze(0).cpu()
# plt.figure(); plt.bar(np.arange(len(Hs)), Hs)
# plt.figure(); plt.bar(np.arange(len(Es)), Es); plt.show()
# max values within channels
assert zhat.shape[2:4] == (ih//16, iw//16)
zmax = torch.amax(zhat, dim=(2,3)).squeeze(0).cpu()
entropy_sum = entropy_sum + Hs if entropy_sum is not None else Hs
value_max = torch.maximum(value_max, zmax) if value_max is not None else zmax
entropy_mean = entropy_sum / len(img_names)
# plt.bar(np.arange(len(entropy_mean)), entropy_mean); plt.show()
entropys, indexs = torch.sort(entropy_mean, descending=True)
indexs = indexs[:topk]
debug = 1
# plt.bar(np.arange(len(entropys)), entropys)
# plt.xlabel('Rank')
# plt.ylabel('Entropy')
# plt.show()
# exit()
return indexs, value_max, zhat.shape[1]
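# get_important_channels ranks the latent channels by their mean entropy (in bits)
# over the Kodak images and records the largest quantized value observed per channel;
# plot_response below excites each top-ranked channel one at a time (scaled slightly
# above that maximum) and decodes the result to visualize its spatial response.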
@torch.no_grad()
def plot_response(model: torch.nn.Module, topk, save_path):
assert not model.training
device = next(model.parameters()).device
if hasattr(model, 'decoder'):
decode_func = model.decoder
elif hasattr(model, 'g_s'):
decode_func = model.g_s
else:
raise NotImplementedError()
channel_indices, value_max, nC = get_important_channels(model, topk)
images = []
for k, chi in enumerate(channel_indices):
# print(f'running channel {chi}')
x = torch.zeros(1, nC, 3, 3, device=device)
a = value_max[chi] * 1.2
x[0, chi, 1, 1] = a
rec1 = decode_func(x)
# x[0, chi, 1, 1] = -a
# rec2 = decode_func(x)
# rec = torch.cat([rec1, rec2], dim=2)
rec = rec1
rec = rec.clamp_(min=0, max=1)
rec = (rec.squeeze(0).cpu().permute(1,2,0) * 255).to(dtype=torch.uint8).numpy()
# assert rec.shape == (48,48,3)
h,w = rec.shape[0:2]
# rec[h//2,:,:] = 255
# resize images
rec = cv2.resize(rec, (w*2,h*2), interpolation=cv2.INTER_NEAREST)
rec = rec.copy()
# label the image
cv2.putText(rec, f'{k+1}', (4,20),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2)
# cv2.putText(rec, f'Rank {k}, channel {chi}', (0,16),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1)
# plt.imshow(rec); plt.show()
images.append(rec)
if len(images) == 8:
h,w = images[0].shape[0:2]
assert h == w
            border = (np.ones((h//16, w, 3)) * 255).astype(np.uint8)  # assumed: white separator strip between image rows
from time import time
import numpy as np
from pandas import DataFrame, Series
from scipy.stats import truncnorm
from .constants import RAD2DEG, YEAR2DAY, FLUX_SCALE
import matplotlib.pyplot as plt
import logging
spot_contrast = 0.75
n_bins = 5 # number of area bins
delta_lnA = 0.5 # bin width in log-area
max_area = 100 # original area of largest bipoles (deg^2)
tau1 = 5 # first and last times (in days) for emergence of "correlated" regions
tau2 = 15 # why these numbers ??
prob = 0.001 # total probability for "correlation" # based on what ??
nlon = 36 # number of longitude bins
nlat = 16 # number of latitude bins
dcon = 2 * np.sinh(delta_lnA / 2) # constant from integration over area bin
fact = np.exp(delta_lnA * np.arange(n_bins))
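# Hedged reading of the constants above: bipole areas are grouped into n_bins
# logarithmically spaced bins of width delta_lnA; dcon = 2*sinh(delta_lnA/2) comes
# from integrating exp(lnA) over one bin, and fact = exp(delta_lnA * k) gives the
# relative area of bin k with respect to the smallest bin.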
"""The class which defines camera """
# function comment template
"""name
description
Args:
Returns:
"""
# class comment template
"""The class of
descriptor
Attributes:
"""
import cv2
import numpy as np
from tqdm import tqdm
import yaml
import logging
from ModelLoader import Loader
from ModelEvaluator import Evaluator
from ModelCalibrator import Calibrator
from ModelUtil.util import *
from ModelSet.settings import *
class Camera(object):
"""Basic class Camera
Base class.
Can be initialized by config file or settings.
    Defaults to settings.
Attributes:
name: name of the camera
task: the usage of camera
Config: a dictionary can be used to set parameters
IntP: intrinsic matrix for camera [3x3]
fx: 1x1
fy: 1x1
cx: 1x1
cy: 1x1
ExtP: extrinsic matrix for cemera [R t]
R: Nx3x3
t: Nx3x1
N is the Nth R, t in the Nth calibration chess board
DisP: distortion parameters [k1 k2 p1 p2 k3]
Image: Images taken by this camera [NxHxW]
N - number of images
HxW - size
Loader: class-Loader for loading images & parameters
Calibrator: class-Calibrator
flag_calib: has been calibrated or not [bool]
Evaluator: class-Evaluator
Functions:
Calibrate camera
Undistort image
show the img
"""
def __init__(self,name):
"""name
descriptor
Args:
Returns:
"""
self.name = name
self.task = TASK
self.IntP = np.zeros((3,3))
self.fx = 0.0
self.fy = 0.0
self.cx = 0.0
self.cy = 0.0
self.IntError = 0.0
self.ExtP = np.zeros((3,4))
self.R = np.zeros((3,3))
self.t = np.zeros((3,1))
self.DisP = np.zeros((1,5))
self.Image = None
self.Image_num = 0
self.Loader = Loader()
self.img_path = ''
self.Calibrator = Calibrator(self.name)
        self.chess_board_size = np.array(CHESSBOARDSIZE)
import numpy as np
import pickle
from videotime.layout import Layout
class TimeDetector:
'''Detects superimposed timestamps in videos.
The detector performs a trivial template match to determine
the digit class for each digit position in a given Layout.
Since overlay digits are usually rendered pixel exact, the
detector considers only a single image location per digit.
Extracting the time of a single still image usually takes around
80usecs, or equivalently 12500 images can be processed per second.
The detector is quite robust against Gaussian noise but cannot handle
    image shifts very well. However, this usually does not pose a problem
as overlays are rendered exactly.
'''
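    # Hedged usage sketch (the file names are placeholders, not part of this module):
    #   detector = TimeDetector(layout, weights)
    #   timestamp, prob = detector.detect(frame)
    # where `layout` defines the pixel slices of each digit position and `weights`
    # holds one template per digit class, produced by a separate training step.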
def __init__(self, layout, weights):
'''Create detector from time layout and weights.'''
self.layout = layout
self.weights = weights
def detect(self, img, **kwargs):
'''Detect and parse time overlay.
Params
------
img : HxW, HxWxC
Image to be processed
Kwargs
------
verbose: bool, optional
Wheter or not to sho detection result.
Returns
-------
time : datetime object
Detected time
probability: scalar
Probability of detection [0..1]
'''
verbose = kwargs.pop('verbose', False)
img = np.asarray(img)
if img.ndim == 3:
img = img[..., 0]
# Extract letters from HH:MM:SS.TT
rois = np.array([img[ys, xs] for xs, ys in zip(self.layout.slicesx, self.layout.slicesy)])
# Scale/shift to [-1, 1]
srois = (rois / 255. - 0.5)*2
# Inner product of digit positions and weights to yield scores
scores = np.tensordot(srois, self.weights, axes=([1,2], [1,2]))
# Probabilities for each roi according to the alphabet (softmax)
def softmax(x):
# e ^ (x - max(x)) / sum(e^(x - max(x))
xn = x - x.max(axis=1, keepdims=True)
ex = np.exp(xn)
return ex / ex.sum(axis=1, keepdims=True)
# Use max probs for each digit position as detection result
probs = softmax(scores)
        dtime = np.argmax(probs, axis=1)
'''
MIT License
Copyright (c) 2022 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from datasets import LeonardoDataset, build_predicates, denormalize_rgb
from functools import partial
from matplotlib import pyplot as plt
from networks import EmbeddingNet, ReadoutNet
from tensorboardX import SummaryWriter
from train_utils import train_one_epoch, eval_one_epoch, train_ddp
import argparse
import json
import numpy as np
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
unary_pred = [
'on_surface(%s, left)', 'on_surface(%s, right)', 'on_surface(%s, far)',
'on_surface(%s, center)', 'has_obj(robot, %s)', 'top_is_clear(%s)',
'in_approach_region(robot, %s)'
]
binary_pred = ['stacked(%s, %s)', 'aligned_with(%s, %s)']
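# The predicate templates above are expanded by build_predicates (imported from
# datasets): unary templates are formatted with each object name and binary templates
# with object pairs, which fixes the ordering of the target/logit columns consumed by
# step() and calc_acc() below.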
def step(use_gripper, data, model, head):
img, obj_patches, gripper, target = data
emb, attn = model(img, obj_patches)
if use_gripper:
emb = torch.cat(
[emb, gripper[:, None, None].expand(-1, emb.shape[1], -1)], dim=-1
)
logits = head(emb)
loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, target)
return logits, loss
def calc_acc(data, logits, pred_types):
img, obj_patches, gripper, target = data
pred = (logits.detach() > 0).int()
acc = (pred == target.int()).sum(dim=0) / logits.shape[0] * 100
return acc
def log(
writer, global_step, split, epoch, idx, total,
batch_time, data_time, avg_loss, avg_acc, pred_types=None
):
print(
f'Epoch {(epoch+1):02d} {split.capitalize()} {idx:04d}/{total:04d} '
f'Batch time {batch_time:.3f} Data time {data_time:.3f} '
f'Loss {avg_loss.item():.4f} Accuracy {avg_acc.mean().item():.2f}'
)
acc = [a.mean() for a in avg_acc.split(list(pred_types.values()))]
writer.add_scalar(f'{split}/loss', avg_loss, global_step)
writer.add_scalar(f'{split}/accuracy', avg_acc.mean().item(), global_step)
for a, name in zip(acc, pred_types.keys()):
writer.add_scalar(f'{split}/accuracy_{name}', a.item(), global_step)
def plot(predicates, n_plot, data, logits):
img, obj_patches, gripper, target = data
patch_size = obj_patches.shape[-1]
img_with_obj = []
for i, o in zip(img[:n_plot], obj_patches[:n_plot]):
        obj_panel = np.full((patch_size+4, i.shape[2], 3), 255, dtype=np.uint8)
import numpy as np
import sys
import time
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GroupKFold
from sklearn.base import BaseEstimator
from scipy.linalg import cholesky, solve_triangular
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from ml_dft.kernel_functions import RBFKernel, MaternKernel
import os
import warnings
def get_alpha_add(n_basis, n_grid, delta, v):
alpha_add = np.pi * ((np.arange(n_basis / 2) / (n_grid * delta))**2 + v**2) / v
alpha_add = np.repeat(alpha_add, 2)
return alpha_add
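# Hedged reading: get_alpha_add produces per-coefficient regularization strengths to
# be added to the kernel-ridge alpha when fitting Fourier-basis coefficients; the
# penalty grows with the squared basis frequency relative to the grid extent
# (n_grid * delta), and np.repeat(..., 2) applies the same value to the sine and
# cosine coefficient of each frequency.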
class MultivariateGaussianProcessCV(BaseEstimator):
def __init__(self, krr_param_grid=None, cv_type=None, cv_nfolds=5, cv_groups=None,
cv_shuffles=1, n_components=None, single_combo=True,
verbose=0, copy_X=True, v=None, n_basis=None, n_grid=None, delta=None,
id=1, cleanup=True, kernel=None, squared_dist=False, kernel_params=None,
delta_learning=False, mae=False, replace_fit=True):
self.krr_param_grid = krr_param_grid
self.verbose = verbose
self.cv_nfolds = cv_nfolds
self.cv_type = cv_type
self.cv_groups = cv_groups
self.cv_shuffles = cv_shuffles
self.n_components = n_components
self.single_combo = single_combo
self.copy_X = copy_X
self.n_grid = n_grid
self.delta = delta
self.n_basis = n_basis
self.id = id
self.cleanup = cleanup
self.kernel = kernel
self.squared_dist = squared_dist
self.device = None
self.replace_fit = replace_fit
self.delta_learning = delta_learning
self.mae = mae
if self.kernel is None:
self.kernel = RBFKernel()
elif self.kernel == 'rbf':
self.kernel = RBFKernel(**kernel_params)
elif self.kernel == 'matern':
self.kernel = MaternKernel(**kernel_params)
        if self.krr_param_grid is not None and 'v' in self.krr_param_grid and not single_combo:
            raise ValueError('Can only add to alpha if single_combo=True')
def score(self, y_true, y_pred):
return np.mean((y_true - y_pred) ** 2)
def fit(self, X, y, labels=None, dist=None, importance_weights=None, cv_indices=None,
dist_savename=None):
t = time.time()
if y.ndim < 2:
y = y.reshape(-1, 1)
if self.n_components is not None:
if self.verbose > 0:
elapsed = time.time() - t
print('PCA [%dmin %dsec]' % (int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
self.pca = PCA(n_components=self.n_components, svd_solver='arpack')
y_ = self.pca.fit_transform(y)
if self.verbose > 0:
print('Lost %.1f%% information ' % (self.pca.noise_variance_) +
'[%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
elapsed = time.time() - t
else:
y_ = y
if labels is not None:
raise RuntimeError('Not implemented.')
if cv_indices is None:
cv_indices = np.arange(X.shape[0])
if self.cv_type is None:
kfold = RepeatedKFold(n_splits=self.cv_nfolds, n_repeats=self.cv_shuffles)
cv_folds = kfold.split(X[cv_indices])
n_cv_folds = kfold.get_n_splits()
elif self.cv_type == 'iter':
cv_folds = self.cv_groups
n_cv_folds = len(self.cv_groups)
elif self.cv_type == 'group':
groups = self.cv_groups
if self.cv_nfolds is None:
self.cv_nfolds = len(np.unique(groups))
kfold = GroupKFold(n_splits=self.cv_nfolds)
cv_folds = kfold.split(X[cv_indices], y[cv_indices], groups)
n_cv_folds = kfold.get_n_splits()
else:
raise Exception('Cross-validation type not supported')
add_train_inds = np.setdiff1d(np.arange(X.shape[0]), cv_indices)
cv_folds = list(cv_folds)
        cv_folds = [(np.concatenate((train_fold, add_train_inds)), test_fold)
                    for train_fold, test_fold in cv_folds]  # indices outside cv_indices always go to the training folds
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os.path as osp
from functools import partial
from typing import Callable, Sequence
import numpy as np
import rcsenv
from init_args_serializer import Serializable
from pyrado.environments.rcspysim.base import RcsSim
from pyrado.tasks.base import Task
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.final_reward import FinalRewMode, FinalRewTask
from pyrado.tasks.masked import MaskedTask
from pyrado.tasks.parallel import ParallelTasks
from pyrado.tasks.predefined import create_task_space_discrepancy_task
from pyrado.tasks.reward_functions import AbsErrRewFcn, ExpQuadrErrRewFcn, RewFcn
from pyrado.tasks.utils import proximity_succeeded
from pyrado.utils.data_types import EnvSpec
rcsenv.addResourcePath(rcsenv.RCSPYSIM_CONFIG_PATH)
rcsenv.addResourcePath(osp.join(rcsenv.RCSPYSIM_CONFIG_PATH, "PlanarInsert"))
def create_insert_task(env_spec: EnvSpec, state_des: np.ndarray, rew_fcn: RewFcn, success_fcn: Callable):
# Define the indices for selection. This needs to match the observations' names in RcsPySim.
idcs = ["Effector_X", "Effector_Z", "Effector_B", "Effector_Xd", "Effector_Zd", "Effector_Bd"]
# Get the masked environment specification
spec = EnvSpec(
env_spec.obs_space, env_spec.act_space, env_spec.state_space.subspace(env_spec.state_space.create_mask(idcs))
)
# Create a wrapped desired state task with the goal behind the wall
fdst = FinalRewTask(
DesStateTask(spec, state_des, rew_fcn, success_fcn),
mode=FinalRewMode(state_dependent=True, time_dependent=True),
)
# Mask selected states
return MaskedTask(env_spec, fdst, idcs)
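# create_insert_task defines the goal-reaching objective on the masked effector state
# (X, Z position, angle B and their velocities): an exponentiated quadratic error
# reward towards state_des, a proximity-based success criterion, and a state- and
# time-dependent final reward; MaskedTask re-embeds the subtask into the full spec.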
class PlanarInsertSim(RcsSim, Serializable):
"""
Planar 5- or 6-link robot environment where the task is to push the wedge-shaped end-effector through a small gap
"""
def __init__(self, task_args: dict, collision_config: dict = None, max_dist_force: float = None, **kwargs):
"""
Constructor
.. note::
This constructor should only be called via the subclasses.
:param task_args: arguments for the task construction
:param max_dist_force: maximum disturbance force, pass `None` for no disturbance
:param kwargs: keyword arguments forwarded to `RcsSim`
collisionConfig: specification of the Rcs CollisionModel
"""
Serializable._init(self, locals())
# Forward to RcsSim's constructor
RcsSim.__init__(
self,
envType="PlanarInsert",
task_args=task_args,
physicsConfigFile="pPlanarInsert.xml",
collisionConfig=collision_config,
**kwargs,
)
if kwargs.get("collisionConfig", None) is None:
collision_config = {
"pairs": [
{"body1": "Effector", "body2": "Link3"},
{"body1": "Effector", "body2": "Link2"},
{"body1": "UpperWall", "body2": "Link4"},
{"body1": "LowerWall", "body2": "Link4"},
{"body1": "LowerWall", "body2": "Link3"},
{"body1": "LowerWall", "body2": "Link2"},
],
"threshold": 0.05,
}
else:
collision_config = kwargs.get("collisionConfig")
# Setup disturbance
self._max_dist_force = max_dist_force
def _create_task(self, task_args: dict) -> Task:
# Define the task including the reward function
state_des = task_args.get("state_des", None)
if state_des is None:
# Get the goal position in world coordinates
p = self.get_body_position("Goal", "", "")
state_des = np.array([p[0], p[2], 0, 0, 0, 0]) # X, Z, B, Xd, Zd, Bd
# Create the individual subtasks
task_reach_goal = create_insert_task(
self.spec,
state_des,
rew_fcn=ExpQuadrErrRewFcn(
Q=np.diag([2e1, 2e1, 1e-1, 1e-2, 1e-2, 1e-2]), R=2e-2 * np.eye(self.act_space.flat_dim)
),
success_fcn=partial(proximity_succeeded, thold_dist=0.07, dims=[0, 1, 2]), # position and angle
)
task_ts_discrepancy = create_task_space_discrepancy_task(
self.spec, AbsErrRewFcn(q=0.1 * np.ones(2), r=np.zeros(self.act_space.shape))
)
return ParallelTasks([task_reach_goal, task_ts_discrepancy])
@classmethod
def get_nominal_domain_param(cls):
return dict(
link1_mass=2.0,
link2_mass=2.0,
link3_mass=2.0,
link4_mass=2.0,
upperwall_pos_offset_x=0.0,
upperwall_friction_coefficient=0.5,
effector_friction_coefficient=0.7,
)
def _disturbance_generator(self) -> (np.ndarray, None):
if self._max_dist_force is None:
return None
# Sample angle and force uniformly
angle = np.random.uniform(-np.pi, np.pi)
force = np.random.uniform(0, self._max_dist_force)
        return np.array([force * np.sin(angle), 0, force * np.cos(angle)])  # planar disturbance in the x-z plane (assumed completion)
import warnings
import numpy as np
import pytest
import shapely
from shapely.testing import assert_geometries_equal
from .common import all_types
from .common import empty as empty_geometry_collection
from .common import (
empty_line_string,
empty_line_string_z,
empty_point,
empty_point_z,
empty_polygon,
geometry_collection,
geometry_collection_z,
line_string,
line_string_nan,
line_string_z,
linear_ring,
multi_line_string,
multi_line_string_z,
multi_point,
multi_point_z,
multi_polygon,
multi_polygon_z,
point,
point_z,
polygon,
polygon_with_hole,
polygon_with_hole_z,
polygon_z,
shapely20_todo,
)
def test_get_num_points():
actual = shapely.get_num_points(all_types + (None,)).tolist()
assert actual == [0, 3, 5, 0, 0, 0, 0, 0, 0, 0]
def test_get_num_interior_rings():
actual = shapely.get_num_interior_rings(all_types + (polygon_with_hole, None))
assert actual.tolist() == [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]
def test_get_num_geometries():
actual = shapely.get_num_geometries(all_types + (None,)).tolist()
assert actual == [1, 1, 1, 1, 2, 1, 2, 2, 0, 0]
@pytest.mark.parametrize(
"geom",
[
point,
polygon,
multi_point,
multi_line_string,
multi_polygon,
geometry_collection,
],
)
def test_get_point_non_linestring(geom):
actual = shapely.get_point(geom, [0, 2, -1])
assert shapely.is_missing(actual).all()
@pytest.mark.parametrize("geom", [line_string, linear_ring])
def test_get_point(geom):
n = shapely.get_num_points(geom)
actual = shapely.get_point(geom, [0, -n, n, -(n + 1)])
assert_geometries_equal(actual[0], actual[1])
assert shapely.is_missing(actual[2:4]).all()
@pytest.mark.parametrize(
"geom",
[
point,
line_string,
linear_ring,
multi_point,
multi_line_string,
multi_polygon,
geometry_collection,
],
)
def test_get_exterior_ring_non_polygon(geom):
actual = shapely.get_exterior_ring(geom)
assert shapely.is_missing(actual).all()
def test_get_exterior_ring():
actual = shapely.get_exterior_ring([polygon, polygon_with_hole])
assert (shapely.get_type_id(actual) == 2).all()
@pytest.mark.parametrize(
"geom",
[
point,
line_string,
linear_ring,
multi_point,
multi_line_string,
multi_polygon,
geometry_collection,
],
)
def test_get_interior_ring_non_polygon(geom):
actual = shapely.get_interior_ring(geom, [0, 2, -1])
assert shapely.is_missing(actual).all()
def test_get_interior_ring():
actual = shapely.get_interior_ring(polygon_with_hole, [0, -1, 1, -2])
assert_geometries_equal(actual[0], actual[1])
assert shapely.is_missing(actual[2:4]).all()
@pytest.mark.parametrize("geom", [point, line_string, linear_ring, polygon])
def test_get_geometry_simple(geom):
actual = shapely.get_geometry(geom, [0, -1, 1, -2])
assert_geometries_equal(actual[0], actual[1])
assert shapely.is_missing(actual[2:4]).all()
@pytest.mark.parametrize(
"geom", [multi_point, multi_line_string, multi_polygon, geometry_collection]
)
def test_get_geometry_collection(geom):
n = shapely.get_num_geometries(geom)
actual = shapely.get_geometry(geom, [0, -n, n, -(n + 1)])
assert_geometries_equal(actual[0], actual[1])
assert shapely.is_missing(actual[2:4]).all()
def test_get_type_id():
actual = shapely.get_type_id(all_types).tolist()
assert actual == [0, 1, 2, 3, 4, 5, 6, 7, 7]
def test_get_dimensions():
actual = shapely.get_dimensions(all_types).tolist()
assert actual == [0, 1, 1, 2, 0, 1, 2, 1, -1]
def test_get_coordinate_dimension():
actual = shapely.get_coordinate_dimension([point, point_z, None]).tolist()
assert actual == [2, 3, -1]
def test_get_num_coordinates():
actual = shapely.get_num_coordinates(all_types + (None,)).tolist()
assert actual == [1, 3, 5, 5, 2, 2, 10, 3, 0, 0]
def test_get_srid():
"""All geometry types have no SRID by default; None returns -1"""
actual = shapely.get_srid(all_types + (None,)).tolist()
assert actual == [0, 0, 0, 0, 0, 0, 0, 0, 0, -1]
def test_get_set_srid():
actual = shapely.set_srid(point, 4326)
assert shapely.get_srid(point) == 0
assert shapely.get_srid(actual) == 4326
@pytest.mark.parametrize(
"func",
[
shapely.get_x,
shapely.get_y,
pytest.param(
shapely.get_z,
marks=pytest.mark.skipif(
shapely.geos_version < (3, 7, 0), reason="GEOS < 3.7"
),
),
],
)
@pytest.mark.parametrize("geom", all_types[1:])
def test_get_xyz_no_point(func, geom):
assert np.isnan(func(geom))
def test_get_x():
assert shapely.get_x([point, point_z]).tolist() == [2.0, 2.0]
def test_get_y():
assert shapely.get_y([point, point_z]).tolist() == [3.0, 3.0]
@pytest.mark.skipif(shapely.geos_version < (3, 7, 0), reason="GEOS < 3.7")
def test_get_z():
assert shapely.get_z([point_z]).tolist() == [4.0]
@pytest.mark.skipif(shapely.geos_version < (3, 7, 0), reason="GEOS < 3.7")
def test_get_z_2d():
assert np.isnan(shapely.get_z(point))
@pytest.mark.parametrize("geom", all_types)
def test_new_from_wkt(geom):
actual = shapely.Geometry(str(geom))
assert_geometries_equal(actual, geom)
# TODO(shapely-2.0) Python 3.10 build triggers warnings as errors, and this still
# raise the deprecation warning (which should be removed)
@shapely20_todo
def test_adapt_ptr_raises():
point = shapely.Geometry("POINT (2 2)")
with pytest.raises(AttributeError):
point._ptr += 1
@pytest.mark.parametrize(
"geom", all_types + (shapely.points(np.nan, np.nan), empty_point)
)
def test_hash_same_equal(geom):
assert hash(geom) == hash(shapely.apply(geom, lambda x: x))
@pytest.mark.parametrize("geom", all_types[:-1])
def test_hash_same_not_equal(geom):
assert hash(geom) != hash(shapely.apply(geom, lambda x: x + 1))
@pytest.mark.parametrize("geom", all_types)
def test_eq(geom):
assert geom == shapely.apply(geom, lambda x: x)
@pytest.mark.parametrize("geom", all_types[:-1])
def test_neq(geom):
assert geom != shapely.apply(geom, lambda x: x + 1)
@pytest.mark.parametrize("geom", all_types)
def test_set_unique(geom):
a = {geom, shapely.apply(geom, lambda x: x)}
assert len(a) == 1
def test_eq_nan():
assert line_string_nan != line_string_nan
def test_neq_nan():
assert not (line_string_nan == line_string_nan)
def test_set_nan():
# As NaN != NaN, you can have multiple "NaN" points in a set
# set([float("nan"), float("nan")]) also returns a set with 2 elements
a = set(shapely.linestrings([[[np.nan, np.nan], [np.nan, np.nan]]] * 10))
assert len(a) == 10 # different objects: NaN != NaN
def test_set_nan_same_objects():
# You can't put identical objects in a set.
    # x = float("nan"); set([x, x]) also returns a set with 1 element
a = set([line_string_nan] * 10)
assert len(a) == 1
@pytest.mark.parametrize(
"geom",
[
point,
multi_point,
line_string,
multi_line_string,
polygon,
multi_polygon,
geometry_collection,
empty_point,
empty_line_string,
empty_polygon,
empty_geometry_collection,
np.array([None]),
np.empty_like(np.array([None])),
],
)
def test_get_parts(geom):
expected_num_parts = shapely.get_num_geometries(geom)
if expected_num_parts == 0:
expected_parts = []
else:
expected_parts = shapely.get_geometry(geom, range(0, expected_num_parts))
parts = shapely.get_parts(geom)
assert len(parts) == expected_num_parts
assert_geometries_equal(parts, expected_parts)
def test_get_parts_array():
# note: this also verifies that None is handled correctly
# in the mix; internally it returns -1 for count of geometries
geom = np.array([None, empty_line_string, multi_point, point, multi_polygon])
expected_parts = []
for g in geom:
for i in range(0, shapely.get_num_geometries(g)):
expected_parts.append(shapely.get_geometry(g, i))
parts = shapely.get_parts(geom)
assert len(parts) == len(expected_parts)
assert_geometries_equal(parts, expected_parts)
def test_get_parts_geometry_collection_multi():
"""On the first pass, the individual Multi* geometry objects are returned
from the collection. On the second pass, the individual singular geometry
objects within those are returned.
"""
geom = shapely.geometrycollections([multi_point, multi_line_string, multi_polygon])
expected_num_parts = shapely.get_num_geometries(geom)
expected_parts = shapely.get_geometry(geom, range(0, expected_num_parts))
parts = shapely.get_parts(geom)
assert len(parts) == expected_num_parts
assert_geometries_equal(parts, expected_parts)
expected_subparts = []
for g in np.asarray(expected_parts):
for i in range(0, shapely.get_num_geometries(g)):
expected_subparts.append(shapely.get_geometry(g, i))
subparts = shapely.get_parts(parts)
assert len(subparts) == len(expected_subparts)
assert_geometries_equal(subparts, expected_subparts)
def test_get_parts_return_index():
    geom = np.array([multi_point, point, multi_polygon])
# This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
from __future__ import absolute_import, division, print_function, \
unicode_literals
import numpy
import xarray as xr
import os
from collections import OrderedDict
from pyremap import PointCollectionDescriptor
from mpas_analysis.shared.climatology import RemapMpasClimatologySubtask
from mpas_analysis.shared.io.utility import build_config_full_path, \
make_directories
from mpas_analysis.shared.io import write_netcdf
from mpas_analysis.ocean.utility import compute_zmid
from mpas_analysis.shared.interpolation import interp_1d
class ComputeTransectsSubtask(RemapMpasClimatologySubtask): # {{{
"""
A subtask for remapping climatologies to transect points
Attributes
----------
obsDatasets : TransectsObservations
A dictionary of observational datasets
verticalComparisonGridName : {'obs', 'mpas'} or any str
The vertical grid name on which to compare MPAS data with
observations. 'obs' indicates the locations of the original
observations; 'mpas' is the vertical locations of MPAS points,
remapped to the observation latitude/longitude. If any other,
string, verticalComparisonGrid should be a 1D numpy array and this
name should be a useful (and unique) description of that grid.
verticalComparisonGrid : 1D numpy array
The vertical grid on which to compare MPAS data with observations
if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The
values should be elevations (in m, typically negative).
transectNumber : ``xarray.DataArray``
For each point in the point collection after remapping, the index of
the transect it belongs to (so that remapped results can be separated
back into individual transects for plotting)
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
collectionDescriptor : ``PointCollectionDescriptor``
The mesh descriptor for the collection of all points in all transects,
used for remapping
zMid : ``xarray.DataArray``
Vertical coordinate at the center of layers, used to interpolate to
reference depths
"""
# Authors
# -------
# <NAME>
def __init__(self, mpasClimatologyTask, parentTask, climatologyName,
transectCollectionName, variableList, seasons, obsDatasets,
verticalComparisonGridName='obs', verticalComparisonGrid=None,
subtaskName='remapTransects'):
# {{{
'''
Construct the analysis task and adds it as a subtask of the
``parentTask``.
Parameters
----------
mpasClimatologyTask : ``MpasClimatologyTask``
The task that produced a climatology to be remapped and plotted
as a transect
parentTask : ``AnalysisTask``
The parent task, used to get the ``taskName``, ``config`` and
``componentName``
climatologyName : str
A name that describes the climatology (e.g. a short version of
the important field(s) in the climatology) used to name the
subdirectories for each stage of the climatology
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
variableList : list of str
A list of variable names in ``timeSeriesStatsMonthly`` to be
included in the climatologies
seasons : list of str
A list of seasons (keys in ``shared.constants.monthDictionary``)
to be computed or ['none'] (not ``None``) if only monthly
climatologies are needed.
obsDatasets : TransectsObservations
A dictionary of observational datasets
verticalComparisonGridName : {'obs', 'mpas'} or any str, optional
The vertical grid name on which to compare MPAS data with
observations. 'obs' indicates the locations of the original
observations; 'mpas' is the vertical locations of MPAS points,
remapped to the observation latitude/longitude. If any other,
string, verticalComparisonGrid should be a 1D numpy array and this
name should be a useful (and unique) description of that grid.
verticalComparisonGrid : 1D numpy array, optional
The vertical grid on which to compare MPAS data with observations
if ``verticalComparisonGridName`` is not 'obs' or 'mpas'. The
values should be elevations (in m, typically negative).
subtaskName : str, optional
The name of the subtask
'''
# Authors
# -------
# <NAME>
# call the constructor from the base class
# (RemapMpasClimatologySubtask)
super(ComputeTransectsSubtask, self).__init__(
mpasClimatologyTask, parentTask,
climatologyName=climatologyName, variableList=variableList,
seasons=seasons, subtaskName=subtaskName)
self.obsDatasets = obsDatasets
self.transectCollectionName = transectCollectionName
self.verticalComparisonGridName = verticalComparisonGridName
self.verticalComparisonGrid = verticalComparisonGrid
# }}}
def setup_and_check(self): # {{{
'''
Creates a PointCollectionDescriptor describing all the points in the
transects to remap to. Keeps track of which transects index each point
belongs to.
Raises
------
IOError :
If a restart file is not available from which to read mesh
information or if no history files are available from which to
compute the climatology in the desired time range.
'''
# Authors
# -------
# <NAME>
transectNumber = []
lats = []
lons = []
x = []
obsDatasets = self.obsDatasets.get_observations()
datasets = list(obsDatasets.values())
for transectIndex, ds in enumerate(datasets):
localLats = list(ds.lat.values)
localLons = list(ds.lon.values)
localX = list(ds.x.values)
localIndices = [transectIndex for lat in localLats]
lats.extend(localLats)
lons.extend(localLons)
x.extend(localX)
transectNumber.extend(localIndices)
self.transectNumber = xr.DataArray.from_dict(
{'dims': ('nPoints'),
'data': transectNumber})
self.x = xr.DataArray.from_dict(
{'dims': ('nPoints'),
'data': x})
self.collectionDescriptor = PointCollectionDescriptor(
lats, lons, collectionName=self.transectCollectionName,
units='degrees', outDimension='nPoints')
self.add_comparison_grid_descriptor(self.transectCollectionName,
self.collectionDescriptor)
# then, call setup_and_check from the base class
# (RemapMpasClimatologySubtask)
super(ComputeTransectsSubtask, self).setup_and_check()
for transectName in obsDatasets:
obsDatasets[transectName].close()
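# A minimal sketch of the bookkeeping above (hypothetical sizes): if transect 0
# has 3 points and transect 1 has 2, then after the loop
#     lats = [lat00, lat01, lat02, lat10, lat11]
#     transectNumber = [0, 0, 0, 1, 1]
# so the single remapped 'nPoints' dimension can later be split back into
# individual transects by selecting on transectNumber (see run_task).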
def run_task(self): # {{{
'''
Compute climatologies of ocean fields from E3SM/MPAS output along transects
This function has been overridden to compute ``zMid`` based on data
from a restart file for later use in vertically interpolating to
reference depths.
'''
# Authors
# -------
# <NAME>
# first, compute zMid and cell mask from the restart file
with xr.open_dataset(self.restartFileName) as ds:
ds = ds[['maxLevelCell', 'bottomDepth', 'layerThickness']]
ds = ds.isel(Time=0)
self.maxLevelCell = ds.maxLevelCell - 1
zMid = compute_zmid(ds.bottomDepth, ds.maxLevelCell,
ds.layerThickness)
self.zMid = \
xr.DataArray.from_dict({'dims': ('nCells', 'nVertLevels'),
'data': zMid})
ds.close()
# then, call run from the base class (RemapMpasClimatologySubtask),
# which will perform the horizontal remapping
super(ComputeTransectsSubtask, self).run_task()
obsDatasets = self.obsDatasets.get_observations()
self.logger.info('Interpolating each transect vertically...')
# finally, vertically interpolate and write out each transect
for season in self.seasons:
remappedFileName = self.get_remapped_file_name(
season, comparisonGridName=self.transectCollectionName)
with xr.open_dataset(remappedFileName) as ds:
transectNames = list(obsDatasets.keys())
for transectIndex, transectName in enumerate(transectNames):
self.logger.info(' {}'.format(transectName))
dsObs = obsDatasets[transectName]
outFileName = self.get_remapped_file_name(
season, comparisonGridName=transectName)
outObsFileName = self.obsDatasets.get_out_file_name(
transectName, self.verticalComparisonGridName)
self._vertical_interp(ds, transectIndex, dsObs,
outFileName, outObsFileName)
ds.close()
for transectName in obsDatasets:
obsDatasets[transectName].close()
# }}}
def customize_masked_climatology(self, climatology, season): # {{{
'''
Add zMid to the climatology
Parameters
----------
climatology : ``xarray.Dataset`` object
the climatology data set
season : str
The name of the season to be masked
Returns
-------
climatology : ``xarray.Dataset`` object
the modified climatology data set
'''
# Authors
# -------
# <NAME>
zIndex = xr.DataArray.from_dict(
{'dims': ('nVertLevels',),
'data': numpy.arange(climatology.sizes['nVertLevels'])})
cellMask = zIndex < self.maxLevelCell
for variableName in self.variableList:
climatology[variableName] = \
climatology[variableName].where(cellMask)
climatology['zMid'] = self.zMid
climatology = climatology.transpose('nVertLevels', 'nCells')
return climatology # }}}
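# A minimal numpy/xarray sketch of the vertical masking above (illustrative
# values only):
#     zIndex = xr.DataArray(numpy.arange(4), dims='nVertLevels')
#     maxLevelCell = xr.DataArray([2, 3], dims='nCells')
#     cellMask = zIndex < maxLevelCell   # broadcasts to (nVertLevels, nCells)
# where(cellMask) then sets levels below the deepest valid level to NaN.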
def customize_remapped_climatology(self, climatology, comparisonGridNames,
season): # {{{
'''
Add the transect index to the data set
Parameters
----------
climatology : ``xarray.Dataset``
The MPAS climatology data set that has been remapped
comparisonGridNames : {'latlon', 'antarctic'}
The name of the comparison grid to use for remapping.
season : str
The name of the season to be masked
Returns
-------
climatology : ``xarray.Dataset``
The same data set with any custom fields added or modifications
made
'''
# Authors
# -------
# <NAME>
climatology['transectNumber'] = self.transectNumber
climatology['x'] = self.x
if 'nCells' in climatology.dims:
climatology = climatology.rename({'nCells': 'nPoints'})
dims = ['nPoints', 'nVertLevels']
if 'nv' in climatology.dims:
dims.append('nv')
climatology = climatology.transpose(*dims)
return climatology # }}}
def _vertical_interp(self, ds, transectIndex, dsObs, outFileName,
outObsFileName):
'''
Vertically interpolate a transect and write it to a unique file
Parameters
----------
ds : ``xarray.Dataset``
The data set containing all transects before vertical interpolation
transectIndex : int
The index of the transect to extract
dsObs : ``xarray.Dataset``
The obs dataset used if verticalComparisonGridName is 'obs'
outFileName : str
The name of the file to which the resulting data set should be
written
outObsFileName : str
The name of the file to which the resulting obs data set should be
written if it is interpolated
'''
# Authors
# -------
# <NAME>
if os.path.exists(outFileName):
return
ds = ds.where(ds.transectNumber == transectIndex, drop=True)
if self.verticalComparisonGridName == 'mpas':
z = ds.zMid
z = z.rename({'nVertLevels': 'nzOut'})
elif self.verticalComparisonGridName == 'obs':
z = dsObs.z
z = z.rename({'nz': 'nzOut'})
else:
# a defined vertical grid
z = (('nzOut', ), self.verticalComparisonGrid)
if self.verticalComparisonGridName == 'mpas':
ds = ds.rename({'zMid': 'z', 'nVertLevels': 'nz'})
else:
ds['z'] = z
# remap each variable
ds = interp_1d(ds, inInterpDim='nVertLevels', inInterpCoord='zMid',
outInterpDim='nzOut', outInterpCoord='z')
ds = ds.rename({'nzOut': 'nz'})
if self.verticalComparisonGridName != 'obs' and 'nz' in dsObs.dims:
dsObs['zOut'] = z
# remap each variable
dsObs = interp_1d(dsObs, inInterpDim='nz', inInterpCoord='z',
outInterpDim='nzOut', outInterpCoord='zOut')
dsObs = dsObs.rename({'nzOut': 'nz'})
write_netcdf(dsObs, outObsFileName)
ds = ds.drop_vars(['validMask', 'transectNumber'])
write_netcdf(ds, outFileName) # }}}
# }}}
class TransectsObservations(object): # {{{
"""
A class for loading and manipulating transect observations
Attributes
----------
config : ``MpasAnalysisConfigParser``
Configuration options
obsFileNames : OrderedDict
The names of transects and the file names of the corresponding
observations for a transect
horizontalResolution : str
'obs' for the obs as they are or a size in km if subdivision is
desired.
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
obsDatasets : OrderedDict
A dictionary of observational datasets
"""
# Authors
# -------
# <NAME>
def __init__(self, config, obsFileNames, horizontalResolution,
transectCollectionName): # {{{
'''
Construct the object, setting the observations dictionary to None.
Parameters
----------
config : ``MpasAnalysisConfigParser``
Configuration options
obsFileNames : OrderedDict
The names of transects and the file names of the corresponding
observations for a transect
horizontalResolution : str
'obs' for the obs as they are or a size in km if subdivision is
desired.
transectCollectionName : str
A name that describes the collection of transects (e.g. the name
of the collection of observations) used to name the
destination "mesh" for regridding
'''
# Authors
# -------
# <NAME>
self.obsDatasets = None
self.config = config
self.obsFileNames = obsFileNames
if horizontalResolution != 'obs':
horizontalResolution = float(horizontalResolution)
self.horizontalResolution = horizontalResolution
self.transectCollectionName = transectCollectionName
def get_observations(self):
# {{{
'''
Read in and set up the observations.
Returns
-------
obsDatasets : OrderedDict
The observational dataset
'''
# Authors
# -------
# <NAME>
obsDatasets = OrderedDict()
for name in self.obsFileNames:
outFileName = self.get_out_file_name(name)
if os.path.exists(outFileName):
dsObs = xr.open_dataset(outFileName)
dsObs.load()
else:
dsObs = self.build_observational_dataset(
self.obsFileNames[name], name)
dsObs.load()
# make sure lat and lon are coordinates
for coord in ['lon', 'lat']:
dsObs.coords[coord] = dsObs[coord]
if self.horizontalResolution == 'obs':
dsObs = self._add_distance(dsObs)
else:
dsObs = self._subdivide_observations(dsObs)
write_netcdf(dsObs, outFileName)
obsDatasets[name] = dsObs
return obsDatasets # }}}
def build_observational_dataset(self, fileName, transectName): # {{{
'''
read in the data sets for observations, and possibly rename some
variables and dimensions
Parameters
----------
fileName : str
observation file name
transectName : str
transect name
Returns
-------
dsObs : ``xarray.Dataset``
The observational dataset
'''
# Authors
# -------
# <NAME>-Davis
dsObs = xr.open_dataset(fileName)
# observations are expected to have horizontal dimension nPoints and
# vertical dimension nz, as well as horizontal coordinates lat and lon
# and vertical coordinate z. Override this function if these need to
# be renamed from the observations file.
return dsObs # }}}
def get_out_file_name(self, transectName,
verticalComparisonGridName='obs'): # {{{
'''
Given config options, the name of a transect and the name of the vertical
comparison grid, returns the full path of the corresponding output
observation file.
Parameters
----------
transectName : str
The name of the transect
verticalComparisonGridName : {'obs', 'mpas'} or any str, optional
The vertical grid name on which to compare MPAS data with
observations. 'obs' indicates the locations of the original
observations; 'mpas' is the vertical locations of MPAS points,
remapped to the observation latitude/longitude. If any other
string, verticalComparisonGrid should be a 1D numpy array and this
name should be a useful (and unique) description of that grid.
Returns
-------
fileName : str
The path to the observation file for the specified transect.
'''
# Authors
# -------
# <NAME>
config = self.config
remappedDirectory = build_config_full_path(
config=config, section='output',
relativePathOption='remappedClimSubdirectory',
relativePathSection='oceanObservations')
make_directories(remappedDirectory)
if verticalComparisonGridName == 'obs':
fileName = '{}/{}_{}.nc'.format(
remappedDirectory, self.transectCollectionName, transectName)
else:
fileName = '{}/{}_{}_{}.nc'.format(
remappedDirectory, self.transectCollectionName, transectName,
verticalComparisonGridName)
return fileName # }}}
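# For example (hypothetical names), with transectCollectionName 'WOCE_transects'
# and transectName 'A21', this returns something like
#     <remappedClimSubdirectory>/WOCE_transects_A21.nc
# and, for a custom vertical grid named 'uniform_10m_grid',
#     <remappedClimSubdirectory>/WOCE_transects_A21_uniform_10m_grid.nc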
def _add_distance(self, dsObs): # {{{
'''
Compute the cumulative great-circle distance along the transect and store
it as the coordinate ``x``
'''
lat = dsObs.lat.values
lon = dsObs.lon.values
# compute the great circle distance between these points
dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:])
xIn = numpy.zeros(lat.shape)
xIn[1:] = numpy.cumsum(dxIn)
dsObs['x'] = (('nPoints',), xIn)
return dsObs # }}}
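# A small worked example of the distance bookkeeping (made-up numbers): with
# three segment lengths dxIn = [10., 20., 5.] km between four points,
#     xIn = [0., 10., 30., 35.]
# i.e. ``x`` is the cumulative along-track distance from the first point.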
def _subdivide_observations(self, dsObs): # {{{
'''
Subdivide each segment of the transect so the horizontal resolution
approximately matches the requested resolution
'''
lat = dsObs.lat.values
lon = dsObs.lon.values
# compute the great circle distance between these points
dxIn = self._haversine(lon[0:-1], lat[0:-1], lon[1:], lat[1:])
nSegments = numpy.maximum(
(dxIn / self.horizontalResolution + 0.5).astype(int), 1)
xIn = numpy.zeros(lat.shape)
xIn[1:] = numpy.cumsum(dxIn)
outIndex = []
for index in range(len(xIn) - 1):
n = nSegments[index]
outIndex.extend(index + numpy.arange(0, n) / n)
outIndex.append(len(xIn) - 1)
xOut = numpy.interp(outIndex, numpy.arange(len(xIn)), xIn)
dsObs['xIn'] = (('nPoints',), xIn)
dsObs['xOut'] = (('nPointsOut',), xOut)
# interpolate fields without and with vertical dimension
dsObs = interp_1d(dsObs, inInterpDim='nPoints',
inInterpCoord='xIn', outInterpDim='nPointsOut',
outInterpCoord='xOut')
dsObs = dsObs.drop_vars(['xIn'])
dsObs = dsObs.rename({'nPointsOut': 'nPoints', 'xOut': 'x'})
return dsObs # }}}
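# A small worked example of the subdivision (made-up numbers): with
# horizontalResolution = 10 km and segment lengths dxIn = [25., 8.] km,
# nSegments = [3, 1], outIndex = [0, 1/3, 2/3, 1, 2] and xOut is obtained by
# interpolating xIn at those fractional indices; the fields are then
# interpolated from the original points onto the subdivided ones by interp_1d.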
def _haversine(self, lon1, lat1, lon2, lat2): # {{{
"""
Calculate the great circle distance in km between two points on the
earth (specified in decimal degrees). Based on
https://stackoverflow.com/a/4913653
"""
# convert decimal degrees to radians
lon1 = numpy.deg2rad(lon1)
lat1 = numpy.deg2rad(lat1)
lon2 = numpy.deg2rad(lon2)
lat2 = numpy.deg2rad(lat2)
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = numpy.sin(dlat / 2.)**2 + numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin(dlon / 2.)**2
c = 2. * numpy.arcsin(numpy.sqrt(a))
# distance in km, assuming a mean Earth radius of 6371 km
return 6371. * c
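# Quick sanity checks of the formula above (approximate, assuming the mean
# Earth radius of 6371 km used in the return value):
#     one degree of latitude anywhere   -> ~111.2 km
#     one degree of longitude at 60N    -> ~55.6 km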
#!/usr/bin/python
"""
This module reads and writes the parameters of a Multi Gaussian Expansion model (Monnet et al.
1992, Emsellem et al. 1994). It can read and write MGE input ascii files and
compute a number of basic parameters for the corresponding models.
This module is updated regularly and may still contain some obvious bugs. A stable version will
be available hopefully before the end of 2012.
For questions, please contact <NAME> at <EMAIL>
"""
"""
Importing the most important modules
This MGE module requires NUMPY and SCIPY
"""
import os
try:
import numpy as np
except ImportError:
raise Exception("numpy is required for pygme")
try:
from scipy import special
except ImportError:
raise Exception("scipy is required for pygme")
from numpy import asarray
from numpy import cos, sin, copy, sqrt, exp
from .rwcfor import floatMGE
from .mge_miscfunctions import print_msg
__version__ = '1.1.6 (22 Dec 2014)'
## Version 1.1.6 : EE - Fixed found2D
## Version 1.1.5 : EE - Fixed mcut input parameter
## Version 1.1.4 : EE - Fixed a typo on indices
## Version 1.1.3 : EE - Added BetaEps, M/L etc also in the 2D Gauss just in case
## Version 1.1.2 : EE - Changed imin,imax into ilist
## Version 1.1.1 : EE - Removed the condition for comp_Nparticles when reading an mge
## Version 1.1.0 : EE - Some serious cleanup in the naming of the variables
## Version 1.0.2 : EE - few minor changes including adding saveMGE
## Version 1.0.1 : EE - replaces ones to zeros in initialisation of GaussGroupNumber
############################################################################
# Class to define dynamical MGE parameters useful for calculation purposes #
############################################################################
class dynParamMGE():
"""
Class to add some parameters which are useful for dynamical routines
"""
def __init__(self, MGEmodel):
"""
Initialisation of the additional dynamical parameters
"""
if (MGEmodel._findGauss3D > 0):
self.Sig3Darc2_soft = MGEmodel.Sig3Darc**2 + MGEmodel.Softarc**2 # Softened sigma squared in arcsec^2
self.dSig3Darc2_soft = 2. * self.Sig3Darc2_soft
# Deriving some more numbers
self.Bij = np.zeros((MGEmodel.nGauss, MGEmodel.nGauss), floatMGE)
self.Bij_soft = np.zeros((MGEmodel.nGauss, MGEmodel.nGauss), floatMGE)
self.e2q2dSig3Darc2 = np.zeros(MGEmodel.nGauss, floatMGE)
self.e2q2Sig3Darc2 = np.zeros(MGEmodel.nGauss, floatMGE)
self.sqpi2s = sqrt(np.pi / 2.) / MGEmodel.qSig3Darc
self.qq2s2 = 4. * MGEmodel.QxZ2 * MGEmodel.Sig3Darc2
self.q2Sig3Darc2 = MGEmodel.QxZ2 * MGEmodel.Sig3Darc2
for i in range(MGEmodel.nGauss) :
if self.q2Sig3Darc2[i] != 0. :
self.e2q2dSig3Darc2[i] = MGEmodel.e2[i] / (2. * self.q2Sig3Darc2[i])
self.e2q2Sig3Darc2[i] = MGEmodel.e2[i] / self.q2Sig3Darc2[i]
else :
print("WARNING: %d component has q2*Sig2=0" %(i+1))
for j in range(MGEmodel.nGauss) :
self.Bij[i,j] = MGEmodel.e2[j] - self.q2Sig3Darc2[i] / MGEmodel.Sig3Darc2[j]
self.Bij_soft[i,j] = MGEmodel.e2[j] - self.q2Sig3Darc2[i] / self.Sig3Darc2_soft[j]
self.kRZ2 = MGEmodel.kRZ**2
self.mkRZ2q2 = 1. - self.kRZ2 * MGEmodel.QxZ2
self.mkRZ2 = 1. - self.kRZ2
self.Dij = np.zeros((MGEmodel.nGauss,MGEmodel.nGauss), floatMGE)
self.Dij_soft = np.zeros((MGEmodel.nGauss,MGEmodel.nGauss), floatMGE)
for i in range(MGEmodel.nGauss) :
for j in range(MGEmodel.nGauss) :
self.Dij[i,j] = self.mkRZ2[i] * self.Bij[i,j] + MGEmodel.e2[j] * self.kRZ2[i]
self.Dij_soft[i,j] = self.mkRZ2[i] * self.Bij_soft[i,j] + MGEmodel.e2[j] * self.kRZ2[i]
## ===========================================================================================
############################################################################
# Class to define photometric MGE parameters useful for calculation purposes #
############################################################################
class photParamMGE():
"""
Class to add some parameters which are useful for photometric routines
"""
def __init__(self, MGEmodel):
"""
Initialisation of the additional photometric parameters
These are hidden in this class
"""
if (MGEmodel._findGauss3D > 0):
self.dSig3Darc = sqrt(2.) * MGEmodel.Sig3Darc
self.dSig3Darc2 = 2. * MGEmodel.Sig3Darc2
self.qParc = MGEmodel.QxZ * MGEmodel.Parc
self.dqSig3Darc = sqrt(2.) * MGEmodel.qSig3Darc
## ===========================================================================================
class paramMGE(object) :
def __init__(self, infilename=None, saveMGE=None, indir=None, **kwargs) :
"""
Initialisation of the MGE model - reading the input file
infilename : input MGE ascii file defining the MGE model
indir: directory where to find the mge file
saveMGE: directory in which some MGE model will be saved automatically during the
realisation of the Nbody sample
If saveMGE is None (default), it will be defined as ~/MGE
This will be created by default (if not existing)
Additional Input (not required):
nTotalPart: total number of particles
nPartStar : number of Stellar particles
nPartHalo: number of Dark Matter particles
nPartGas : number of Gas particles
FirstRealisedPart : number for the first realised Particle
This is useful if we wish to realise the model in chunks
nMaxPart : Max number of particles to be realised for this run
mcut : cut in pc, Default is 50 000 (50 kpc)
Used for the Ellipsoid truncation
Rcut : cut in pc, Default is 50 000 (50 kpc)
Zcut : cut in pc, Default is 50 000 (50 kpc)
Used for the Cylindre truncation
FacBetaEps : Coefficient for : Beta = Coef * Epsilon
Default if Coef = 0.6
Can also be a vector (one for each Gaussian)
MaxFacBetaEps: maximum value allowed for FacBetaEps. Default is 0.8.
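Example (illustrative only; file and directory names are hypothetical):
mge = paramMGE(infilename="myGalaxy.mge", indir="./models/", nTotalPart=100000)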
"""
## Now checking if saveMGE has been defined and act accordingly
if saveMGE is None :
## This is the default dir (~/MGE) if none is given
saveMGE = os.path.expanduser("~/MGE")
if not os.path.isdir(saveMGE) :
## Creating the default saveMGE directory
os.system("mkdir ~/MGE")
## Test now if this exists
if not os.path.isdir(saveMGE) :
print("ERROR: directory for Archival does not exist = %s"%(saveMGE))
return
## Finally save the value of saveMGE in the structure
self.saveMGE = saveMGE
## Setting up some fixed variable #####################################
## G is in (km/s)2. Msun-1 . pc .
## OLD VALUE WAS: self.Gorig = 0.0043225821
self.Gorig = floatMGE(0.0043225524) # value from <NAME>
self.nPart = np.int(kwargs.get("nTotalPart", 0)) # TOTAL Number of n bodies
self.nPartStar = np.int(kwargs.get("nPartStar", 0)) # Number of Stellar particles
self.nPartHalo = np.int(kwargs.get("nPartHalo", 0)) # Number of Dark Matter particles
self.nPartGas = np.int(kwargs.get("nPartGas", 0)) # Number of Gas particles
self.Add_BHParticle = True # Add a BH if Mbh > 0 when realising particles
self.FirstRealisedPart = np.int(kwargs.get("FirstRealisedPart", 0)) # First Realised Particle
self.nMaxPart = np.int(kwargs.get("nMaxPart", 0)) # Max number of particles to be realised
self.Euler = np.array([0., 90., 0.]) # Inclination - Default is 90 degrees = edge-on
self.TruncationMethod = "Ellipsoid" # Default method to truncate Gaussians (other = Cylindre)
self.mcut = kwargs.get("Mcut", 50000.) # Default truncation in pc - Default is 50kpc
self.Rcut = kwargs.get("Rcut", 50000.) # Default truncation in pc - Default is 50kpc
self.Zcut = kwargs.get("Zcut", 50000.) # Default truncation in pc - Default is 50kpc
self.Mbh = 0. # Black hole mass
self.axi = 1
self.Nquad = 100 # Number of Points for the Quadrature, default is 100
self._findGauss3D = 0
self._findGauss2D = 0
self.FacBetaEps = kwargs.get("FacBetaEps", 0.6) # Coefficient for the BETAEPS option: Beta = Coef * Epsilon
self.MaxFacBetaEps = kwargs.get("MaxFacBetaEps", 0.8) # Max value the BETAEPS Factor
self.DummyFacBetaEps = 0.6
## Test if infilename is None. If this is the case reset MGE with 0 Gaussians
self.nGauss = self.nGroup = self.nDynComp = 0
self._reset(All=True)
if infilename is not None :
self.read_mge(infilename, indir=indir)
def _reset(self, **kwargs) :
"""
Reset values of the MGE model
Possible options:
nGauss
nGroup
NDynComp
Dist
Softening
infilename
pwd
All : will set all to None, or 0 (and Dist to 10 Mpc)
"""
AllReset = kwargs.get("All", False)
if AllReset :
for key in ["infilename", "pwd"] :
kwargs[key] = ""
for key in ["nGauss", "nGroup", "nDynComp"] :
kwargs[key] = 0
self._reset_Dist()
self._reset_Softening()
kwargs["Dist"] = self.Dist
kwargs["Softening"] = self.Softening
for key in kwargs :
if key == "nGauss" :
nGauss = kwargs.get("nGauss", None)
self._reset_nGauss(nGauss) # Set nGauss
elif key == "nGroup" :
nGroup = kwargs.get("nGroup", None)
self._reset_nGroup(nGroup) # Set nGroup
elif key == "Dist" :
Dist = kwargs.get("Dist", None)
self._reset_Dist(Dist) # Distance in Mpc - Default is 10 Mpc
elif key == "Softening" :
Softening = kwargs.get("Softening", None)
self._reset_Softening(Softening) # Set Softening
elif key == "nDynComp" :
self.nDynComp = kwargs.get("nDynComp", None)
elif key == "infilename" :
self.infilename = kwargs.get("infilename", None)
elif key == "pwd" :
self.pwd = kwargs.get("pwd", None)
def _reset_nGroup(self, nGroup=None) :
## nGroup Reset
if nGroup is not None :
self.nGroup = nGroup # Number of Groups
self.nPartGroup = np.zeros((self.nGroup,), np.int) # Number of particles per Group
self.nRealisedPartGroup = np.zeros((self.nGroup,), np.int) # Number of REALISED particles per Group
## =============================================================
def _reset_nGauss(self, nGauss=0, verbose=0) :
## nGauss reset
if nGauss is not None :
if np.size(nGauss) == 3 :
self.nStarGauss = int(nGauss[0])
self.nGasGauss = int(nGauss[1])
self.nHaloGauss = int(nGauss[2])
self.nGauss = self.nStarGauss + self.nGasGauss + self.nHaloGauss
elif np.size(nGauss) == 1 :
self.nGauss = nGauss # Number of Gaussians
self.nStarGauss = nGauss
self.nGasGauss = self.nHaloGauss = 0
else :
print_msg("nGauss should contain 1 or 3 integers", 2)
return
self._findGauss3D = 0
self._findGauss2D = 0
self.Imax2D = np.zeros((self.nGauss,), floatMGE) # In Lsun pc-2
self.Sig2Darc = np.zeros((self.nGauss,), floatMGE) # in arcsecond
self.Q2D = np.zeros((self.nGauss,), floatMGE)
self.PAp = np.zeros((self.nGauss,), floatMGE)
self.Imax3D = np.zeros((self.nGauss,), floatMGE) # In Lsun pc-2 arcsec-1
self.Sig3Darc = np.zeros((self.nGauss,), floatMGE) # in arcsecond
self.QxZ = np.zeros((self.nGauss,), floatMGE)
self.QyZ = np.zeros((self.nGauss,), floatMGE)
self.ML = np.ones((self.nGauss,), floatMGE)
self.kRTheta = np.ones((self.nGauss,), floatMGE) # sigma_R / sigma_Theta
self.kRZ = np.ones((self.nGauss,), floatMGE) # sigma_R / sigma_Z
self.betaeps = np.zeros((self.nGauss,), np.int) # betaeps option (1 or 0)
self.epicycle = np.zeros((self.nGauss,), np.int) # epicycle option (1 or 0)
self.truncFlux = np.zeros((self.nGauss,), floatMGE)
self.MGEFlux = np.zeros((self.nGauss,), floatMGE)
self.truncMass = np.zeros((self.nGauss,), floatMGE)
self.MGEMass = np.zeros((self.nGauss,), floatMGE)
self.MGEFluxp = np.zeros((self.nGauss,), floatMGE)
self.GaussGroupNumber = np.ones((self.nGauss,), np.int) # Group Number for that Gaussian
self.GaussDynCompNumber = np.ones((self.nGauss,), np.int) # Dynamical Group Number for that Gaussian
self.TtruncMass = 0. # Total mass in Nbody
self.TtruncFlux = 0. # Total flux in Nbody
self.TMGEMass = 0. # Total mass of MGE model
self.TMGEFlux = 0. # Total flux of MGE model
self.axi = 1
## Change the Distance of the model ###########################
def _reset_Dist(self, Dist=None, verbose=True) :
if Dist is None :
if hasattr(self, "Dist"):
Dist = self.Dist
else:
Dist = 10.0 ## Setting the default in case the Distance is negative
print("WARNING: dummy Dist value for reset")
if Dist <= 0. :
if verbose:
print("WARNING: you provided a negative Dist value")
print("WARNING: it will be set to the default (10 Mpc)")
Dist = 10.0 ## Setting the default in case the Distance is negative
self.Dist = floatMGE(Dist)
self.pc_per_arcsec = floatMGE(np.pi * self.Dist / 0.648)
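## Worked example: at the default Dist = 10 Mpc,
## pc_per_arcsec = pi * 10 / 0.648 ~ 48.5 pc per arcsec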
self.mcutarc = self.mcut / self.pc_per_arcsec #Default truncation - in arcseconds at 10 Mpc
self.Rcutarc = self.Rcut / self.pc_per_arcsec #Default truncation - in arcseconds at 10 Mpc
self.Zcutarc = self.Zcut / self.pc_per_arcsec #Default truncation - in arcseconds at 10 Mpc
## G is in (km/s)2. Msun-1 . pc .
## We multiply it by pc / arcsec
## so it becomes:
## (km/s)2. Msun-1 . pc2 . arcsec-1
## OLD VALUE WAS: self.Gorig = 0.0043225821
self.G = self.Gorig * self.pc_per_arcsec
self.PIG = floatMGE(4. * np.pi * self.G)
## Adding the standard parameters
self._add_PhotometricParam()
## =============================================================
## Change the softening of the model ###########################
def _reset_Softening(self, Softening=0.0, verbose=0) :
"""
Change the softening value of the model (in pc)
"""
if Softening is not None :
self.Softening = Softening # softening in pc
self.Softarc = self.Softening / self.pc_per_arcsec # Softening in arcsec
self.SoftarcMbh = self.Softarc
self.SoftarcMbh2 = self.SoftarcMbh**2
## Add dynamics parameters: this is needed since the softening just changed
self._dParam = dynParamMGE(self)
## ============================================================
## List the Gaussians in the different Groups #################
def _listGroups(self) :
# Reinitialise the list of Gaussians in the Groups
self.listGaussGroup = []
for i in range(self.nGroup) :
self.listGaussGroup.append(np.where(self.GaussGroupNumber == (i+1))[0])
## ============================================================
## List the Gaussians in the different Dynamics Groups #################
def _listDynComps(self) :
# Reinitialise the list of Gaussians in the Groups
self.listGaussDynComp = []
for i in range(self.nDynComp) :
self.listGaussDynComp.append(np.where(self.GaussDynCompNumber == (i+1))[0])
## ============================================================
## Decode the SGAUSS and associated lines in mge File #############
def _read_GAUSS2D(self, linesplit, findGauss2D) :
self.Imax2D[findGauss2D] = floatMGE(linesplit[1]) # I in Lum.pc-2
self.Sig2Darc[findGauss2D] = floatMGE(linesplit[2]) # Sigma in arcsec
self.Q2D[findGauss2D] = floatMGE(linesplit[3])
self.PAp[findGauss2D] = floatMGE(linesplit[4])
lelines = len(linesplit)
if lelines >= 6 :
self.ML[findGauss2D] = floatMGE(linesplit[5])
if lelines >= 8 :
if linesplit[6][:3] == "EPI" :
self.kRTheta[findGauss2D] = -1.0
self.epicycle[findGauss2D] = 1
else :
self.kRTheta[findGauss2D] = floatMGE(linesplit[6])
self.epicycle[findGauss2D] = 0
if linesplit[7][:4] == "BETA" :
self.betaeps[findGauss2D] = 1
else :
self.kRZ[findGauss2D] = floatMGE(linesplit[7])
self.betaeps[findGauss2D] = 0
if lelines >= 9 :
self.GaussGroupNumber[findGauss2D] = int(linesplit[8])
if lelines >= 10 :
self.GaussDynCompNumber[findGauss2D] = int(linesplit[9])
return
## Decode the SGAUSS and associated lines in mge File #############
def _read_GAUSS3D(self, linesplit, findGauss3D) :
self.Imax3D[findGauss3D] = floatMGE(linesplit[1]) # I in Lum.pc-2.arcsec-1
self.Sig3Darc[findGauss3D] = floatMGE(linesplit[2]) # Sigma in arcsec
self.QxZ[findGauss3D] = floatMGE(linesplit[3])
self.QyZ[findGauss3D] = floatMGE(linesplit[4])
self.ML[findGauss3D] = floatMGE(linesplit[5])
lelines = len(linesplit)
if lelines >= 8 :
if linesplit[6][:3] == "EPI" :
self.kRTheta[findGauss3D] = -1.0
self.epicycle[findGauss3D] = 1
else :
self.kRTheta[findGauss3D] = floatMGE(linesplit[6])
self.epicycle[findGauss3D] = 0
if linesplit[7][:4] == "BETA" :
self.kRZ[findGauss3D] = 1. / sqrt(1. - (self.FacBetaEps[findGauss3D] * (1. - self.QxZ[findGauss3D])))
self.betaeps[findGauss3D] = 1
else :
self.kRZ[findGauss3D] = floatMGE(linesplit[7])
self.betaeps[findGauss3D] = 0
if lelines >= 9 :
self.GaussGroupNumber[findGauss3D] = int(linesplit[8])
if lelines >= 10 :
self.GaussDynCompNumber[findGauss3D] = int(linesplit[9])
if (self.QxZ[findGauss3D] != self.QyZ[findGauss3D]) :
self.axi = 0
print('Detected triaxial component %d: self.axi set to 0'%(findGauss3D))
return
## ============================================================
def _init_BetaEps(self, verbose=True) :
"""
We initialise here the BetaEps vector using the input value
If a scalar, it is transformed into a vector of constant values.
It will only be used for components that have the betaeps option =1.
"""
if np.size(self.FacBetaEps) == 1 :
self.FacBetaEps = np.array([self.FacBetaEps] * self.nGauss)
elif np.size(self.FacBetaEps) != self.nGauss :
print("WARNING: FacBetaEps has a dimension which is not consistent with the number of Gaussians")
print("WARNING: Should be a scalar or a 1D array of size nGauss")
print("WARNING: We will therefore use the fixed default value = 0.6 instead.")
self.FacBetaEps = np.array([0.6] * self.nGauss)
self.FacBetaEps = np.asarray(self.FacBetaEps)
## Checking that no value goes beyond MaxFacBetaEps
if np.any(self.FacBetaEps > self.MaxFacBetaEps) :
print("WARNING: FacBetaEps cannot be set to values higher than %5.3f"%(self.MaxFacBetaEps))
print("WARNING: Input FacBetaEps = ", self.FacBetaEps)
print("WARNING: We will change these values to 0.6.")
self.FacBetaEps = np.where(self.FacBetaEps > self.MaxFacBetaEps, self.MaxFacBetaEps, self.FacBetaEps)
if verbose:
print("The BetaEps vector (beta = FacBetaEps * Epsilon) is fixed to ")
print(" ", self.FacBetaEps)
if self.betaeps.any() :
self.kRZ[self.betaeps == 1] = np.zeros(np.sum(self.betaeps, dtype=np.int), floatMGE) + 1. / sqrt(1. - (self.FacBetaEps[self.betaeps == 1] * (1. - self.QxZ[self.betaeps == 1])))
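## Worked example of the BETAEPS relation above (illustrative numbers): for a
## Gaussian with QxZ = 0.7 and FacBetaEps = 0.6, epsilon = 1 - QxZ = 0.3 and
## kRZ = 1 / sqrt(1 - 0.6 * 0.3) = 1 / sqrt(0.82) ~ 1.10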
##################################################################
### Reading an ascii MGE file and filling the MGE class object ###
##################################################################
def read_mge(self, infilename=None, indir=None) :
if (infilename is not None) : # testing if the name was set
if indir is not None :
infilename = indir + infilename
if not os.path.isfile(infilename) : # testing the existence of the file
print('OPENING ERROR: File %s not found' %infilename)
return
################################
# Opening the ascii input file #
################################
self.pwd = os.getcwd()
self.fullMGEname = os.path.abspath(infilename)
self.MGEname = os.path.basename(self.fullMGEname)
self.pathMGEname = os.path.dirname(self.fullMGEname)
mge_file = open(self.fullMGEname)
lines = mge_file.readlines()
nlines = len(lines)
########################################
## First get the Number of gaussians ##
## And the global set of parameters ##
########################################
keynGauss = keynStarGauss = keynGasGauss = keynHaloGauss = keynGroup = 0
findGauss2D = findGauss3D = findStarGauss2D = findStarGauss3D = findGasGauss2D = findGasGauss3D = findHaloGauss2D = findHaloGauss3D = findGroup = 0
for i in range(nlines) :
if lines[i][0] == "#" or lines[i] == "\n" :
continue
sl = lines[i].split()
keyword = sl[0]
if (keyword[:6] == "NGAUSS") :
if len(sl) == 2 :
nStarGauss = int(sl[1])
nGasGauss = nHaloGauss = 0
elif len(sl) == 4 :
nStarGauss = int(sl[1])
nGasGauss = int(sl[2])
nHaloGauss = int(sl[3])
self.nStarGauss = nStarGauss
self.nGasGauss = nGasGauss
self.nHaloGauss = nHaloGauss
keynStarGauss = 1
keynGasGauss = 1
keynHaloGauss = 1
if nStarGauss < 0 or nGasGauss < 0 or nHaloGauss < 0:
print('ERROR: Keyword NGAUSS has some negative values: %d %d %d' %(nStarGauss, nGasGauss, nHaloGauss))
continue
nGauss = nStarGauss + nGasGauss + nHaloGauss
if nGauss <= 0 :
print('ERROR: Keyword NGAUSS is less than or equal to 0: %d' %nGauss)
continue
self._reset(nGauss=(nStarGauss, nGasGauss, nHaloGauss))
keynGauss = 1
elif (keyword[:4] == "DIST") :
Dist = floatMGE(sl[1])
self._reset_Dist(Dist)
elif (keyword[:6] == "NGROUP") :
nGroup = int(sl[1])
if nGroup < 0 :
print('ERROR: Keyword NGROUP is less than 0: %d' %nGroup)
continue
self._reset(nGroup=nGroup)
keynGroup = 1
elif (keyword[:9] == "NDYNCOMP") :
nDynComp = int(sl[1])
if nDynComp < 0 :
print('ERROR: Keyword NDYNCOMP is less than 0: %d' %nDynComp)
continue
self._reset(nDynComp=nDynComp)
if (keynGauss == 0) :
print('Could not find NGAUSS keyword in the MGE input File %s' %self.MGEname)
return
listStarGauss2D = []
listStarGauss3D = []
listGasGauss2D = []
listGasGauss3D = []
listHaloGauss2D = []
listHaloGauss3D = []
## We initialise the BetaEps Values using the input one
self._init_BetaEps()
##================================================================================##
## Then really decoding the lines and getting all the details from the ascii file ##
##================================================================================##
for i in range(nlines) :
if (lines[i][0] == "#") or (lines[i] == "\n") :
continue
sl = lines[i].split()
keyword = sl[0]
if (keyword[:6] == "NGAUSS") or (keyword[:4] == "DIST") or (keyword[:9] == "NGASGAUSS") or (keyword[:10] == "NHALOGAUSS") or (keyword[:11] == "NGROUP") or (keyword[:11] == "NDYNCOMP"):
continue
## projected gaussians
elif (keyword[:11] == "STARGAUSS2D") :
if findGauss2D == self.nGauss or keynStarGauss == 0 :
print('Line ignored (STARS: NGAUSS = %d): %s' %(self.nGauss,lines[i]))
continue
if findStarGauss2D == self.nStarGauss :
print('Line ignored (STAR: NSTARGAUSS = %d): %s' %(self.nStarGauss,lines[i]))
continue
self._read_GAUSS2D(sl, findGauss2D)
listStarGauss2D.append(findGauss2D)
findGauss2D += 1
findStarGauss2D += 1
elif (keyword[:10] == "GASGAUSS2D") :
if findGauss2D == self.nGauss or keynGasGauss == 0:
print('Line ignored (GAS: NGAUSS = %d): %s' %(self.nGauss,lines[i]))
continue
if findGasGauss2D == self.nGasGauss :
print('Line ignored (GAS: NGASGAUSS = %d): %s' %(self.nGasGauss,lines[i]))
continue
self._read_GAUSS2D(sl, findGauss2D)
listGasGauss2D.append(findGauss2D)
findGauss2D += 1
findGasGauss2D += 1
elif (keyword[:11] == "HALOGAUSS2D") :
if findGauss2D == self.nGauss or keynHaloGauss == 0:
print('Line ignored (HALO: NGAUSS = %d): %s' %(self.nGauss,lines[i]))
continue
if findHaloGauss2D == self.nHaloGauss :
print('Line ignored (HALO: NHALOGAUSS = %d): %s' %(self.nHaloGauss,lines[i]))
continue
self._read_GAUSS2D(sl, findGauss2D)
listHaloGauss2D.append(findGauss2D)
findGauss2D += 1
findHaloGauss2D += 1
## spatial gaussians
elif (keyword[:11] == "STARGAUSS3D") :
if findGauss3D == self.nGauss :
print('Line ignored (NGAUSS = %d): %s' %(self.nGauss,lines[i]))
continue
if findStarGauss3D == self.nStarGauss :
print('Line ignored (STAR: NSTARGAUSS = %d): %s' %(self.nStarGauss,lines[i]))
continue
self._read_GAUSS3D(sl, findGauss3D)
listStarGauss3D.append(findGauss3D)
findGauss3D += 1
findStarGauss3D += 1
elif (keyword[:10] == "GASGAUSS3D") :
if findGauss3D == self.nGauss or keynGasGauss == 0:
print('Line ignored (GAS: NGAUSS = %d): %s' %(self.nGauss,lines[i]))
continue
if findGasGauss3D == self.nGasGauss :
print('Line ignored (GAS: NGASGAUSS = %d): %s' %(self.nGasGauss,lines[i]))
continue
self._read_GAUSS3D(sl, findGauss3D)
listGasGauss3D.append(findGauss3D)
findGauss3D += 1
findGasGauss3D += 1
elif (keyword[:11] == "HALOGAUSS3D") :
if findGauss3D == self.nGauss or keynHaloGauss == 0:
print('Line ignored (HALO: NGAUSS = %d): %s' %(self.nGauss,lines[i]))
continue
if findHaloGauss3D == self.nHaloGauss :
print('Line ignored (HALO: NHALOGAUSS = %d): %s' %(self.nHaloGauss,lines[i]))
continue
self._read_GAUSS3D(sl, findGauss3D)
listHaloGauss3D.append(findGauss3D)
findGauss3D += 1
findHaloGauss3D += 1
## Center and other parameters
elif (keyword[:6] == "CENTER") :
self.Center = np.zeros((2,), floatMGE)
self.Center[0] = floatMGE(sl[1])
self.Center[1] = floatMGE(sl[2])
elif (keyword[:5] == "EULER") :
self.Euler = np.zeros((3,), floatMGE)
self.Euler[0] = floatMGE(sl[1])
self.Euler[1] = floatMGE(sl[2])
self.Euler[2] = floatMGE(sl[3])
elif (keyword[:3] == "MBH") :
self.Mbh = floatMGE(sl[1])
elif (keyword[:10] == "NPARTGROUP") :
GroupNumber = int(keyword[10:])
if GroupNumber > self.nGroup or GroupNumber < 0 or findGroup == self.nGroup or keynGroup == 0 or (len(sl) > 3) or (int(sl[1]) < 0) :
print('Line ignored (NPARTGROUP%2d: NGROUP = %d) = Wrong Entry %s' %(GroupNumber, self.nGroup, lines[i]))
continue
if len(sl) == 3 :
if (int(sl[2]) < 0) or (int(sl[2]) > int(sl[1])) :
print('Line ignored (NPARTGROUP: NGROUP = %d) = second entry should be greater than 0 and less than the first entry: %s' %(self.nGroup,lines[i]))
continue
self.nRealisedPartGroup[GroupNumber - 1] = int(sl[2]) # Number of particles in Group to be realised
self.nPartGroup[GroupNumber - 1] = int(sl[1]) # Number of particles in Group
findGroup += 1
else :
print('Could not decode the following keyword: %s' %keyword)
mge_file.close()
break
################################
# CLOSING the ascii input file #
################################
mge_file.close()
##============ Ascii file is now closed ====================##
## Reorganising the read parameters and data ##
## And setting this up into the structure ##
self._findGauss2D = findGauss2D
self._findGauss3D = findGauss3D
self.nGauss = max(findGauss3D, findGauss2D)
self.nGasGauss = max(findGasGauss3D, findGasGauss2D)
self.nHaloGauss = max(findHaloGauss3D, findHaloGauss2D)
self.nStarGauss = max(findStarGauss3D, findStarGauss2D)
## Reorganizing things to have the gas then halo components at the end
## ORDER OF GAUSSIANS IS THEREFORE: STARS, GAS, HALO
tempImax2D = copy(self.Imax2D)
tempSig2Darc = copy(self.Sig2Darc)
tempQ2D = copy(self.Q2D)
tempPAp = copy(self.PAp)
tempImax3D = copy(self.Imax3D)
tempSig3Darc = copy(self.Sig3Darc)
tempQxZ = copy(self.QxZ)
tempQyZ = copy(self.QyZ)
tempML = copy(self.ML)
tempkRTheta = copy(self.kRTheta)
tempkRZ = copy(self.kRZ)
tempbetaeps = copy(self.betaeps)
tempepicycle = copy(self.epicycle)
tempGaussGroup = copy(self.GaussGroupNumber)
tempGaussDynComp = copy(self.GaussDynCompNumber)
## Projected components
k = 0
j = findGauss2D - self.nHaloGauss - self.nGasGauss
l = findGauss2D - self.nHaloGauss
for i in range(findGauss2D) :
if i not in listGasGauss2D :
if i not in listHaloGauss2D :
ind = k
k += 1
else :
ind = l
l += 1
else :
ind = j
j += 1
self.Imax2D[ind] = tempImax2D[i] # I in Lum.pc-2
self.Sig2Darc[ind] = tempSig2Darc[i]
self.Q2D[ind] = tempQ2D[i]
self.PAp[ind] = tempPAp[i]
## Spatial components
k = 0
j = findGauss3D - self.nHaloGauss - self.nGasGauss
l = findGauss3D - self.nHaloGauss
self.listGasGauss = listGasGauss3D
self.listHaloGauss = listHaloGauss3D
self.listStarGauss = listStarGauss3D
for i in range(findGauss3D) :
if i not in listGasGauss3D :
if i not in listHaloGauss3D :
ind = k
k += 1
else :
ind = l
l += 1
else :
ind = j
j += 1
self.Imax3D[ind] = tempImax3D[i]
self.Sig3Darc[ind] = tempSig3Darc[i]
self.QxZ[ind] = tempQxZ[i]
self.QyZ[ind] = tempQyZ[i]
self.ML[ind] = tempML[i]
self.kRTheta[ind] = tempkRTheta[i]
self.kRZ[ind] = tempkRZ[i]
self.betaeps[ind] = tempbetaeps[i]
self.epicycle[ind] = tempepicycle[i]
self.GaussGroupNumber[ind] = tempGaussGroup[i]
self.GaussDynCompNumber[ind] = tempGaussDynComp[i]
#########################################
# Testing if all axis ratios are axisymmetric or not
self.axi = 1
for i in range(findGauss3D) :
if (self.QxZ[i] != self.QyZ[i]) :
self.axi = 0
print('Detected triaxial component: self.axi set to 0')
## Add all sorts of parameters which are useful for further derivation
self._comp_Nparticles()
## Set default inclination to 90 degrees
if 'Euler' in self.__dict__ :
inclination = self.Euler[1]
else :
self.Euler = np.zeros((3,), floatMGE)
self.Euler[1] = 90.0
if (self._findGauss3D == 0) and (self._findGauss2D > 0) :
self.deproject(inclin=self.Euler[1], verbose=False)
if self._findGauss3D > 0 :
if self._findGauss2D == 0 :
self.project(inclin=self.Euler[1], verbose=False)
else :
print_msg("Both 3D and 2D Gaussians were found: ", 1)
print_msg("We thus used the 2D Gaussians as a prior for the deprojection at %5.2f degrees"%(self.Euler[1]), 1)
self.deproject(inclin=self.Euler[1], verbose=True)
print("Found %d Spatial and %d projected Gaussians" %(self._findGauss3D, self._findGauss2D))
print("With an Inclination of %5.2f (degrees)"%(self.Euler[1]))
if self.nStarGauss != 0 :
print("This includes %d STAR Gaussians" %(np.maximum(findStarGauss3D, findStarGauss2D)))
if self.nGasGauss != 0 :
print("This includes %d GAS Gaussians" %(np.maximum(findGasGauss3D, findGasGauss2D)))
if self.nHaloGauss != 0 :
print("This also includes %d HALO Gaussians" %(np.maximum(findHaloGauss3D,findHaloGauss2D)))
print("Found %d Particle Groups" %(findGroup))
print("Found %d Dynamical Components (each may include a set of Gaussians)" %(self.nDynComp))
print("Distance set up to %6.2f Mpc"%(self.Dist))
# no name was specified #
else :
print('You should specify an output file name')
#====================== END OF READING / INIT THE MGE INPUT FILE =======================#
### INTEGRATED LUMINOSITY - ALL -------------------------------------------------
### Deriving the integrated Lum (Rcut, Zcut) for 1 gaussian, R and Z are in arcsec
def rhointL_1G(self, Rcut, Zcut, ind) :
"""
Integrated LUMINOSITY truncated within a cylinder defined by Rcut, Zcut (in arcsec)
for 1 Gaussian only: ind is the index of that gaussian
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
return self.MGEFlux[ind] * (1. - exp(- Rcut*Rcut/self._pParam.dSig3Darc2[ind])) * float(special.erf(Zcut/self._pParam.dqSig3Darc[ind]))
### Deriving the integrated Mass (Rcut, Zcut) for 1 gaussian, R and Z are in arcsec
def rhointM_1G(self, Rcut, Zcut, ind) :
"""
Integrated Mass truncated within a cylinder defined by Rcut, Zcut (in arcsec)
for 1 Gaussian only: ind is the index of that gaussian
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
return self.MGEMass[ind] * (1. - exp(- Rcut * Rcut / self._pParam.dSig3Darc2[ind])) \
* float(special.erf(Zcut / self._pParam.dqSig3Darc[ind]))
### INTEGRATED MASS - SPHERE ALL --------------------------------------------------------
### Deriving the integrated Mass (mcut) for 1 gaussian, m in arcsec
def rhoSphereintM_1G(self, mcut, ind) :
"""
Integrated Mass truncated within a spheroid of m=mcut (in arcsec)
for 1 Gaussian only: ind is the index of that gaussian
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
return self.MGEMass[ind] * (float(special.erf(mcut / self._pParam.dSig3Darc[ind])) - mcut * np.sqrt(2. / np.pi) \
* exp(- mcut*mcut/self._pParam.dSig3Darc2[ind])/ self.Sig3Darc[ind])
### Deriving the integrated Lum (mcut) for 1 gaussian, m in arcsec
################### TO BE REVISED
def rhoSphereintL_1G(self, mcut, ind) :
"""
Integrated LUMINOSITY truncated within a spheroid of m=mcut (in arcsec)
for 1 Gaussian only: ind is the index of that gaussian
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
return self.MGEFlux[ind] * (float(special.erf(mcut / self._pParam.dSig3Darc[ind])) - mcut * np.sqrt(2. / np.pi) \
* exp(- mcut*mcut/self._pParam.dSig3Darc2[ind])/ self.Sig3Darc[ind])
#####################################
## Adding more Gaussian parameters ##
#####################################
def _add_PhotometricParam(self) :
"""
Add many more parameters using the basic I, Sig, q, PA parameters of the model
These parameters are important for many (photometry/dynamics-related) routines
"""
## Only if axisymmetric
if self.axi :
##################################################################
## Compute some useful parameters for the projected Gaussians
##################################################################
if (self._findGauss2D > 0) :
# some useful numbers from the projected gaussians if they exist
self.Sig2D = self.Sig2Darc * self.pc_per_arcsec # Sigma in pc
self.Q2D2 = self.Q2D * self.Q2D
self.Sig2Darc2 = self.Sig2Darc * self.Sig2Darc # Projected Sigma in arcsecond
self.dSig2Darc2 = 2. * self.Sig2Darc2
self.Pp = self.Imax2D * self.ML # Mass maximum in Mass/pc-2
self.MGEFluxp = self.Imax2D*(self.Sig2D**2) * self.Q2D2 * np.pi
##################################################################
## Compute some useful parameters for the Spatial Gaussians
##################################################################
if (self._findGauss3D > 0):
# some more useful numbers
self.Imax3Dpc3 = self.Imax3D / self.pc_per_arcsec # I in Lum.pc-3
self.Sig3D = self.Sig3Darc * self.pc_per_arcsec # Sigma in pc
self.Parc = self.Imax3D * self.ML # Mass maximum in Mass/pc-2/arcsec-1
self.QxZ2 = self.QxZ ** 2
self.e2 = 1. - self.QxZ2
self.Sig3Darc2 = self.Sig3Darc**2 # Sigma in arcsecond !
self.qSig3Darc = self.QxZ * self.Sig3Darc
## Add photometric parameters
self._pParam = photParamMGE(self)
## Add dynamics parameters
self._dParam = dynParamMGE(self)
## Fluxes and Masses
self.MGEFlux = self.Imax3Dpc3 * self.QxZ * (sqrt(2.*np.pi) * self.Sig3D)**3
self.MGEMass = self.MGEFlux * self.ML
## Total Mass and Flux for Stars and Gas and Halo (not truncated)
self.MGEStarMass = np.sum(self.MGEMass[:self.nStarGauss],axis=0)
self.MGEStarFlux = np.sum(self.MGEFlux[:self.nStarGauss],axis=0)
self.MGEGasMass = np.sum(self.MGEMass[self.nStarGauss:self.nStarGauss+self.nGasGauss],axis=0)
self.MGEGasFlux = np.sum(self.MGEFlux[self.nStarGauss:self.nStarGauss+self.nGasGauss],axis=0)
self.MGEHaloMass = np.sum(self.MGEMass[self.nStarGauss+self.nGasGauss:self.nStarGauss+self.nGasGauss+self.nHaloGauss],axis=0)
self.MGEHaloFlux = np.sum(self.MGEFlux[self.nStarGauss+self.nGasGauss:self.nStarGauss+self.nGasGauss+self.nHaloGauss],axis=0)
## Total Mass and Flux for all
self.TMGEFlux = np.sum(self.MGEFlux,axis=0)
self.TMGEMass = np.sum(self.MGEMass,axis=0)
self.facMbh = self.Mbh / (4. * np.pi * self.pc_per_arcsec * self.pc_per_arcsec) # in M*pc-2*arcsec2
## TRUNCATED Mass and Flux for each Gaussian
self.truncMass = np.zeros(self.nGauss, floatMGE)
self.truncFlux = np.zeros(self.nGauss, floatMGE)
if self.TruncationMethod == "Cylindre" :
for i in range(self.nGauss) :
self.truncFlux[i] = self.rhointL_1G(self.Rcutarc, self.Zcutarc, i)
self.truncMass[i] = self.rhointM_1G(self.Rcutarc, self.Zcutarc, i)
elif self.TruncationMethod == "Ellipsoid" :
for i in range(self.nGauss) :
self.truncFlux[i] = self.rhoSphereintL_1G(self.mcutarc, i)
self.truncMass[i] = self.rhoSphereintM_1G(self.mcutarc, i)
## Total TRUNCATED Flux and Mass
self.TtruncFlux = np.sum(self.truncFlux,axis=0)
self.TtruncMass = np.sum(self.truncMass,axis=0)
# Listing the Gaussians in the Groups
self._listGroups()
self._listDynComps()
## Total Mass and Flux for Groups TRUNCATED!
self.truncGroupMass = np.zeros(self.nGroup, floatMGE)
self.truncGroupFlux = np.zeros(self.nGroup, floatMGE)
for i in range(self.nGroup) :
self.truncGroupMass[i] = np.sum(self.truncMass[self.listGaussGroup[i]], axis=0)
self.truncGroupFlux[i] = np.sum(self.truncFlux[self.listGaussGroup[i]], axis=0)
## Total TRUNCATED Flux and Mass for STARS, GAS, HALO
## STARS
self.truncStarFlux = np.sum(self.truncFlux[0: self.nStarGauss])
self.truncStarMass = np.sum(self.truncMass[0: self.nStarGauss])
## GAS
self.truncGasFlux = np.sum(self.truncFlux[self.nStarGauss:self.nStarGauss + self.nGasGauss])
self.truncGasMass = np.sum(self.truncMass[self.nStarGauss:self.nStarGauss + self.nGasGauss])
## HALO
self.truncHaloFlux = np.sum(self.truncFlux[self.nStarGauss + self.nGasGauss:self.nStarGauss + self.nGasGauss + self.nHaloGauss])
self.truncHaloMass = np.sum(self.truncMass[self.nStarGauss + self.nGasGauss:self.nStarGauss + self.nGasGauss + self.nHaloGauss])
else :
print_msg("Triaxial model, cannot compute additional photometric parameters", 1)
## ===========================================================================================================
###################################################
### Set the list of Indices for Gaussians ##
###################################################
def _set_ilist(self, ilist=None) :
if ilist is None : return list(range(self.nGauss))
else : return ilist
###################################################
### Compute the fraction for each component ##
## for a list of indices ##
###################################################
def _fraclistNbody(self, nbody, ilist) :
"""
Compute the fraction of particles for each component
corresponding to the truncated (Gaussian) Mass
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
nind = len(ilist)
fracNpGauss = np.zeros(nind, np.int32)
totaln = np.zeros(nind+1, np.int32)
TMass = np.sum(self.truncMass[ilist], axis=0)
for i in range(nind) :
fracNpGauss[i] = np.int(self.truncMass[ilist[i]] * nbody / TMass)
totaln[i+1] = totaln[i] + fracNpGauss[i]
fracNpGauss[nind-1] = nbody - totaln[nind-1]
totaln[nind] = nbody
return fracNpGauss, totaln
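## Worked example (made-up masses): with truncMass = [2., 1., 1.] and
## nbody = 100, fracNpGauss = [50, 25, 25]; the last entry is recomputed as
## nbody - totaln[nind-1] so that the particle numbers always sum to nbody.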
## ==================================================
###############################################################################################################
## To compute the number of particles and particle masses for each Gaussian/Groups ############################
###############################################################################################################
def _comp_Nparticles(self) :
"""
Add the respective numbers of particles for each Gaussian, Group
Depending on the Mass of each component
pmassGroup, pmassGauss: mass of the particles for each Gaussian, Group
nPartGauss : number of particles for each Gaussian
"""
self._add_PhotometricParam()
if (self.axi == 1) & (self._findGauss3D > 0):
# For this we use the list of Gaussians in the Groups
# First step is to have the Mass of Each Group to get the particle mass
mask = (self.nPartGroup !=0)
self.pmassGroup = np.zeros_like(self.truncGroupMass)
self.pmassGroup[mask] = self.truncGroupMass[mask] / self.nPartGroup[mask] # Mass of the particles in Groups
self.pmassGauss = self.pmassGroup[self.GaussGroupNumber - 1] # Mass of the particles in Gaussians
self.nPartGauss = np.zeros(self.nGauss, dtype=int)
self.nRealisedPartGauss = np.zeros(self.nGauss, dtype=int)
for i in range(self.nGroup) :
fracNpGauss, totaln = self._fraclistNbody(self.nPartGroup[i], self.listGaussGroup[i])
fracRealNpGauss, totaln = self._fraclistNbody(self.nRealisedPartGroup[i], self.listGaussGroup[i])
self.nPartGauss[self.listGaussGroup[i]] = fracNpGauss # TOTAL Number of particles in that Gaussian
self.nRealisedPartGauss[self.listGaussGroup[i]] = fracRealNpGauss # TOTAL Number of particles to be Realised in that Gaussian
## Cumulative sum for the total number of particles in the Model
self.nPartCum = np.concatenate((np.array([0]),asarray(np.cumsum(self.nPartGauss),dtype=int)))
## Now we calculate the number of particles to be realised in each Gaussian taking into account the MaxPart
##
## Temporary sum for the following calculation
self.nRealisedPartCum = np.concatenate((np.array([0]),asarray(np.cumsum(self.nRealisedPartGauss),dtype=int)))
## If we limit the number of particles, we use nMaxPart and FirstRealisedPart as guidelines
if self.nMaxPart > 0 :
firstPart = self.FirstRealisedPart ## This is the first particle to be realised
lastPart = firstPart + np.minimum(self.nMaxPart, np.sum(self.nRealisedPartGroup, axis=0) - firstPart) ## last particle to be realised
imin = 0 # Counter
for i in range(self.nGauss) :
n1 = np.maximum(imin, firstPart)
n2 = np.minimum(imin + self.nRealisedPartGauss[i], lastPart)
imin += self.nRealisedPartGauss[i]
self.nRealisedPartGauss[i] = np.maximum(0,n2 - n1)
## Derive the cumulative sum now
self.nRealisedPartCum = np.concatenate((np.array([0]),asarray(np.cumsum(self.nRealisedPartGauss),dtype=int)))
## Allocation for particles positions ############################
if self.Add_BHParticle & (self.Mbh > 0) :
self.nRealisedPartBH = 1
else :
self.nRealisedPartBH = 0
self.nPartStar = np.sum(self.nPartGauss[:self.nStarGauss], dtype=np.int)
self.nPartGas = np.sum(self.nPartGauss[self.nStarGauss:self.nStarGauss+self.nGasGauss], dtype=np.int)
self.nPartHalo = np.sum(self.nPartGauss[self.nStarGauss+self.nGasGauss:], dtype=np.int)
self.nPart = self.nPartStar + self.nPartGas + self.nPartHalo
if self.Mbh > 0 :
self.nPart += 1
self.nRealisedPartStar = np.sum(self.nRealisedPartGauss[:self.nStarGauss], dtype=np.int)
self.nRealisedPartGas = np.sum(self.nRealisedPartGauss[self.nStarGauss:self.nStarGauss+self.nGasGauss], dtype=np.int)
self.nRealisedPartHalo = np.sum(self.nRealisedPartGauss[self.nStarGauss+self.nGasGauss:], dtype=np.int)
self.nRealisedPart = self.nRealisedPartStar + self.nRealisedPartGas + self.nRealisedPartHalo + self.nRealisedPartBH
## =============================================================
################################################################
### Deprojection of the MGE model for an axisymmetric galaxy ###
################################################################
def deproject(self, inclin=None, printformat="E", particles=True, verbose=True) :
"""
Deproject the Gaussians and provide the spatial parameters
inclin: inclination in degrees
printformat: "E" or "F" depending on whether you want Engineering or Float notation
default is "E"
"""
if self.axi != 1 :
print("ERROR: cannot deproject this model: not axisymmetric !\n")
return
if inclin is None : inclin = self.Euler[1]
self.Euler = np.array([0., inclin, 0.])
if inclin == 0. :
print("Not yet supported\n")
return
for i in range(self.nGauss) :
if self.Q2D[i] != 1 :
print("ERROR: cannot deproject this model as component %d does not have Q2D = 1!\n" %(i+1))
elif inclin == 90. :
if verbose :
print("Edge-on deprojection\n")
self.Sig3Darc = self.Sig2Darc
self.QxZ = self.Q2D * 1.0
self.QyZ = self.Q2D * 1.0
self.Imax3D = self.Imax2D / (sqrt(2. * np.pi) * self.Sig2Darc)
self._findGauss3D = self.QxZ.shape[0]
else :
inclin_rad = inclin * np.pi / 180.
cosi2 = cos(inclin_rad) * cos(inclin_rad)
sini2 = sin(inclin_rad) * sin(inclin_rad)
for i in range(self.nGauss) :
if cosi2 > (self.Q2D[i] * self.Q2D[i]) :
minangle = np.arccos(self.Q2D[i])
print("ERROR: cannot deproject the component %d. Minimum inclination is %f degrees" %(i+1, minangle*180./np.pi))
continue
self.QxZ[i] = sqrt((self.Q2D[i] * self.Q2D[i] - cosi2) / sini2)
self.QyZ[i] = self.QxZ[i] * 1.0
self.Sig3Darc[i] = self.Sig2Darc[i] * 1.0
self.Imax3D[i] = self.Imax2D[i] * self.Q2D[i] / (sqrt(2. * np.pi) * self.QxZ[i] * self.Sig2Darc[i])
self._findGauss3D = self.QxZ.shape[0]
if verbose :
print("Deprojected Model with inclination of %5.2f" %(inclin))
print(" # Imax Sigma Qx Qy")
print(" Lsun/pc^2/arcsec arcsec")
if printformat == "F" : ff = "%13.5f"
else : ff = "%13.8e"
for i in range(self.nGauss) :
print(("3D-G %2d {0} %10.5f %9.5f %9.5f" %(i+1, self.Sig3Darc[i], self.QxZ[i], self.QyZ[i])).format(ff%(self.Imax3D[i])))
if particles :
if 'kRZ' not in self.__dict__ :
self.kRZ = np.ones(self.nGauss, floatMGE)
self._init_BetaEps(verbose=False)
self._comp_Nparticles()
return
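## Example usage (illustrative): mge.deproject(inclin=60.)
## Worked case for one Gaussian with Q2D = 0.8 seen at i = 60 degrees:
## QxZ = sqrt((0.8**2 - cos(60)**2) / sin(60)**2) = sqrt(0.39 / 0.75) ~ 0.72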
## ===========================================================================================
################################################################
### Projection of the MGE model for an axisymmetric galaxy ###
################################################################
def project(self, inclin=90, printformat="E", particles=True, verbose=True) :
"""
Project the Gaussians and provide the 2D parameters
inclin: inclination in degrees
printformat: "E" or "F" depending on whether you want Engineering or Float notation
default is "E"
"""
if self.axi != 1 :
print("ERROR: cannot project this model: not axisymmetric !\n")
return
self.Euler = np.array([0., inclin, 0.])
if inclin == 0. :
if verbose :
print("Face-on Projection\n")
self.Sig2Darc = self.Sig3Darc
self.Q2D = np.ones(self.nGauss, floatMGE)
self.Imax2D = self.Imax3D * sqrt(2. * np.pi) * self.QxZ * self.Sig3Darc
elif inclin == 90. :
if verbose :
print("Edge-on Projection\n")
self.Sig2Darc = self.Sig3Darc * 1.0
self.Q2D = self.QxZ * 1.0
self.Imax2D = self.Imax3D * (sqrt(2. * np.pi) * self.Sig3Darc)
else :
inclin_rad = inclin * np.pi / 180.
cosi2 = cos(inclin_rad) * cos(inclin_rad)
sini2 = sin(inclin_rad) * sin(inclin_rad)
for i in range(self.nGauss) :
self.Q2D[i] = sqrt(self.QxZ[i] * self.QxZ[i] * sini2 + cosi2)
self.Sig2Darc[i] = self.Sig3Darc[i] * 1.0
self.Imax2D[i] = self.Imax3D[i] * sqrt(2. * np.pi) * self.QxZ[i] * self.Sig3Darc[i] / self.Q2D[i]
self._findGauss2D = self.Q2D.shape[0]
if verbose :
print("Projected Model with inclination of %5.2f" %(inclin))
print(" # Imax Sigma Q2D")
print(" Lsun/pc^2 arcsec")
if printformat == "F" : ff = "%13.5f"
else : ff = "%13.8e"
for i in range(self.nGauss) :
print(("2D-G %2d {0} %9.5f %9.5f"%(i+1, self.Sig2Darc[i], self.Q2D[i])).format(ff%(self.Imax2D[i])))
if particles :
self._comp_Nparticles()
return
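## Example usage (illustrative): mge.project(inclin=60.)
## Worked case for one Gaussian with QxZ = 0.72 seen at i = 60 degrees:
## Q2D = sqrt(0.72**2 * sin(60)**2 + cos(60)**2) = sqrt(0.389 + 0.25) ~ 0.80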
#===================================================================
##################################################################
### Write an ascii MGE file using an existing MGE class object ###
##################################################################
def write_mge(self, outdir=None, outfilename=None, overwrite=False) :
if (outfilename is None) : # testing if the name was set
print('You should specify an output file name')
return
if outdir is not None :
outfilename = outdir + outfilename
## Testing if the file exists
if os.path.isfile(outfilename) :
if not overwrite : # testing if the existing file should be overwritten
print('WRITING ERROR: File %s already exists, use overwrite=True if you wish' %outfilename)
return
mgeout = open(outfilename, "w+")
## Starting to write the output file
linecomment = "#######################################################\n"
def set_txtcomment(text, name, value, valform="%f") :
textout = "## %s \n"%(text)
return textout + name + " " + valform%(value)+"\n"
mgeout.write(linecomment + "## %s MGE model \n"%(outfilename) + linecomment)
## Basic Parameters
mgeout.write(set_txtcomment("Distance [Mpc]", "DIST", self.Dist, "%5.2f"))
mgeout.write(set_txtcomment("Black Hole Mass [Msun]", "MBH", self.Mbh, "%8.4e"))
mgeout.write(set_txtcomment("Euler Angles [Degrees]", "EULER", tuple(self.Euler), "%8.5f %8.5f %8.5f"))
mgeout.write(set_txtcomment("Center [Arcsec]", "CENTER", tuple(self.Center), "%8.5f %8.5f"))
## Number of Gaussians
NGauss = (self.nStarGauss, self.nGasGauss, self.nHaloGauss)
mgeout.write(set_txtcomment("Number of Gaussians (Stars, Gas, Dark Matter)", "NGAUSS", NGauss, "%d %d %d"))
Gaussians3D = np.zeros((self.nGauss, 9), float)
Gaussians2D = np.zeros((self.nGauss, 4), float)
if self._findGauss3D > 0:
## Projecting to get the 2D values
self.project(inclin=self.Euler[1], particles=False)
elif self._findGauss2D > 0:
## Deprojecting to get the 3D values
self.deproject(inclin=self.Euler[1], particles=False)
else :
print_msg("No Gaussians found in this model", 3)
        ## Fill the 2D and 3D Gaussian parameter tables
Gaussians2D[:,0] = self.Imax2D
Gaussians2D[:,1] = self.Sig2Darc
Gaussians2D[:,2] = self.Q2D
Gaussians2D[:,3] = self.PAp
Gaussians3D[:,0] = self.Imax3D
Gaussians3D[:,1] = self.Sig3Darc
Gaussians3D[:,2] = self.QxZ
Gaussians3D[:,3] = self.QyZ
Gaussians3D[:,4] = self.ML
Gaussians3D[:,5] = self.kRTheta
Gaussians3D[:,6] = self.kRZ
Gaussians3D[:,7] = np.asarray(self.GaussGroupNumber, float)
Gaussians3D[:,8] = np.asarray(self.GaussDynCompNumber, float)
self.axi = 1
###################
## 2D Gaussians
###################
## STARS First
k = 0
mgeout.write("## No Imax Sigma Q PA\n")
mgeout.write("## Stellar 2D Gaussians\n")
for i in range(NGauss[0]) :
mgeout.write("STARGAUSS2D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f \n"%tuple(Gaussians2D[k]))
k += 1
## then Gas
mgeout.write("## Gas 2D Gaussians\n")
for i in range(NGauss[1]) :
mgeout.write("GASGAUSS2D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f \n"%tuple(Gaussians2D[k]))
k += 1
## Then Dark Matter
mgeout.write("## Dark Matter 2D Gaussians\n")
for i in range(NGauss[2]) :
mgeout.write("HALOGAUSS2D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f \n"%tuple(Gaussians2D[k]))
k += 1
###################
## 3D Gaussians
###################
## STARS First
k = 0
mgeout.write("## ID Imax Sigma QxZ QyZ M/L kRT kRZ Group DynComp\n")
mgeout.write("## Stellar 3D Gaussians\n")
for i in range(NGauss[0]) :
if self.betaeps[k]:
mgeout.write("STARGAUSS3D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f %8.5f %8.5f"%tuple(Gaussians3D[k][:6]) \
+ " BETAEPS " + "%d %d \n"%tuple(Gaussians3D[k][7:]))
else:
mgeout.write("STARGAUSS3D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %d %d \n"%tuple(Gaussians3D[k]))
k += 1
## then Gas
mgeout.write("## Gas 3D Gaussians\n")
for i in range(NGauss[1]) :
if self.betaeps[k]:
mgeout.write("GASGAUSS3D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f %8.5f %8.5f"%tuple(Gaussians3D[k][:6]) \
+ " BETAEPS " + "%d %d \n"%tuple(Gaussians3D[k][7:]))
else:
mgeout.write("GASGAUSS3D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %d %d \n"%tuple(Gaussians3D[k]))
k += 1
## Then Dark Matter
mgeout.write("## Dark Matter 3D Gaussians\n")
for i in range(NGauss[2]) :
if self.betaeps[k]:
mgeout.write("HALOGAUSS3D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f %8.5f %8.5f"%tuple(Gaussians3D[k][:6]) \
+ " BETAEPS " + "%d %d \n"%tuple(Gaussians3D[k][7:]))
else:
mgeout.write("HALOGAUSS3D%02d "%(i+1) + "%8.5e %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %d %d \n"%tuple(Gaussians3D[k]))
k += 1
## Number of Groups et al.
mgeout.write(set_txtcomment("Number of Groups", "NGROUP", self.nGroup, "%d"))
mgeout.write(set_txtcomment("Number of Dynamical Components", "NDYNCOMP", self.nDynComp, "%d"))
mgeout.write("## PARTICLES for each DynComp: Total number and Number to be realised\n")
for i in range(self.nGroup) :
NPartGroup = (self.nPartGroup[i], self.nRealisedPartGroup[i])
mgeout.write("NPARTGROUP%02d %d %d\n"%(i+1, self.nPartGroup[i], self.nRealisedPartGroup[i]))
mgeout.close()
#===================================================================================================================================
def create_mge(outfilename=None, overwrite=False, outdir=None, **kwargs) :
"""Create an MGE ascii file corresponding to the input parameters
"""
## Setting a temporary MGE object
saveMGE = kwargs.get('saveMGE', None)
if saveMGE is None :
tempMGE = paramMGE()
else :
tempMGE = paramMGE(saveMGE=saveMGE)
## Test if the structure was properly initialised
if not hasattr(tempMGE, "mcut") :
## If not just return and stop as the message was already clear
## From the initialisation
return
## Get the numbers from kwargs
## First the Gaussians
NGauss = np.asarray(kwargs.get('NGauss', np.array([1,0,0])), int)
if NGauss.size == 1 : NGauss = np.asarray(np.array([NGauss, 0, 0]), int)
TNGauss = NGauss.sum()
## Inclination
if "Inclination" in kwargs :
if "Euler" in kwargs :
print_msg("Both Euler and Inclination are defined here: will use Euler as a default", 1)
else :
kwargs["Euler"] = np.array([0., float(kwargs.get("Inclination")), 0.])
tempMGE.Euler = np.asarray(kwargs.get("Euler"))
if tempMGE.Euler.size != 3 :
print_msg("Problem with Euler angles, will set the default = 0, 90, 0 = edge-on", 1)
tempMGE.Euler = np.array([0., 90., 0.])
tempMGE._reset(nGauss=NGauss)
temp2D = np.array([0., 1., 1., 0.])
temp3D = np.array([0., 1., 1., 1., 1., 1., 1., 1, 1])
temp3D_short = np.array([1., 1., 1., 1, 1])
# Testing for betaeps
if 'betaeps' in kwargs:
        betaeps = kwargs.pop('betaeps', np.ones(tempMGE.nGauss, dtype=int))
import six
import cv2
import inspect
import itertools
import numpy as np
from functools import wraps
# from sklearn.decomposition import PCA
# from scipy.ndimage.interpolation import rotate as imrotate
from nutszebra_sampling import Sampling as sampling
from nutszebra_preprocess_picture import PreprocessPicture as preprocess
from nutszebra_basic_dictionary import NutszebraDictionary as dict_n
def execute_based_on_probability(func):
"""Decorator to execute a function based on probability
Edited date:
160707
Test:
    | 2. str or numpy.ndarray: The decorator treats it as the input x of func, thus sets the first argument as self.x and passes it to func directly with **kwargs. You can give info as the second argument and in that case, the second argument becomes self.info. If you don't give info, new info is generated and set as self.info.
| 3. Nothing or None: The first argument is treated as 1.0.
Args:
__no_record (bool): If True, this decorator does not do anything
x_or_probability Optional([str, numpy.ndarray, int, float, None]): read Note
info [nutszebra_basic_dictionary.NutszebraDictionary]: read Note
Returns:
nutszebra_data_augmentation_picture.DataAugmentationPicture: return self
"""
@wraps(func)
def wrapper(self, x_or_probability=None, *args, **kwargs):
# if x_or_probability is not given, x_or_probability becomes 1.0
if x_or_probability is None:
x_or_probability = 1.0
if '__no_record' in kwargs and kwargs['__no_record'] is True:
# pop __no_record
kwargs.pop('__no_record')
# no record
return func(self, x_or_probability, **kwargs)
# they are recorded
elif isinstance(x_or_probability, float) or isinstance(x_or_probability, int):
# probability case
probability = float(x_or_probability)
# 0<=np.random.rand()<=1
if probability == 1.0 or probability >= np.random.rand():
if self.x is None:
if self.info is None:
self.info = dict_n({'pc': 0})
self.info[self.info['pc']] = {}
self.info[(self.info['pc'], 'execute')] = False
else:
# func needs only **kwargs to change behavior
self.x, self.info[self.info['pc']] = func(self, self.x, **kwargs)
# func has been executed
self.info[(self.info['pc'], 'execute')] = True
else:
# nothing happened
self.info[(self.info['pc'], 'execute')] = False
else:
# x case
# check info is given or not
if not len(args) == 0 and isinstance(args[0], dict_n):
info = args[0]
else:
info = None
# set x and info
self(x_or_probability, info=info)
self.x, self.info[self.info['pc']] = func(self, self.x, **kwargs)
# func has been executed
self.info[(self.info['pc'], 'execute')] = True
# record function name
self.info[(self.info['pc'], 'whoami')] = func.__name__
# record default arguments
# None, None => self, x_or_probability
tmp = extract_defaults(func)
for key, val in tmp.items():
self.info[(self.info['pc'], key)] = val
# record **kwargs
for key in kwargs.keys():
self.info[(self.info['pc'], key)] = kwargs[key]
# increment program counter
self.info['pc'] += 1
return self
return wrapper
def extract_defaults(func):
tmp = inspect.getargspec(func)
if tmp.defaults is None:
return {}
return dict(zip(tmp.args[-len(tmp.defaults):], tmp.defaults))
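# Illustrative sketch (hypothetical function, not part of this module): given
#     def resize(self, x, size=(32, 32), mode='nearest'): ...
# extract_defaults(resize) returns {'size': (32, 32), 'mode': 'nearest'},
# i.e. only the arguments that carry default values; `self` and `x` are skipped.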
class DataAugmentationPicture(object):
"""Some useful functions for data-augmentation about pictures are defined
Attributes:
self.x Optional([numpy.ndarray, str]): input x
        self.info (nutszebra_basic_dictionary.NutszebraDictionary): records what happened while executing data augmentation
eigenvalue (numpy.ndarray): eigenvalue for self.rgb_shift
eigenvector (numpy.ndarray): eigenvector for self.rgb_shift
papers (list): the title of papers
parameters (dict): the parameters in papers are stored
"""
def __init__(self, **kwargs):
self.x = None
self.info = None
self.eigenvalue = None
self.eigenvector = None
self.papers = ['scalable bayesian optimization using deep neural networks',
'do deep convolutional nets really need to be deep (or even convolutional)?',
]
self.parameters = {}
self.parameters[self.papers[0]] = {'pixel-dropout': {'probability': 0.2},
'shift-hue': {'low': -31.992, 'high': 31.992},
'shift-saturation': {'low': -0.10546, 'high': 0.10546},
'shift-value': {'low': -0.24140, 'high': 0.24140},
'stretch-saturation': {'low': 1. / (1. + 0.31640), 'high': 1. + 0.31640},
'stretch-value': {'low': 1. / (1. + 0.13671), 'high': 1. + 0.13671},
'stretch-BGR': {'low': 1. / (1. + 0.24140), 'high': 1. + 0.24140},
}
self.parameters[self.papers[1]] = {'shift-hue': {'low': -0.06, 'high': 0.06},
'shift-saturation': {'low': -0.26, 'high': 0.26},
'shift-value': {'low': -0.20, 'high': 0.20},
'stretch-saturation': {'low': 1. / (1. + 0.21), 'high': 1. + 0.21},
'stretch-value': {'low': 1. / (1. + 0.13), 'high': 1. + 0.13},
}
def __call__(self, x=None, info=None):
"""Set x and info
Edited date:
160707
Test:
160708
Example:
::
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
da('lenna.jpg')
>>> print(da.x)
'lenna.jpg'
>>> print(da.info)
{'pc': 0}
da()
>>> print(da.x)
None
>>> print(da.info)
None
da('lenna.jpg', {'test':0})
>>> print(da.x)
'lenna.jpg'
            >>> print(da.info)
{'test':0}
Args:
x Optional([None, str, numpy.ndarray, int, float]): If None, self.x and self.info are setted as None, otherwise set this argument as self.x
info Optional([None, nutszebra_basic_dictionary.NutszebraDictionary, dict]): If None, generate new info, otherwise set this argument as self.info
Returns:
nutszebra_data_augmentation_picture.DataAugmentationPicture: return self
"""
if x is None:
# reset
self.x = None
self.info = None
else:
# set
if info is None:
info = self.generate_info()
self.x = x
self.info = dict_n(info)
return self
def end(self, info_flag=False):
"""Return self.x and self.info
Edited date:
160707
Test:
160708
Example:
::
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
da('lenna.jpg')
>>> print(da.end())
'lenna.jpg'
>>> print(da.end(True))
('lenna.jpg', {'pc': 0})
Args:
info_flag (bool): If False, only self.x is returned, otherwise return (self.x, self.info).
Returns:
numpy.ndarray: info_flag=False
tuple: info=True, (numpy.ndarray, nutszebra_basic_dictionary.NutszebraDictionary)
"""
if info_flag is True:
return (self.x, self.info)
else:
return self.x
@staticmethod
def generate_info():
"""Generate info dictionary
Edited date:
160702
Test:
160702
Example:
::
info = self.nz.generate_info()
>>> print(info)
                {'pc': 0}
Returns:
            info (nutszebra_basic_dictionary.NutszebraDictionary): information about what happened during execution is written onto this
"""
return dict_n({'pc': 0})
def register_eigen(self, data):
"""calculate and register eigenvalue & eigenvector, those eigen ones are used for rgb_shift
Edited date:
160422
Test:
160501
Example:
::
self.register_eigen(data)
Args:
data (numpy.ndarray): data's ndim has to be 2
Returns:
True if successful, False otherwise
"""
if not data.ndim == 2:
return False
cov = np.dot(data, data.T) / len(data)
        V, D, _ = np.linalg.svd(cov)
"""For calculating sub-observer and sub-stellar locations.
Defines the method :func:`sub_observerstellar`.
Thanks to <NAME> for preliminary work on this script.
"""
import numpy as np
pi = np.pi
def sub_observerstellar(times,worb,wrot,inc,obl,sol,longzero=0):
"""Calculates an exoplanet's sub-observer and -stellar locations over time.
Calculates time-dependent, trigonometric values of an exoplanet's sub-
observer and sub-stellar locations when on a circular orbit. Planet
coordinates are colatitude (theta) and longitude (phi). Orbital phase
is zero when planet is opposite star from observer (superior conjunction)
and increases CCW when system is viewed above star's North pole. See
Appendix A of `Schwartz et al. (2016) <https://arxiv.org/abs/1511.05152>`_.
Args:
times (1d array, int, or float):
Discrete time values in any unit, with total number *n_time*.
At t=0 planet is at superior conjunction.
worb (int or float):
Orbital angular frequency in radians per unit time. Positive
values are prograde orbits (CCW), negative are retrograde (CW).
wrot (int or float):
Rotational angular frequency in radians per unit time.
For prograde orbits, positive values are prograde rotation,
negative are retrograde (vice versa for retrograde orbits).
inc (int or float):
Inclination of orbital plane to the observer, in radians.
Zero is face-on, pi/2 is edge-on.
obl (int or float):
Obliquity relative to the ``worb`` vector, in radians.
This is the tilt of the planet's spin axis. Zero is North
pole up, pi/2 is maximal tilt, pi is North pole down.
sol (int or float):
The orbital phase of Northern Summer solstice, in radians.
If the ``wrot`` vector is projected into the orbital plane,
then this phase is where that projection points at the star.
longzero (int or float):
Longitude of the sub-observer point when t=0, in radians.
Default is zero.
Returns:
trigvals (ndarray):
Array of trigonometric values with shape (8, *n_time*). First
dimension is organized as:
- sin theta_obs
- cos theta_obs
- sin phi_obs
- cos phi_obs
- sin theta_st
- cos theta_st
- sin phi_st
- cos phi_st
"""
if isinstance(times,np.ndarray) and (times.size == times.shape[0]):
timeA = times
N_time = timeA.size # Number of time steps from input array
elif isinstance(times,(int,float)):
timeA = np.array([times])
N_time = 1
else:
print('sub_observerstellar aborted: input times should be ndarray (1D), int, or float.')
return
phaseA = worb*timeA # Orbital phases
phiGen = wrot*timeA - longzero # General expression for PhiObs (without overall negative sign)
cThObs = (np.cos(inc)*np.cos(obl)) + (np.sin(inc)*np.sin(obl)*np.cos(sol))
cThObsfull = np.repeat(cThObs,N_time)
sThObs = (1.0 - (cThObs**2.0))**0.5
sThObsfull = np.repeat(sThObs,N_time)
cThSt = np.sin(obl)*np.cos(phaseA - sol)
sThSt = (1.0 - (cThSt**2.0))**0.5
sol_md = (sol % (2.0*pi))
inc_rd = round(inc,4) # Rounded inclination, for better comparison
p_obl_rd = round((pi - obl),4) # Rounded 180 degrees - obliquity, for better comparison
cond_face = (((inc == 0) or (inc == pi)) and ((obl == 0) or (obl == pi))) # Pole-observer 1: face-on inclination
cond_north = ((sol_md == 0) and ((inc == obl) or (inc_rd == -p_obl_rd))) # Ditto 2: North pole view
cond_south = ((sol == pi) and ((inc_rd == p_obl_rd) or (inc == -obl))) # Ditto 3: South pole view
if cond_face or cond_north or cond_south:
if (obl == (pi/2.0)):
            aII = np.sin(phaseA)
import bisect
import bounded_kde as bk
import bz2
import gzip
import log_kde as lk
import numpy as np
import matplotlib.pyplot as pp
import os.path
import scipy.interpolate as si
import scipy.stats as ss
import scipy.stats.mstats as sm
def load_header_data(file, header_commented=False):
"""Load data from the file, using header for column names.
:param file: A file object or filename.
:param header_commented: If ``True``, discard the first character
of header as comment marker."""
def do_read(file):
header=file.readline()
if header_commented:
header=header[1:].split()
else:
header=header.split()
return np.loadtxt(file, dtype=[(h, np.float) for h in header])
if isinstance(file, str):
f,ext = os.path.splitext(file)
if ext == '.gz':
with gzip.open(file, 'r') as inp:
return do_read(inp)
elif ext == '.bz2':
with bz2.BZ2File(file, 'r') as inp:
return do_read(inp)
else:
with open(file, 'r') as inp:
return do_read(inp)
else:
        return do_read(file)
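# Usage sketch (assumed file layout): a whitespace-separated table whose first
# row names the columns, e.g.
#     m1 m2 logl
#     1.4 1.2 -3.5
# load_header_data('samples.dat') then yields a structured array with fields
# 'm1', 'm2' and 'logl' (the file name here is only an example).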
def plot_emcee_chains(chain, truths=None, mean=True, fburnin=0):
"""Produces a chain plot of the mean values of each variable at each
step. The chain should have the shape ``(nwalkers, nsteps,
nvars)``, and the resulting grid of plots will be as close to
square as possible, showing the mean walker position at each step.
:param chain: An array of shape ``(nwalkers, nsteps, nvars)``
giving the history of the chain.
:param truths: Either ``None`` or an iterable giving the truth
values for each of the parameters, which will be plotted as a
horizontal line.
:param mean: If ``True`` (default) plot only the mean of the
walker ensemble. Otherwise, plot the evolution of each walker
in the chain.
:param fburnin: The fraction of points to discard at the beginning
of the chain.
"""
nk = chain.shape[2]
n = int(np.ceil(np.sqrt(nk)))
istart = int(round(fburnin*chain.shape[1]))
for k in range(nk):
pp.subplot(n,n,k+1)
if mean:
pp.plot(np.mean(chain[:,istart:,k], axis=0))
else:
pp.plot(chain[:,istart:,k].T)
if truths is not None:
pp.axhline(truths[k], color='k')
def plot_emcee_chains_one_fig(chain, fburnin=None):
"""Plots a single-figure representation of the chain evolution of the
given chain. The figure shows the evolution of the mean of each
coordinate of the ensemble, normalised to zero-mean, unit-standard
deviation.
:param chain: The sampler chain, of shape ``(nwalkers, niter,
nparams)``
:param fburnin: If not ``None``, refers to the fraction of samples
to discard at the beginning of the chain.
"""
nk = chain.shape[2]
if fburnin is None:
istart = 0
else:
istart = int(round(fburnin*chain.shape[1]))
for k in range(nk):
mus = np.mean(chain[:,istart:,k], axis=0)
mu = np.mean(mus)
sigma = np.std(mus)
pp.plot((mus - mu)/sigma)
def decorrelated_2d_histogram_pdf(pts, xmin=None, xmax=None, ymin=None, ymax=None):
"""Returns ``(XS, YS, ZS)``, with ``ZS`` of shape ``(Nx,Ny)`` and
``XS`` and ``YS`` of shape ``(Nx+1,Ny+1)`` giving the height and
box corners, respectively, of the histogram estimate of the PDF
from which ``pts`` are drawn. The bin widths and orientations are
chosen optimally for the convergence of the histogram to the true
PDF under a squared-error metric; the automatic binning is tuned
to work particularly well for multivariate Gaussians.
Note: the first index of ZS varies with the X coordinate, while
the second varies with the y coordinate. This is consistent with
:func:`pp.pcolor`, but inconsistent with :func:`pp.imshow` and
:func:`pp.contour`.
:param pts: The sample points, of shape ``(Npts,2)``.
:param xmin: Minimum value in x. If ``None``, use minimum data
value.
:param xmax: Maximum value in x. If ``None``, use minimum data
value.
:param ymin: Minimum value in y. If ``None``, use minimum data
value.
:param ymax: Maximum value in y. If ``None``, use minimum data
value."""
if xmin is None:
xmin = np.min(pts[:,0])
if xmax is None:
xmax = np.max(pts[:,0])
if ymin is None:
ymin = np.min(pts[:,1])
if ymax is None:
ymax = np.max(pts[:,1])
cov=np.cov(pts, rowvar=0)
mu=np.mean(pts, axis=0)
# cov = L*L^T
d, L = np.linalg.eig(cov)
rescaled_pts = np.dot(pts-mu, L)
rescaled_pts = rescaled_pts / np.reshape(np.sqrt(d), (1, 2))
h = (42.5/pts.shape[0])**0.25
Nx=int((np.max(rescaled_pts[:,0])-np.min(rescaled_pts[:,0]))/h + 0.5)
Ny=int((np.max(rescaled_pts[:,1])-np.min(rescaled_pts[:,1]))/h + 0.5)
H,xs,ys = np.histogram2d(rescaled_pts[:,0], rescaled_pts[:,1], bins=(Nx,Ny))
# Backwards to account for the ordering in histogram2d.
YS_RESCALED,XS_RESCALED=np.meshgrid(ys, xs)
HPTS_RESCALED=np.column_stack((XS_RESCALED.flatten(),
YS_RESCALED.flatten()))
HPTS=np.dot(HPTS_RESCALED*np.reshape(np.sqrt(d), (1,2)), L.T) + mu
XS=np.reshape(HPTS[:,0], (Nx+1,Ny+1))
YS=np.reshape(HPTS[:,1], (Nx+1,Ny+1))
return XS,YS,H
def interpolated_quantile(sorted_pts, quantile):
"""Returns a linearly interpolated quantile value.
:param sorted_pts: A sorted array of points.
:param quantile: The quantile desired."""
N=sorted_pts.shape[0]
idx=N*quantile
lidx=int(np.floor(idx))
hidx=int(np.ceil(idx))
    return (hidx-idx)*sorted_pts[lidx] + (idx-lidx)*sorted_pts[hidx]
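# Worked example (illustrative): for sorted_pts = np.array([0., 1., 2., 3., 4.])
# and quantile = 0.56, idx = 2.8, lidx = 2 and hidx = 3, so the result is
# 0.2*sorted_pts[2] + 0.8*sorted_pts[3] = 2.8, a linear blend that weights the
# upper bracketing sample more heavily because idx sits closer to it.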
def plot_interval(pts, levels, *args, **kwargs):
"""Plot probability intervals corresponding to ``levels`` in 1D.
Additional args are passed to :func:`pp.axvline`. The chosen
levels are symmetric, in that they have equal probability mass
outside the interval on each side.
:param pts: Shape ``(Npts,)`` array of samples.
:param levels: Sequence of levels to plot."""
for level in levels:
low,high = sm.mquantiles(pts, [0.5*(1.0-level), 0.5+0.5*level])
pp.axvline(low, *args, **kwargs)
pp.axvline(high, *args, **kwargs)
def plot_greedy_kde_interval_2d(pts, levels, xmin=None, xmax=None, ymin=None, ymax=None, Nx=100, Ny=100, cmap=None, colors=None, *args, **kwargs):
"""Plots the given probability interval contours, using a greedy
selection algorithm. Additional arguments passed to
:func:`pp.contour`.
The algorithm uses a two-step process (see `this document
<https://dcc.ligo.org/LIGO-P1400054/public>`_) so that the
resulting credible areas will be unbiased.
:param pts: Array of shape ``(Npts, 2)`` that contains the points
in question.
:param levels: Sequence of levels (between 0 and 1) of probability
intervals to plot.
:param xmin: Minimum value in x. If ``None``, use minimum data
value.
:param xmax: Maximum value in x. If ``None``, use minimum data
value.
:param ymin: Minimum value in y. If ``None``, use minimum data
value.
:param ymax: Maximum value in y. If ``None``, use minimum data
value.
:param Nx: Number of subdivisions in x for contour plot. (Default
100.)
:param Ny: Number of subdivisions in y for contour plot. (Default
100.)
:param cmap: See :func:`pp.contour`.
:param colors: See :func:`pp.contour`.
"""
Npts=pts.shape[0]
    kde_pts = pts[:Npts//2,:]
    den_pts = pts[Npts//2:,:]
Nkde = kde_pts.shape[0]
Nden = den_pts.shape[0]
kde=ss.gaussian_kde(kde_pts.T)
den=kde(den_pts.T)
densort=np.sort(den)[::-1]
if xmin is None:
xmin = np.min(pts[:,0])
if xmax is None:
xmax = np.max(pts[:,0])
if ymin is None:
ymin = np.min(pts[:,1])
if ymax is None:
ymax = np.max(pts[:,1])
xs = np.linspace(xmin, xmax, Nx)
ys = np.linspace(ymin, ymax, Ny)
XS,YS=np.meshgrid(xs,ys)
ZS=np.reshape(kde(np.row_stack((XS.flatten(), YS.flatten()))), (Nx, Ny))
zvalues=[]
for level in levels:
ilevel = int(Nden*level + 0.5)
if ilevel >= Nden:
ilevel = Nden-1
zvalues.append(densort[ilevel])
pp.contour(XS, YS, ZS, zvalues, colors=colors, cmap=cmap, *args, **kwargs)
def greedy_kde_areas_2d(pts, levels, Nx=100, Ny=100, truth=None):
"""Returns an estimate of the area within the given credible levels
for the posterior represented by ``pts``.
The algorithm uses a two-step process (see `this document
<https://dcc.ligo.org/LIGO-P1400054/public>`_) so that the
resulting credible areas will be unbiased.
:param pts: An ``(Npts, 2)`` array giving samples from the
posterior. The algorithm assumes that each point is an
independent draw from the posterior.
:param levels: The credible levels for which the areas are
desired.
:param Nx: The number of subdivisions along the first parameter to
be used for the credible area integral.
:param Ny: The number of subdivisions along the second parameter
to be used for the credible area integral.
:param truth: If given, then the area contained within the
posterior contours that are more probable than the posterior
evaluated at ``truth`` will be returned. Also, the credible
level that corresponds to truth is returned. The area quantity
is sometimes called the 'searched area', since it is the area a
greedy search algorithm will cover before finding the true
values.
:return: If ``truth`` is None, ``areas``, an array of the same
shape as ``levels`` giving the credible areas; if ``truth`` is
not ``None`` then ``(areas, searched_area, p_value)``.
"""
pts = np.random.permutation(pts)
mu = np.mean(pts, axis=0)
cov = np.cov(pts, rowvar=0)
L = np.linalg.cholesky(cov)
detL = L[0,0]*L[1,1]
pts = np.linalg.solve(L, (pts - mu).T).T
if truth is not None:
truth = np.linalg.solve(L, truth-mu)
Npts = pts.shape[0]
    kde_pts = pts[:Npts//2, :]
    den_pts = pts[Npts//2:, :]
kde = ss.gaussian_kde(kde_pts.T)
den = kde(den_pts.T)
densort = np.sort(den)[::-1]
xs = np.linspace(np.min(pts[:,0]), np.max(pts[:,0]), Nx)
    ys = np.linspace(np.min(pts[:,1]), np.max(pts[:,1]), Ny)
import time
import numpy as np
from os import makedirs
from os.path import exists, join, basename
class ReadEEG:
def __init__(self, filename, newSampleRate=0, channels2exclude=[]):
self.filename = filename
if filename[-3:] == 'bdf':
self.readBDF()
elif filename[-3:] == 'eeg':
self.readBrainVision()
# Exclude Channels if necessary
if channels2exclude != []:
self.excludeChannels(channels2exclude)
# Reslice Data if necessary
if newSampleRate != 0:
self.resliceData(newSampleRate)
def readBDF(self):
self.eegFile = self.filename.replace('.bdf', '.lineviewer')
with open(self.filename, 'rb') as f:
offset = 168
f.seek(offset)
startDate = f.read(8).strip()
startTime = f.read(8).strip()
offset += 68
f.seek(offset)
dataRecorded = int(f.read(8))
durationRecorded = int(f.read(8))
nbChannels = int(f.read(4))
labelsChannel = np.array(
[f.read(16).strip() for i in range(nbChannels)])
offset += 20 + 216 * nbChannels
f.seek(offset)
sampleRate = [int(f.read(8)) for i in range(nbChannels)]
sampleRate = sampleRate[0]
offset += (40) * nbChannels
if not exists(self.eegFile):
with open(self.filename, 'rb') as f:
f.seek(offset)
rawdata = np.fromfile(f, dtype='uint8').reshape(-1, 3)
zeroarray = np.zeros((rawdata.shape[0], 1), dtype='uint8')
# 8-bit shift for negative integers
zeroarray[rawdata[:, 2] >= 128] += 255
rawdata = np.array(
np.hstack((rawdata, zeroarray)), dtype='uint8')
rawdata = (
rawdata.flatten().view('i4') / 32.).astype('float32')
rawdata = rawdata.reshape(
dataRecorded, nbChannels, sampleRate)
rawdata = np.rollaxis(
rawdata, 1).reshape(nbChannels, -1)
eegFile = np.memmap(self.eegFile, mode='w+', dtype='float32',
shape=rawdata.shape)
eegFile[:] = rawdata[:]
rawdata = np.memmap(self.eegFile, mode='r', dtype='float32',
shape=(nbChannels, sampleRate * dataRecorded))
# Create Marker value and timestamp
status = rawdata[np.where(labelsChannel == 'Status')][0]
status = status - np.median(status)
timepoint = (np.diff(status) != 0).nonzero()[0] + 1
markerTime = timepoint[np.where(status[timepoint] != 0)]
markerValue = np.uint8(status[markerTime] * 32)
# Prepare output
self.rawdata = rawdata[:-1, :]
self.startDate = '20%s/%s/%s' % (
startDate[6:8], startDate[3:5], startDate[:2])
self.startTime = startTime
self.dataRecorded = dataRecorded
self.durationRecorded = durationRecorded
self.labelsChannel = labelsChannel[:-1]
self.sampleRate = sampleRate
self.markerTime = markerTime
self.markerValue = np.copy(markerValue).astype('|S16')
self.fileType = 'BDF'
def readBrainVision(self):
self.eegFile = self.filename
hdrFile = self.eegFile.replace('.eeg', '.vhdr')
markerFile = self.eegFile.replace('.eeg', '.vmrk')
# Aggregate Header Information
with open(hdrFile) as f:
tmpHeader = f.readlines()
tmpHeader = [t.strip() for t in tmpHeader]
self.labelsChannel = []
readChannelNames = False
for line in tmpHeader:
if readChannelNames:
if len(line) == 0 or line[0] == ';':
continue
elif line[0:2] == 'Ch':
channelName = line.split(',')[0]
idEqual = channelName.index('=')
channelName = channelName[idEqual + 1:]
self.labelsChannel.append(channelName)
else:
readChannelNames = False
elif 'SamplingInterval=' in line:
sampleInterval = int(line.split('=')[1])
self.sampleRate = int(1e6 / float(sampleInterval))
elif 'BinaryFormat=' in line:
binaryFormat = line.split('=')[1]
elif 'recording started at' in line:
self.startTime = line.split('started at')[-1][1:9]
elif '[Channel Infos]' in line:
readChannelNames = True
self.labelsChannel = np.asarray(self.labelsChannel)
# Aggregate Marker Information
with open(markerFile) as f:
tmpMarker = f.readlines()
tmpMarker = [t.strip() for t in tmpMarker]
tmpMarker = [m.split(',') for m in tmpMarker if m[:2] == 'Mk']
self.markerValue = []
self.markerTime = []
for e in tmpMarker:
if 'New Segment' in e[0]:
recDate = e[5][:8]
self.startDate = '%s/%s/%s' % (
recDate[6:], recDate[4:6], recDate[:4])
else:
self.markerValue.append(e[1])
self.markerTime.append(int(e[2]))
self.markerValue = np.array(self.markerValue).astype('|S16')
self.markerTime = np.array(self.markerTime)
# Aggregate Data Information
numberFormat = {'INT_16': np.int16,
'IEEE_FLOAT_32': np.float32}
dataType = numberFormat[binaryFormat]
rawdata = np.memmap(self.eegFile, dataType, 'r')
nbChannels = self.labelsChannel.shape[0]
        timepoints = rawdata.shape[0] // nbChannels
self.dataRecorded = float(timepoints) / self.sampleRate
self.durationRecorded = 1
self.rawdata = np.rollaxis(rawdata.reshape(timepoints, nbChannels), 1)
self.fileType = 'BrainVision'
def resliceData(self, newSampleRate):
if newSampleRate != self.sampleRate:
divisor = float(self.sampleRate) / newSampleRate
# Rewrite markerTime
self.markerTime = (self.markerTime / divisor).astype('int')
# Reslice rawdata with slicer
slicer = np.arange(0, self.rawdata.shape[1], int(divisor))
self.rawdata = self.rawdata[:, slicer]
# Rewrite sampleRate
self.sampleRate = newSampleRate
def excludeChannels(self, channels2exclude):
keepID = [i for i, e in enumerate(self.labelsChannel)
if e not in channels2exclude]
self.labelsChannel = self.labelsChannel[keepID]
self.rawdata = self.rawdata[keepID]
class ReadXYZ:
def __init__(self, filename):
# Read XYZ File
with open(filename) as f:
content = f.readlines()
self.coord = []
self.labels = []
for i, e in enumerate(content):
if i != 0:
coord = e.split()
self.coord.append([float(coord[0]),
float(coord[1]),
float(coord[2])])
self.labels.append(coord[3])
self.coord = np.array(self.coord)
self.labels = np.array(self.labels)
# Get ID of 10% most frontal channels
xValue = self.coord[:, 0]
self.frontalID = np.where(xValue >= np.percentile(xValue, 90))[0]
class SaveTVA:
def __init__(self, data, precut, postcut):
dataSize = [
len([m for m in e.markerTime
if m > precut and m < e.rawdata.shape[1] - postcut])
for e in data.Datasets]
tvaMarker = [
1 if 'ok_' in m else 0 for m in data.Results.matrixSelected]
# Go through the files and save TVA for each
counter = 0
for i, n in enumerate(data.Filenames):
filename = join(data.DirPath, n + '.lineviewer.tva')
with open(filename, 'w') as f:
f.writelines('TV01\n')
for j in range(dataSize[i]):
f.writelines('%d\t0\t%s\n' % (
tvaMarker[counter], data.Results.markers[counter]))
counter += 1
class SaveERP:
def __init__(self, resultsName, resultsPath, results, markers2hide,
preFrame):
# Create output folder if it doesn't exist
if not exists(resultsPath):
makedirs(resultsPath)
# Go through all the markers
for i, m in enumerate(results.uniqueMarkers):
# Do nothing if marker was hidden
if m in markers2hide:
continue
# Write GFP data into ERP file
nTimepoint = results.avgGFP[0].shape[0]
filename = '%s.ERP_%s.GFP.eph' % (resultsName, m)
with open(join(resultsPath, filename), 'w') as f:
f.writelines('{:>15}\t{:>15}\t{:>25}\n'.format(
1, nTimepoint, results.sampleRate))
for tValue in results.avgGFP[i]:
f.writelines('{:>15}\n'.format(round(tValue, 7)))
# Write GFP marker file
filename += '.mrk'
with open(join(resultsPath, filename), 'w') as f:
f.writelines(
'TL02\n{:>12}\t{:>12}\t"Origin"\n'.format(preFrame,
preFrame))
# Write electrode data into ERP file
nSignal, nTimepoint = results.avgEpochs[0].shape
filename = '%s.ERP_%s.eph' % (resultsName, m)
with open(join(resultsPath, filename), 'w') as f:
f.writelines('{:>15}\t{:>15}\t{:>25}\n'.format(
nSignal, nTimepoint, results.sampleRate))
for tValues in results.avgEpochs[i].T:
formatString = '{:>15}\t' * nSignal
formatString = formatString[:-1] + '\n'
f.writelines(
formatString.format(*np.round(tValues, 7).tolist()))
# Write ERP marker file
filename += '.mrk'
with open(join(resultsPath, filename), 'w') as f:
f.writelines(
'TL02\n{:>12}\t{:>12}\t"Origin"\n'.format(preFrame,
preFrame))
class SaveEpochs:
def __init__(self, resultsPath, results, preFrame):
# Create output folder if it doesn't exist
if not exists(resultsPath):
makedirs(resultsPath)
# Go through all epochs
for i, epoch in enumerate(results.epochs):
# Only save epochs that were OK
if not results.okID[i]:
continue
# Write electrode data into ERP file
nSignal, nTimepoint = epoch.shape
marker = results.markers[i]
filename = 'Epoch_%.4d_%s.eph' % (i + 1, marker)
with open(join(resultsPath, filename), 'w') as f:
f.writelines('{:>15}\t{:>15}\t{:>25}\n'.format(
nSignal, nTimepoint, results.sampleRate))
for tValues in epoch.T:
formatString = '{:>15}\t' * nSignal
formatString = formatString[:-1] + '\n'
f.writelines(
formatString.format(*np.round(tValues, 7).tolist()))
# Write Epoch marker file
filename += '.mrk'
with open(join(resultsPath, filename), 'w') as f:
f.writelines(
'TL02\n{:>12}\t{:>12}\t"Origin"\n'.format(preFrame,
preFrame))
class SaveFigures:
def __init__(self, resultsName, resultsPath, figures):
figures.Overview.figure.savefig(
join(resultsPath, 'plot_Overview.png'), bbox_inches='tight')
figures.GFPSummary.figure.savefig(
join(resultsPath, 'plot_GFP_Summary.png'), bbox_inches='tight')
figures.GFPDetail.figure.savefig(
join(resultsPath, 'plot_GFP_Detail.png'), bbox_inches='tight')
markers = figures.ERPSummary.ComboMarkers.GetItems()[1:]
for m in markers:
figures.ERPSummary.update(str(m))
figures.ERPSummary.figure.savefig(
join(resultsPath, 'plot_ERP_Marker_%s.png' % str(m)),
bbox_inches='tight')
class SaveVerbose:
def __init__(self, resultsName, resultsPath, data):
# abbreviation to shorten variable name
res = data.Results
# Write Verbose File
with open(join(resultsPath, '%s.vrb' % resultsName), 'w') as f:
f.writelines('Verbose File\n============\n\n')
f.writelines('LINEViewer (Version %s)\n' % data.VERSION)
f.writelines('%s\n\n\n' % time.strftime('%Y/%m/%d %H:%M:%S'))
# Information about the preprocessing
f.writelines(
'Processing Information:\n-----------------------\n\n')
f.writelines('DC removed\t\t\t:\t%s\n' % res.removeDC)
f.writelines('Reference to\t\t:\t%s\n' % res.newReference)
highcut = res.highcut if res.highcut != 0 else 'None'
lowcut = res.lowcut if res.lowcut != 0 else 'None'
f.writelines('High-pass filter\t:\t%s\n' % highcut)
f.writelines('Low-pass filter\t\t:\t%s\n' % lowcut)
notch = res.notchValue if res.doNotch else 'None'
f.writelines('Notch\t\t\t\t:\t%s\n' % notch)
f.writelines('\n')
f.writelines('Epoch duration pre\t:\t%sms / %s sampling points\n' %
(res.preEpoch, res.preFrame))
f.writelines(
'Epoch duration post\t:\t%sms / %s sampling points\n' %
(res.postEpoch, res.postFrame))
f.writelines('Baseline correction\t:\t%s\n' % res.baselineCorr)
f.writelines('Blink correction\t:\t%s\n' % res.blinkCorr)
f.writelines('Thresh. correction\t:\t%s\n' % res.thresholdCorr)
f.writelines('Threshold [mikroV]\t:\t%s\n' % res.threshold)
f.writelines('Channels ignored\t:\t%s\n' %
data.Specs.channels2ignore)
f.writelines('\n')
if hasattr(res, 'collapsedTransform'):
collapsedInfo = [
'%s -> %s' % (res.collapsedTransform[e], e)
for e in res.collapsedTransform]
collapsedInfo = ', '.join(collapsedInfo)
else:
collapsedInfo = 'None'
f.writelines('Markers collapsed\t:\t%s\n' % collapsedInfo)
f.writelines('Markers hidden\t\t:\t%s\n' % data.markers2hide)
f.writelines('\n')
f.writelines('Interpolated ch.\t:\t%s\n' %
data.Specs.channels2interpolate)
xyzFile = data.Specs.xyzFile if data.Specs.xyzFile != '' \
else 'None'
f.writelines('XYZ-file path\t\t:\t%s\n' % xyzFile)
f.writelines('\n\n')
# Information about the ERP output
f.writelines('ERP Information:\n----------------\n\n')
epochsTotal = res.epochs.shape[0]
epochsOK = res.okID.sum()
percentOK = np.round(float(epochsOK) / epochsTotal, 2) * 100
f.writelines('Channels #\t\t\t:\t%s\n' % res.epochs.shape[1])
f.writelines('Markers #\t\t\t:\t%s\n' % len(res.uniqueMarkers))
f.writelines('Marker value\t\t:\t%s\n' %
', '.join(res.uniqueMarkers.astype('str')))
f.writelines('Epochs total #\t\t:\t%s\n' % epochsTotal)
f.writelines(
'Epochs in ERP\t:\t{0} / {1}%\n'.format(epochsOK,
percentOK))
f.writelines('\n')
selected = len(np.where(res.matrixSelected == 'selected')[0])
ok_normal = len(np.where(res.matrixSelected == 'ok_normal')[0])
threshold = len(np.where(res.matrixSelected == 'threshold')[0])
            ok_thresh = len(np.where(res.matrixSelected == 'ok_thresh')[0])
# File name: NN
# Copyright 2017 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
from NNActivations import activation_functions
class NN:
    '''Creates an NN instance, a simple numpy-based neural network.
    Args:
        L(list): List of nodes in each of the layers, including the input and output layer.
        activations(list): List of activations in the different layers {'relu', 'sigmoid', 'leaky-relu'}.
    Attributes:
        L(list): List of nodes in each of the layers, including the input and output layer.
        layers(int): Number of layers.
        activations(list): List of activations in the different layers {'relu', 'sigmoid', 'leaky-relu'}.
parameters(dict): Dictionary with ndarray of weights and biases.
gradient_parameters(dict): Dictionary with ndarray of gradients of weights and biases.
'''
def __init__(self, L, activations, unit_test=False):
self.L = L
self.layers = len(self.L)
self.activations = activations
self.parameters = {}
self.gradient_parameters = {}
self._update_count = 1
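    # Usage sketch (assumed shapes): a 2-16-1 binary classifier could be built
    # as NN([2, 16, 1], ['relu', 'sigmoid']); `activations` needs one entry per
    # weight layer, i.e. len(activations) == len(L) - 1.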
def initialize_parameters(self, method='xavier', epsilon=0.01, beta1=0.0, beta2=0.0, seed=1):
'''
Initializes dictionary of parameters of weights and biases
Args:
method(str): Method of weights initialization, 'xavier','he' or nothing.
            epsilon(float): If method is neither 'xavier' nor 'he', this is used as the scale factor for the weights.
            beta1(float): Momentum beta1, if 0 then there is no momentum.
            beta2(float): RMSprop beta2, if 0 then there is no rmsprop.
            seed(int): Random number generator seed.
'''
np.random.seed(seed)
for l in range(1, self.layers):
if method == 'xavier':
factor = np.sqrt(1 / self.L[l - 1])
elif method == 'he':
factor = np.sqrt(2 / self.L[l - 1])
else:
factor = epsilon
self.parameters['W' + str(l)] = np.random.randn(self.L[l], self.L[l - 1]) * factor
self.parameters['b' + str(l)] = np.zeros((self.L[l], 1))
if beta1 != 0:
self.gradient_parameters['dVW' + str(l)] = np.zeros((self.L[l], self.L[l - 1]))
self.gradient_parameters['dVb' + str(l)] = np.zeros((self.L[l], 1))
if beta2 != 0:
self.gradient_parameters['dSW' + str(l)] = np.zeros((self.L[l], self.L[l - 1]))
self.gradient_parameters['dSb' + str(l)] = np.zeros((self.L[l], 1))
def forward_propogation(self, X, keep_prob=1.0, gradient_check=False):
'''
Return the final activation and cache after forward proppogation.
Args:
X (ndarry): Samples as columns, features in rows.
keep_prob(float): If less than 1.0, dropout regularization will be implemented.
gradient_check(boolean): Switches off dropout to allow checking gradient with a numerical check.
Returns:
tuple: A (ndarray), Final activation for each sample as array of floats,
cache (dict), dictionary of the Z, A and derivs for each layer.
'''
np.random.seed(1)
cache = {}
if gradient_check:
keep_prob = 1.0
if type(keep_prob) != list:
keep_prob = np.ones(len(self.activations)) * keep_prob
XT = X[:].T
cache['A0'] = XT
for l in range(1, self.layers):
W = self.parameters['W' + str(l)]
b = self.parameters['b' + str(l)]
G = activation_functions[self.activations[l - 1]]
Z = np.matmul(W, cache['A' + str(l - 1)]) + b
A, deriv = G(Z)
keep = np.random.rand(A.shape[0], A.shape[1])
keep = keep <= keep_prob[l - 1]
keep = keep * 1. / keep_prob[l - 1]
A = np.multiply(A, keep)
deriv = np.multiply(deriv, keep)
# cache['Z' + str(l)] = Z
# cache['D'+str(l)] = keep
cache['A' + str(l)] = A
cache['deriv' + str(l)] = deriv
return A, cache
def L2_cost(self):
'''
        Returns the sum of the squared weights (the squared L2 norm).
        Returns:
            float: sum of squared weights.
'''
L2 = 0.
for parameter_name, parameter in self.parameters.items():
if 'W' in parameter_name:
L2 += np.sum(np.square(parameter))
return L2
def compute_cost(self, AL, Y, lambd):
'''
        Computes the cross-entropy cost of the predictions.
        Args:
            AL(ndarray): Final activations (predicted probabilities).
            Y(ndarray): Labels.
            lambd(float): if not None or 0, L2 regularization is applied with this penalty.
Returns:
float: cost.
'''
YT = Y[:].T
m = AL.shape[1]
        cost = -np.multiply(YT, np.log(AL)) - np.multiply(1 - YT, np.log(1 - AL))
try:
from ulab import numpy as np
except ImportError:
import numpy as np
print(len(np.array([1, 2, 3, 4, 5], dtype=np.uint8)))
print(len(np.array([[1, 2, 3],[4, 5, 6]])))
print(~np.array([0, -1, -100], dtype=np.uint8))
print(~np.array([0, -1, -100], dtype=np.uint16))
print(~np.array([0, -1, -100], dtype=np.int8))
print(~np.array([0, -1, -100], dtype=np.int16))
print(abs(np.array([0, -1, -100], dtype=np.uint8)))
print(abs(np.array([0, -1, -100], dtype=np.uint16)))
print(abs(np.array([0, -1, -100], dtype=np.int8)))
print(abs(np.array([0, -1, -100], dtype=np.int16)))
print(abs(np.array([0, -1, -100], dtype=np.float)))
print(-(np.array([0, -1, -100], dtype=np.uint8)))
print(-(np.array([0, -1, -100], dtype=np.uint16)))
print(-(np.array([0, -1, -100], dtype=np.int8)))
print(-(np.array([0, -1, -100], dtype=np.int16)))
print(-(np.array([0, -1, -100], dtype=np.float)))
print(+(np.array([0, -1, -100], dtype=np.uint8)))
print(+(np.array([0, -1, -100], dtype=np.uint16)))
print(+(np.array([0, -1, -100], dtype=np.int8)))
print(+(np.array([0, -1, -100], dtype=np.int16)))
print(+(np.array([0, -1, -100], dtype=np.float)))
print(np.array([1,2,3], dtype=np.float) > np.array([4,5,6], dtype=np.float))
print(np.array([1,2,3], dtype=np.float) > np.array([4,5,6], dtype=np.uint16))
print(np.array([1,2,3], dtype=np.float) > np.array([4,5,6], dtype=np.int16))
print(np.array([1,2,3], dtype=np.float) < np.array([4,5,6], dtype=np.float))
print(np.array([1,2,3], dtype=np.float) < np.array([4,5,6], dtype=np.uint16))
print(np.array([1,2,3], dtype=np.float) < np.array([4,5,6], dtype=np.int16))
print(np.array([1,2,3], dtype=np.float) >= np.array([4,5,6], dtype=np.float))
print(np.array([1,2,3], dtype=np.float) >= np.array([4,5,6], dtype=np.uint16))
print(np.array([1,2,3], dtype=np.float) >= np.array([4,5,6], dtype=np.int16))
print(np.array([1,2,3], dtype=np.float) <= np.array([4,5,6], dtype=np.float))
print(np.array([1,2,3], dtype=np.float) <= np.array([4,5,6], dtype=np.uint16))
print(np.array([1,2,3], dtype=np.float) <= np.array([4,5,6], dtype=np.int16))
print(np.array([1,2,3], dtype=np.float) > 4)
print(np.array([1,2,3], dtype=np.float) > 4.0)
print(np.array([1,2,3], dtype=np.float) < 4)
print(np.array([1,2,3], dtype=np.float) < 4.0)
print(np.array([1,2,3], dtype=np.float) == np.array([4,5,6], dtype=np.float))
print(np.array([1,2,3], dtype=np.float) == np.array([4,5,6], dtype=np.uint16))
print(np.array([1,2,3], dtype=np.float) == np.array([4,5,6], dtype=np.int16))
print(np.array([1,2,3], dtype=np.float) != np.array([4,5,6], dtype=np.float))
print(np.array([1,2,3], dtype=np.float) != np.array([4,5,6], dtype=np.uint16))
print(np.array([1,2,3], dtype=np.float) != np.array([4,5,6], dtype=np.int16))
print(np.array([1,2,3], dtype=np.float) == 4)
print(np.array([1,2,3], dtype=np.float) == 4.0)
print(np.array([1,2,3], dtype=np.float) != 4)
print(np.array([1,2,3], dtype=np.float) != 4.0)
print(np.array([1,2,3], dtype=np.float) - np.array([4,5,6], dtype=np.float))
print(np.array([1,2,3], dtype=np.float) - np.array([4,5,6], dtype=np.uint16))
print(np.array([1,2,3], dtype=np.float) - np.array([4,5,6], dtype=np.int16))
print(np.array([1,2,3], dtype=np.float))
#!/usr/bin/env python3
"""
Identify change points throughout the entire TU of each individual sample.
The premise of this approach is to identify significant change points in the
cumulative read sum (CRS) distribution as a function of position. It identifies
the following change point types: DistalTSS, TandemTSS, DistalPolyA, TandemAPA,
Junction, Exon, and Intron.
"""
import os
import sys
import argparse
import math
from collections import defaultdict, OrderedDict, deque
from datetime import datetime
import numpy as np # v1.10.4
import peakutils # v1.0.3
import pysam # v0.9.0
import pybedtools as pb
from scipy import stats # v0.15.1
from loguru import logger
try:
from functions import sort_bedfile, run_command
except ImportError:
from src.functions import sort_bedfile, run_command
# median filter
from bisect import bisect_left, insort
from itertools import islice
# plotting
import matplotlib
matplotlib.use('pdf') # force matplotlib to not use any Xwindows backend for plotting
import matplotlib.pyplot as pyplot
from matplotlib.backends.backend_pdf import PdfPages
def bedgraph_per_gene_ss(genes, bg_plus, bg_minus, bgfile):
"""
bedtools intersect genes with each of bg_plus and bg_minus.
Run separately so that gene coverage is consecutive by strand.
"""
# === split annotation ===
plus_bed = bgfile + '.genes.plus'
minus_bed = bgfile + '.genes.minus'
p = open(plus_bed, 'w')
m = open(minus_bed, 'w')
with open(genes, 'r') as f:
for line in f:
if not line.startswith('track'):
strand = line.rstrip().split('\t')[5]
if strand == '+':
p.write(line)
elif strand == '-':
m.write(line)
else:
logger.error('do not recognize strand: ' + strand)
logger.error(line)
sys.exit(1)
p.close()
m.close()
# === bedtools intersect: concatenate + & - strands ===
sort_bedfile(bg_plus, bg_plus)
pb.BedTool(plus_bed).intersect(bg_plus, wo=True, sorted=True).saveas(bgfile + '.plus')
# os.remove(bg_plus)
sort_bedfile(bg_minus, bg_minus)
pb.BedTool(minus_bed).intersect(bg_minus, wo=True, sorted=True).saveas(bgfile + '.minus')
# os.remove(bg_minus + ".sorted")
t = open(bgfile, 'w')
t.write(open(bgfile + '.plus').read())
t.write(open(bgfile + '.minus').read())
t.close()
for file in [bgfile + '.plus', bgfile + '.minus', plus_bed, minus_bed]:
os.remove(file)
def bedgraph_per_gene_nss(genes, bg, bgfile):
"""Bedtools intersect, non-strand-specific"""
sort_bedfile(bg, bg)
pb.BedTool(genes).intersect(bg, wo=True, sorted=True).saveas(bgfile)
# os.remove(bg + ".sorted")
def get_exon_cov(exon_list, cov_list):
"""Calculate average reads/bp in each exon and report the maximum exon coverage."""
total_sum = 0
total_len = 0
max_cov = 0
for e, exon in enumerate(exon_list):
this_start, this_end = map(int, exon.split(':'))
this_cov = cov_list[this_start:this_end]
total_sum += sum(this_cov)
total_len += this_end - this_start
this_exon_cov = float(sum(this_cov)) / float(this_end - this_start)
if this_exon_cov > max_cov:
max_cov = this_exon_cov
cov_avg_all_exons = float(total_sum) / float(total_len)
return cov_avg_all_exons, max_cov
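# Worked example (illustrative): exon_list = ['0:2', '4:8'] with
# cov_list = [3, 3, 0, 0, 1, 1, 1, 1] gives per-exon coverages of 3.0 and
# 1.0 reads/bp, so the function returns (10/6 ~ 1.67, 3.0).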
def crs(cov_array):
"""Calculate cumulative read sum """
vert_array = np.insert(np.ediff1d(cov_array), [0], 0)
vert_sum_array = np.cumsum(np.absolute(vert_array))
if max(vert_sum_array) == 0:
vert_sum_norm_array = ['NA']
else:
vert_sum_norm_array = vert_sum_array / max(vert_sum_array)
return vert_sum_norm_array, vert_array
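# Worked example (illustrative): cov_array = np.array([0, 2, 2, 5, 5]) gives
# vert_array = [0, 2, 0, 3, 0]; the cumulative absolute sum is [0, 2, 2, 5, 5],
# so the normalised CRS returned is [0.0, 0.4, 0.4, 1.0, 1.0].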
def ks_test(vert_sum_array, make_plots, out_prefix):
"""KS test: cumulative distance vs. line y=ax"""
line_array = np.arange(0, max(vert_sum_array), max(vert_sum_array) / vert_sum_array.size)
ks_stat, ksp = stats.ks_2samp(vert_sum_array, line_array)
y0 = vert_sum_array[0]
xmax = vert_sum_array.size - 1
ymax = max(vert_sum_array)
slope = (ymax - y0) / xmax
if slope == 0:
ksp = 1
if make_plots:
        x1 = np.linspace(0, xmax, vert_sum_array.size)
import numpy as np
from scipy import linalg
sx = 1/2 * np.mat([[0, 1],[ 1, 0]], dtype=complex)
sy = 1/2 * np.mat([[0, -1j],[1j, 0]], dtype=complex)
sz = 1/2 * np.mat([[1, 0],[0, -1]], dtype=complex)
def hamiltonian(j):
J = 4
H = (j) * J * sz + sx
return H
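# Illustrative note: with J = 4 and sz, sx the spin-1/2 operators defined above,
# hamiltonian(0.5) is the 2x2 matrix [[1.0, 0.5], [0.5, -1.0]].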
T = 2*np.pi
N = 20
dt = T/N
I = 500
fidelity = np.zeros(I+1)
observable = np.mat(np.zeros(shape=(2,2), dtype=complex))
observable[-1, -1] = 1
psi = np.mat(np.zeros(shape=(2, N+1), dtype=complex))
psi[0,0] = 1
pseudo = np.mat(np.zeros(shape=(2, N+1), dtype=complex)) #
seq = np.random.rand(N)
import time
import inspect
import logging
import json
import functools
from abc import abstractmethod
from typing import Dict, Union, List
from pathlib import Path
from collections import OrderedDict
import numpy as np
import torch as th
from numpy.random import randint
from torch.utils.data import Dataset
from typeguard import typechecked
import data_loader
from utils.util import ensure_tensor, expert_tensor_storage
from zsvision.zs_utils import memcache
# For SLURM usage, buffering makes it difficult to see events as they happen, so we set
# the global print statement to enforce flushing
print = functools.partial(print, flush=True)
class BaseDataset(Dataset):
@staticmethod
@abstractmethod
@typechecked
def dataset_paths() -> Dict[str, Union[Path, str]]:
"""Generates a datastructure containing all the paths required to load features
"""
raise NotImplementedError
@abstractmethod
def sanity_checks(self):
"""Run sanity checks on loaded data
"""
raise NotImplementedError
@abstractmethod
def load_features(self):
"""Load features from disk
"""
raise NotImplementedError
@typechecked
def __init__(
self,
data_dir: Path,
fuse_captions: bool,
spatial_feats: bool,
challenge_mode: bool,
eval_only: bool,
use_zeros_for_missing: bool,
task: str,
text_agg: str,
text_feat: str,
split_name: str,
cls_partition: str,
root_feat_folder: str,
challenge_test_root_feat_folder: str,
subsample_training_data_fraction: float,
text_dim: int,
num_test_captions: int,
restrict_train_captions: int,
max_tokens: Dict[str, int],
text_dropout: float,
logger: logging.Logger,
raw_input_dims: Dict[str, int],
feat_aggregation: Dict[str, Dict],
):
self.task = task
self.eval_only = eval_only
self.logger = logger
self.challenge_mode = challenge_mode
self.text_feat = text_feat
self.data_dir = data_dir
self.text_dim = text_dim
self.spatial_feats = spatial_feats
self.text_dropout = text_dropout
self.restrict_train_captions = restrict_train_captions
self.subsample_training_data_fraction = subsample_training_data_fraction
self.max_tokens = max_tokens
self.cls_partition = cls_partition
self.fuse_captions = fuse_captions
self.num_test_captions = num_test_captions
self.feat_aggregation = feat_aggregation
self.root_feat = data_dir / root_feat_folder
self.challenge_test_root_feat_folder = data_dir / challenge_test_root_feat_folder
self.experts = set(raw_input_dims.keys())
# This attributes can be overloaded by different datasets, so it must be set
# before the `load_features() method call`
self.restrict_test_captions = None
self.text_features = None
self.label_features = None
self.video_labels = None
self.raw_captions = None
self.features = None
# Use a single caption per video when forming training minibatches (different
# captions from the same video may still be used across different minibatches)
self.captions_per_video = 1
# TODO(Samuel) - is a global fixed ordering still necessary?
self.ordered_experts = list(raw_input_dims.keys())
# Training and test lists are set by dataset-specific subclasses
self.partition_lists = {}
self.configure_train_test_splits(split_name=split_name)
# All retrieval-based tasks use a single dataloader (and handle the retrieval
# data separately), whereas for classification we use one dataloader for
# training and one for validation.
self.logger.info("The current task is {}".format(self.task))
self.sample_list = self.partition_lists["train"]
if self.subsample_training_data_fraction < 1:
num_train = len(self.sample_list)
num_keep = int(self.subsample_training_data_fraction * num_train)
sampled_train = np.random.choice(self.sample_list, num_keep, replace=False)
self.logger.info(f"Sampling training samples [{num_train} -> {num_keep}]")
self.sample_list = sampled_train.tolist()
self.num_samples = len(self.sample_list)
num_val = len(self.partition_lists["val"])
if self.task == "classification":
self.sample_list = self.partition_lists[self.cls_partition]
self.num_samples = len(self.sample_list)
self.logger.info("The current cls_partition is {}".format(self.cls_partition))
# The number of classes and class type (i.e. single or multi-label) must be
# overriden in the subclass
self.num_classes = None
self.class_type = None
self.raw_input_dims = raw_input_dims
# we store default paths to enable visualisations (this can be overloaded by
# dataset-specific classes)
self.video_path_retrieval = [f"videos/{x}.mp4" for x
in self.partition_lists["val"]]
# NOTE: We use nans rather than zeros to indicate missing faces, unless we wish
# to test single modality strength, which requires passing zeroed features for
# missing videos
if use_zeros_for_missing:
self.MISSING_VAL = 0
else:
self.MISSING_VAL = np.nan
# load the dataset-specific features into memory
self.load_features()
if text_agg == "avg":
self.logger.info("averaging the text features...")
for key, val in self.text_features.items():
                self.text_features[key] = [np.mean(x, 0, keepdims=1) for x in val]
import os
import json
import torch
from torchvision import transforms
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
import random
import torchvision.transforms.functional as tf
ROTATE=False
def imresize(im, size, interp='bilinear'):
if interp == 'nearest':
resample = Image.NEAREST
elif interp == 'bilinear':
resample = Image.BILINEAR
elif interp == 'bicubic':
resample = Image.BICUBIC
else:
raise Exception('resample method undefined!')
return im.resize(size, resample)
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, odgt, opt, **kwargs):
# parse options
self.imgSizes = opt.imgSizes
self.imgMaxSize = opt.imgMaxSize
# max down sampling rate of network to avoid rounding during conv or pooling
self.padding_constant = opt.padding_constant
# parse the input list
self.parse_input_list(odgt, **kwargs)
# mean and std
self.normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def parse_input_list(self, odgt, max_sample=-1, start_idx=-1, end_idx=-1):
if isinstance(odgt, list):
self.list_sample = odgt
elif isinstance(odgt, str):
self.list_sample = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
if start_idx >= 0 and end_idx >= 0: # divide file list
self.list_sample = self.list_sample[start_idx:end_idx]
self.num_sample = len(self.list_sample)
assert self.num_sample > 0
print('# samples: {}'.format(self.num_sample))
def img_transform(self, img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = self.normalize(torch.from_numpy(img.copy()))
return img
def segm_transform(self, segm):
# to tensor, -1 to 149
segm = np.array(segm)
#segm[segm>1]=1
#print(np.unique(segm))
segm = torch.from_numpy(segm).long() - 1
return segm
# Round x to the nearest multiple of p and x' >= x
def round2nearest_multiple(self, x, p):
return ((x - 1) // p + 1) * p
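    # Example: with padding_constant p = 8, round2nearest_multiple(30, 8) = 32
    # and round2nearest_multiple(32, 8) = 32, i.e. x is always rounded *up* to
    # the nearest multiple of p so that downsampled label maps stay aligned.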
class TrainDataset(BaseDataset):
def __init__(self, root_dataset, odgt, opt, batch_per_gpu=1, **kwargs):
super(TrainDataset, self).__init__(odgt, opt, **kwargs)
self.root_dataset = root_dataset
# down sampling rate of segm labe
self.segm_downsampling_rate = opt.segm_downsampling_rate
self.batch_per_gpu = batch_per_gpu
# classify images into two classes: 1. h > w and 2. h <= w
self.batch_record_list = [[], []]
        # override dataset length when training with batch_per_gpu > 1
self.cur_idx = 0
self.if_shuffled = False
def _get_sub_batch(self):
while True:
# get a sample record
this_sample = self.list_sample[self.cur_idx]
if this_sample['height'] > this_sample['width']:
self.batch_record_list[0].append(this_sample) # h > w, go to 1st class
else:
self.batch_record_list[1].append(this_sample) # h <= w, go to 2nd class
# update current sample pointer
self.cur_idx += 1
if self.cur_idx >= self.num_sample:
self.cur_idx = 0
np.random.shuffle(self.list_sample)
if len(self.batch_record_list[0]) == self.batch_per_gpu:
batch_records = self.batch_record_list[0]
self.batch_record_list[0] = []
break
elif len(self.batch_record_list[1]) == self.batch_per_gpu:
batch_records = self.batch_record_list[1]
self.batch_record_list[1] = []
break
return batch_records
def __getitem__(self, index):
# NOTE: random shuffle for the first time. shuffle in __init__ is useless
if not self.if_shuffled:
np.random.seed(index)
np.random.shuffle(self.list_sample)
self.if_shuffled = True
# get sub-batch candidates
batch_records = self._get_sub_batch()
# resize all images' short edges to the chosen size
if isinstance(self.imgSizes, list) or isinstance(self.imgSizes, tuple):
this_short_size = np.random.choice(self.imgSizes)
else:
this_short_size = self.imgSizes
# calculate the BATCH's height and width
# since we concat more than one samples, the batch's h and w shall be larger than EACH sample
batch_widths = np.zeros(self.batch_per_gpu, np.int32)
batch_heights = np.zeros(self.batch_per_gpu, np.int32)
for i in range(self.batch_per_gpu):
img_height, img_width = batch_records[i]['height'], batch_records[i]['width']
this_scale = min(
this_short_size / min(img_height, img_width), \
self.imgMaxSize / max(img_height, img_width))
batch_widths[i] = img_width * this_scale
batch_heights[i] = img_height * this_scale
# Here we must pad both input image and segmentation map to size h' and w' so that p | h' and p | w'
batch_width = np.max(batch_widths)
batch_height = np.max(batch_heights)
batch_width = int(self.round2nearest_multiple(batch_width, self.padding_constant))
batch_height = int(self.round2nearest_multiple(batch_height, self.padding_constant))
assert self.padding_constant >= self.segm_downsampling_rate, \
'padding constant must be equal to or larger than segm downsampling rate'
batch_images = torch.zeros(
self.batch_per_gpu, 3, batch_height, batch_width)
batch_segms = torch.zeros(
self.batch_per_gpu,
batch_height // self.segm_downsampling_rate,
batch_width // self.segm_downsampling_rate).long()
for i in range(self.batch_per_gpu):
this_record = batch_records[i]
# load image and label
image_path = os.path.join(self.root_dataset, this_record['fpath_img'])
segm_path = os.path.join(self.root_dataset, this_record['fpath_segm'])
img = Image.open(image_path).convert('RGB')
segm = Image.open(segm_path)
assert(segm.mode == "L")
assert(img.size[0] == segm.size[0])
assert(img.size[1] == segm.size[1])
# random_flip
if np.random.choice([0, 1]):
img = img.transpose(Image.FLIP_LEFT_RIGHT)
segm = segm.transpose(Image.FLIP_LEFT_RIGHT)
# note that each sample within a mini batch has different scale param
img = imresize(img, (batch_widths[i], batch_heights[i]), interp='bilinear')
segm = imresize(segm, (batch_widths[i], batch_heights[i]), interp='nearest')
# further downsample seg label, need to avoid seg label misalignment
segm_rounded_width = self.round2nearest_multiple(segm.size[0], self.segm_downsampling_rate)
segm_rounded_height = self.round2nearest_multiple(segm.size[1], self.segm_downsampling_rate)
segm_rounded = Image.new('L', (segm_rounded_width, segm_rounded_height), 0)
segm_rounded.paste(segm, (0, 0))
segm = imresize(
segm_rounded,
(segm_rounded.size[0] // self.segm_downsampling_rate, \
segm_rounded.size[1] // self.segm_downsampling_rate), \
interp='nearest')
# image transform, to torch float tensor 3xHxW
img = self.img_transform(img)
# segm transform, to torch long tensor HxW
segm = self.segm_transform(segm)
# put into batch arrays
batch_images[i][:, :img.shape[1], :img.shape[2]] = img
batch_segms[i][:segm.shape[0], :segm.shape[1]] = segm
output = dict()
output['img_data'] = batch_images
output['seg_label'] = batch_segms
return output
def __len__(self):
return int(1e10) # It's a fake length due to the trick that every loader maintains its own list
#return self.num_sampleclass
class ValDataset(BaseDataset):
def __init__(self, root_dataset, odgt, opt, **kwargs):
super(ValDataset, self).__init__(odgt, opt, **kwargs)
self.root_dataset = root_dataset
def __getitem__(self, index):
this_record = self.list_sample[index]
# load image and label
image_path = os.path.join(self.root_dataset, this_record['fpath_img'])
segm_path = os.path.join(self.root_dataset, this_record['fpath_segm'])
img = Image.open(image_path).convert('RGB')
segm = Image.open(segm_path)
assert(segm.mode == "L")
assert(img.size[0] == segm.size[0])
assert(img.size[1] == segm.size[1])
ori_width, ori_height = img.size
img_resized_list = []
for this_short_size in self.imgSizes:
# calculate target height and width
scale = min(this_short_size / float(min(ori_height, ori_width)),
self.imgMaxSize / float(max(ori_height, ori_width)))
target_height, target_width = int(ori_height * scale), int(ori_width * scale)
# to avoid rounding in network
target_width = self.round2nearest_multiple(target_width, self.padding_constant)
target_height = self.round2nearest_multiple(target_height, self.padding_constant)
# resize images
img_resized = imresize(img, (target_width, target_height), interp='bilinear')
# image transform, to torch float tensor 3xHxW
img_resized = self.img_transform(img_resized)
img_resized = torch.unsqueeze(img_resized, 0)
img_resized_list.append(img_resized)
# segm transform, to torch long tensor HxW
segm = self.segm_transform(segm)
batch_segms = torch.unsqueeze(segm, 0)
output = dict()
output['img_ori'] = np.array(img)
output['img_data'] = [x.contiguous() for x in img_resized_list]
output['seg_label'] = batch_segms.contiguous()
output['info'] = this_record['fpath_img']
return output
def __len__(self):
return self.num_sample
class TestDataset(BaseDataset):
def __init__(self, odgt, opt, **kwargs):
super(TestDataset, self).__init__(odgt, opt, **kwargs)
def __getitem__(self, index):
this_record = self.list_sample[index]
# load image
image_path = this_record['fpath_img']
img = Image.open(image_path).convert('RGB')
ori_width, ori_height = img.size
img_resized_list = []
for this_short_size in self.imgSizes:
# calculate target height and width
scale = min(this_short_size / float(min(ori_height, ori_width)),
self.imgMaxSize / float(max(ori_height, ori_width)))
target_height, target_width = int(ori_height * scale), int(ori_width * scale)
# to avoid rounding in network
target_width = self.round2nearest_multiple(target_width, self.padding_constant)
target_height = self.round2nearest_multiple(target_height, self.padding_constant)
# resize images
img_resized = imresize(img, (target_width, target_height), interp='bilinear')
# image transform, to torch float tensor 3xHxW
img_resized = self.img_transform(img_resized)
img_resized = torch.unsqueeze(img_resized, 0)
img_resized_list.append(img_resized)
output = dict()
output['img_ori'] = np.array(img)
#!/usr/bin/env python
"""
Evaluation of conformal predictors.
"""
# Authors: <NAME>
# TODO: cross_val_score/run_experiment should possibly allow multiple models to be evaluated on identical folds
from __future__ import division
from cqr.nonconformist_base import RegressorMixin, ClassifierMixin
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.base import clone, BaseEstimator
class BaseIcpCvHelper(BaseEstimator):
"""Base class for cross validation helpers.
"""
def __init__(self, icp, calibration_portion):
super(BaseIcpCvHelper, self).__init__()
self.icp = icp
self.calibration_portion = calibration_portion
def predict(self, x, significance=None):
return self.icp.predict(x, significance)
class ClassIcpCvHelper(BaseIcpCvHelper, ClassifierMixin):
"""Helper class for running the ``cross_val_score`` evaluation
method on IcpClassifiers.
See also
--------
IcpRegCrossValHelper
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from cqr.nonconformist import IcpClassifier
>>> from cqr.nonconformist import ClassifierNc, MarginErrFunc
>>> from cqr.nonconformist import ClassIcpCvHelper
>>> from cqr.nonconformist import class_mean_errors
>>> from cqr.nonconformist import cross_val_score
>>> data = load_iris()
>>> nc = ProbEstClassifierNc(RandomForestClassifier(), MarginErrFunc())
>>> icp = IcpClassifier(nc)
>>> icp_cv = ClassIcpCvHelper(icp)
>>> cross_val_score(icp_cv,
... data.data,
... data.target,
... iterations=2,
... folds=2,
... scoring_funcs=[class_mean_errors],
... significance_levels=[0.1])
... # doctest: +SKIP
class_mean_errors fold iter significance
0 0.013333 0 0 0.1
1 0.080000 1 0 0.1
2 0.053333 0 1 0.1
3 0.080000 1 1 0.1
"""
def __init__(self, icp, calibration_portion=0.25):
super(ClassIcpCvHelper, self).__init__(icp, calibration_portion)
def fit(self, x, y):
split = StratifiedShuffleSplit(n_splits=1,
                               test_size=self.calibration_portion)
for train, cal in split.split(x, y):
self.icp.fit(x[train, :], y[train])
self.icp.calibrate(x[cal, :], y[cal])
class RegIcpCvHelper(BaseIcpCvHelper, RegressorMixin):
"""Helper class for running the ``cross_val_score`` evaluation
method on IcpRegressors.
See also
--------
IcpClassCrossValHelper
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.ensemble import RandomForestRegressor
>>> from cqr.nonconformist import IcpRegressor
>>> from cqr.nonconformist import RegressorNc, AbsErrorErrFunc
>>> from cqr.nonconformist import RegIcpCvHelper
>>> from cqr.nonconformist import reg_mean_errors
>>> from cqr.nonconformist import cross_val_score
>>> data = load_boston()
>>> nc = RegressorNc(RandomForestRegressor(), AbsErrorErrFunc())
>>> icp = IcpRegressor(nc)
>>> icp_cv = RegIcpCvHelper(icp)
>>> cross_val_score(icp_cv,
... data.data,
... data.target,
... iterations=2,
... folds=2,
... scoring_funcs=[reg_mean_errors],
... significance_levels=[0.1])
... # doctest: +SKIP
fold iter reg_mean_errors significance
0 0 0 0.185771 0.1
1 1 0 0.138340 0.1
2 0 1 0.071146 0.1
3 1 1 0.043478 0.1
"""
def __init__(self, icp, calibration_portion=0.25):
super(RegIcpCvHelper, self).__init__(icp, calibration_portion)
def fit(self, x, y):
split = train_test_split(x, y, test_size=self.calibration_portion)
x_tr, x_cal, y_tr, y_cal = split[0], split[1], split[2], split[3]
self.icp.fit(x_tr, y_tr)
self.icp.calibrate(x_cal, y_cal)
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
def cross_val_score(model,x, y, iterations=10, folds=10, fit_params=None,
scoring_funcs=None, significance_levels=None,
verbose=False):
"""Evaluates a conformal predictor using cross-validation.
Parameters
----------
model : object
Conformal predictor to evaluate.
x : numpy array of shape [n_samples, n_features]
Inputs of data to use for evaluation.
y : numpy array of shape [n_samples]
Outputs of data to use for evaluation.
iterations : int
Number of iterations to use for evaluation. The data set is randomly
shuffled before each iteration.
folds : int
Number of folds to use for evaluation.
fit_params : dictionary
Parameters to supply to the conformal prediction object on training.
scoring_funcs : iterable
List of evaluation functions to apply to the conformal predictor in each
fold. Each evaluation function should have a signature
``scorer(prediction, y, significance)``.
significance_levels : iterable
List of significance levels at which to evaluate the conformal
predictor.
verbose : boolean
Indicates whether to output progress information during evaluation.
Returns
-------
scores : pandas DataFrame
Tabulated results for each iteration, fold and evaluation function.
"""
fit_params = fit_params if fit_params else {}
significance_levels = (significance_levels if significance_levels
is not None else np.arange(0.01, 1.0, 0.01))
df = pd.DataFrame()
columns = ['iter',
'fold',
'significance',
] + [f.__name__ for f in scoring_funcs]
for i in range(iterations):
idx = np.random.permutation(y.size)
x, y = x[idx, :], y[idx]
cv = KFold(n_splits=folds)
for j, (train, test) in enumerate(cv.split(x)):
if verbose:
sys.stdout.write('\riter {}/{} fold {}/{}'.format(
i + 1,
iterations,
j + 1,
folds
))
m = clone(model)
m.fit(x[train, :], y[train], **fit_params)
prediction = m.predict(x[test, :], significance=None)
for k, s in enumerate(significance_levels):
scores = [scoring_func(prediction, y[test], s)
for scoring_func in scoring_funcs]
df_score = pd.DataFrame([[i, j, s] + scores],
columns=columns)
df = df.append(df_score, ignore_index=True)
return df
def run_experiment(models, csv_files, iterations=10, folds=10, fit_params=None,
scoring_funcs=None, significance_levels=None,
normalize=False, verbose=False, header=0):
"""Performs a cross-validation evaluation of one or several conformal
predictors on a collection of data sets in csv format.
Parameters
----------
models : object or iterable
Conformal predictor(s) to evaluate.
csv_files : iterable
List of file names (with absolute paths) containing csv-data, used to
evaluate the conformal predictor.
iterations : int
Number of iterations to use for evaluation. The data set is randomly
shuffled before each iteration.
folds : int
Number of folds to use for evaluation.
fit_params : dictionary
Parameters to supply to the conformal prediction object on training.
scoring_funcs : iterable
List of evaluation functions to apply to the conformal predictor in each
fold. Each evaluation function should have a signature
``scorer(prediction, y, significance)``.
significance_levels : iterable
List of significance levels at which to evaluate the conformal
predictor.
verbose : boolean
Indicates whether to output progress information during evaluation.
Returns
-------
scores : pandas DataFrame
Tabulated results for each data set, iteration, fold and
evaluation function.
"""
df = pd.DataFrame()
if not hasattr(models, '__iter__'):
models = [models]
for model in models:
is_regression = model.get_problem_type() == 'regression'
n_data_sets = len(csv_files)
for i, csv_file in enumerate(csv_files):
if verbose:
print('\n{} ({} / {})'.format(csv_file, i + 1, n_data_sets))
data = pd.read_csv(csv_file, header=header)
x, y = data.values[:, :-1], data.values[:, -1]
x = np.array(x, dtype=np.float64)
if normalize:
if is_regression:
y = (y - y.min()) / (y.max() - y.min())
else:
for j, y_ in enumerate(np.unique(y)):
y[y == y_] = j
scores = cross_val_score(model, x, y, iterations, folds,
fit_params, scoring_funcs,
significance_levels, verbose)
ds_df = pd.DataFrame(scores)
ds_df['model'] = model.__class__.__name__
try:
ds_df['data_set'] = csv_file.split('/')[-1]
except:
ds_df['data_set'] = csv_file
df = df.append(ds_df)
return df
# -----------------------------------------------------------------------------
# Validity measures
# -----------------------------------------------------------------------------
def reg_n_correct(prediction, y, significance=None):
"""Calculates the number of correct predictions made by a conformal
regression model.
"""
if significance is not None:
idx = int(significance * 100 - 1)
prediction = prediction[:, :, idx]
low = y >= prediction[:, 0]
high = y <= prediction[:, 1]
correct = low * high
return y[correct].size
def reg_mean_errors(prediction, y, significance):
"""Calculates the average error rate of a conformal regression model.
"""
return 1 - reg_n_correct(prediction, y, significance) / y.size
def class_n_correct(prediction, y, significance):
"""Calculates the number of correct predictions made by a conformal
classification model.
"""
labels, y = np.unique(y, return_inverse=True)
prediction = prediction > significance
correct = np.zeros((y.size,), dtype=bool)
for i, y_ in enumerate(y):
correct[i] = prediction[i, int(y_)]
return np.sum(correct)
def class_mean_errors(prediction, y, significance=None):
"""Calculates the average error rate of a conformal classification model.
"""
return 1 - (class_n_correct(prediction, y, significance) / y.size)
def class_one_err(prediction, y, significance=None):
"""Calculates the error rate of conformal classifier predictions containing
only a single output label.
"""
labels, y = np.unique(y, return_inverse=True)
prediction = prediction > significance
idx = np.arange(0, y.size, 1)
idx = filter(lambda x: np.sum(prediction[x, :]) == 1, idx)
#!/usr/bin/env python
"""Basic implementation of CHOMP trajectory optimization algorithm.
Optimize over q1...qn, with q0 and qn+1 the fixed end points.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import IPython
from mm2d import models
class CircleField:
def __init__(self, c, r):
self.c = c
self.r = r
def signed_dist(self, x):
return np.linalg.norm(x - self.c) - self.r
def signed_dist_grad(self, x):
return (x - self.c) / np.linalg.norm(x - self.c)
def cost(self, x, eps):
d = self.signed_dist(x)
if d <= 0:
return -d + 0.5 * eps
elif d <= eps:
return (d - eps) ** 2 / (2 * eps)
return 0
def cost_grad(self, x, eps):
d = self.signed_dist(x)
dg = self.signed_dist_grad(x)
if d <= 0:
return -dg
elif d <= eps:
return -(d - eps) * dg / eps
return np.zeros(dg.shape)
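# For reference, the piecewise obstacle cost implemented above as a function of
# the signed distance d to the circle, with eps the clearance margin
# (a standard CHOMP-style smoothed hinge):
#   cost(d) = -d + eps/2              for d <= 0        (inside the obstacle)
#   cost(d) = (d - eps)**2 / (2*eps)  for 0 < d <= eps  (inside the margin)
#   cost(d) = 0                       for d > eps       (clear of the obstacle)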
class FloorField:
def __init__(self, y):
self.y = y
def signed_dist(self, p):
return p[1] - self.y
def signed_dist_grad(self, p):
return np.sign([0, p[1]])
def cost(self, p, eps):
d = self.signed_dist(p)
if d <= 0:
return d ** 2
return 0
def cost_grad(self, x, eps):
d = self.signed_dist(x)
dg = self.signed_dist_grad(x)
if d <= 0:
return 2 * d * dg
return np.zeros(dg.shape)
class ObstacleField:
def __init__(self, obstacles):
self.obstacles = obstacles
def cost(self, p, eps):
cost = np.sum([obs.cost(p, eps) for obs in self.obstacles])
return cost
def cost_grad(self, p, eps):
grad = np.sum([obs.cost_grad(p, eps) for obs in self.obstacles], axis=0)
return grad
def fd1(N, n, q0, qf):
"""First-order finite differencing matrix."""
# construct the finite differencing matrix
d1 = np.ones(N + 1)
d2 = -np.ones(N)
# K0 is N+1 x N
K0 = sparse.diags((d1, d2), [0, -1]).toarray()[:, :-1]
# kron to make it work for n-dimensional inputs
K = np.kron(K0, np.eye(n))
e = np.zeros((N + 1) * n)
e[:n] = -q0
e[-n:] = qf
return K, e
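# Illustrative example (hypothetical sizes, not from the original code): with
# N=3 and n=1, K0 is the 4x3 matrix
#   [[ 1,  0,  0],
#    [-1,  1,  0],
#    [ 0, -1,  1],
#    [ 0,  0, -1]]
# so K @ q + e stacks the first-order differences
# (q1 - q0, q2 - q1, q3 - q2, qf - q3) of the free waypoints and the fixed endpoints.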
def fd2(N, n, q0, qf):
"""Second-order finite differencing matrix."""
# construct the finite differencing matrix
d1 = -2 * np.ones(N)
d2 = np.ones(N - 1)
# K0 is N x N
K0 = sparse.diags((d2, d1, d2), [1, 0, -1]).toarray()
# kron to make it work for n-dimensional inputs
K = np.kron(K0, np.eye(n))
import numba
import numpy as np
import strax
import straxen
from .pulse_processing import HITFINDER_OPTIONS, HITFINDER_OPTIONS_he, HE_PREAMBLE
from strax.processing.general import _touching_windows
export, __all__ = strax.exporter()
@export
@strax.takes_config(
strax.Option('peaklet_gap_threshold', default=350,
help="No hits for this many ns triggers a new peak"),
strax.Option('peak_left_extension', default=30,
help="Include this many ns left of hits in peaks"),
strax.Option('peak_right_extension', default=200,
help="Include this many ns right of hits in peaks"),
strax.Option('peak_min_pmts', default=4,
help="Minimum number of contributing PMTs needed to define a peak"),
strax.Option('peak_split_gof_threshold',
# See https://xe1t-wiki.lngs.infn.it/doku.php?id=
# xenon:xenonnt:analysis:strax_clustering_classification
# #natural_breaks_splitting
# for more information
default=(
None, # Reserved
((0.5, 1), (4, 0.4)),
((2, 1), (4.5, 0.4))),
help='Natural breaks goodness of fit/split threshold to split '
'a peak. Specify as tuples of (log10(area), threshold).'),
strax.Option('peak_split_filter_wing_width', default=70,
help='Wing width of moving average filter for '
'low-split natural breaks'),
strax.Option('peak_split_min_area', default=40.,
help='Minimum area to evaluate natural breaks criterion. '
'Smaller peaks are not split.'),
strax.Option('peak_split_iterations', default=20,
help='Maximum number of recursive peak splits to do.'),
strax.Option('diagnose_sorting', track=False, default=False,
help="Enable runtime checks for sorting and disjointness"),
strax.Option('gain_model',
help='PMT gain model. Specify as (model_type, model_config)'),
strax.Option('tight_coincidence_window_left', default=50,
help="Time range left of peak center to call "
"a hit a tight coincidence (ns)"),
strax.Option('tight_coincidence_window_right', default=50,
help="Time range right of peak center to call "
"a hit a tight coincidence (ns)"),
strax.Option('n_tpc_pmts', type=int,
help='Number of TPC PMTs'),
strax.Option('saturation_correction_on', default=True,
help='On off switch for saturation correction'),
strax.Option('saturation_reference_length', default=100,
help="Maximum number of reference sample used "
"to correct saturated samples"),
strax.Option('saturation_min_reference_length', default=20,
help="Minimum number of reference sample used "
"to correct saturated samples"),
*HITFINDER_OPTIONS,
)
class Peaklets(strax.Plugin):
"""
Split records into:
-peaklets
-lone_hits
Peaklets are very aggressively split peaks such that we are able
to find S1-S2s even if they are close to each other. (S2) Peaks
that are split into too many peaklets will be merged later on.
To get Peaklets from records apply/do:
1. Hit finding
2. Peak finding
3. Peak splitting using the natural breaks algorithm
4. Compute the digital sum waveform
Lone hits are all hits which are outside of any peak. The area of
lone_hits includes the left and right hit extension, except the
extension overlaps with any peaks or other hits.
"""
depends_on = ('records',)
provides = ('peaklets', 'lone_hits')
data_kind = dict(peaklets='peaklets',
lone_hits='lone_hits')
parallel = 'process'
compressor = 'zstd'
__version__ = '0.3.7'
def infer_dtype(self):
return dict(peaklets=strax.peak_dtype(
n_channels=self.config['n_tpc_pmts']),
lone_hits=strax.hit_dtype)
def setup(self):
self.to_pe = straxen.get_to_pe(self.run_id,
self.config['gain_model'],
self.config['n_tpc_pmts'])
def compute(self, records, start, end):
r = records
hits = strax.find_hits(
r,
min_amplitude=straxen.hit_min_amplitude(
self.config['hit_min_amplitude']))
# Remove hits in zero-gain channels
# they should not affect the clustering!
hits = hits[self.to_pe[hits['channel']] != 0]
hits = strax.sort_by_time(hits)
# Use peaklet gap threshold for initial clustering
# based on gaps between hits
peaklets = strax.find_peaks(
hits, self.to_pe,
gap_threshold=self.config['peaklet_gap_threshold'],
left_extension=self.config['peak_left_extension'],
right_extension=self.config['peak_right_extension'],
min_channels=self.config['peak_min_pmts'],
result_dtype=self.dtype_for('peaklets'))
# Make sure peaklets don't extend out of the chunk boundary
# This should be very rare in normal data due to the ADC pretrigger
# window.
self.clip_peaklet_times(peaklets, start, end)
# Get hits outside peaklets, and store them separately.
# fully_contained is OK provided gap_threshold > extension,
# which is asserted inside strax.find_peaks.
lone_hits = hits[strax.fully_contained_in(hits, peaklets) == -1]
strax.integrate_lone_hits(
lone_hits, records, peaklets,
save_outside_hits=(self.config['peak_left_extension'],
self.config['peak_right_extension']),
n_channels=len(self.to_pe))
# Compute basic peak properties -- needed before natural breaks
strax.sum_waveform(peaklets, r, self.to_pe)
strax.compute_widths(peaklets)
# Split peaks using low-split natural breaks;
# see https://github.com/XENONnT/straxen/pull/45
# and https://github.com/AxFoundation/strax/pull/225
peaklets = strax.split_peaks(
peaklets, r, self.to_pe,
algorithm='natural_breaks',
threshold=self.natural_breaks_threshold,
split_low=True,
filter_wing_width=self.config['peak_split_filter_wing_width'],
min_area=self.config['peak_split_min_area'],
do_iterations=self.config['peak_split_iterations'])
# Saturation correction using non-saturated channels
# similar method used in pax
# see https://github.com/XENON1T/pax/pull/712
if self.config['saturation_correction_on']:
peak_saturation_correction(
r, peaklets, self.to_pe,
reference_length=self.config['saturation_reference_length'],
min_reference_length=self.config['saturation_min_reference_length'])
# Compute tight coincidence level.
# Making this a separate plugin would
# (a) require doing hitfinding yet again (or storing hits), and
# (b) increase strax memory usage / max_messages,
# possibly due to its currently primitive scheduling.
hit_max_times = np.sort(
hits['time']
+ hits['dt'] * hit_max_sample(records, hits))
peaklet_max_times = (
peaklets['time']
+ np.argmax(peaklets['data'], axis=1) * peaklets['dt'])
peaklets['tight_coincidence'] = get_tight_coin(
hit_max_times,
peaklet_max_times,
self.config['tight_coincidence_window_left'],
self.config['tight_coincidence_window_right'])
if self.config['diagnose_sorting'] and len(r):
assert np.diff(r['time']).min(initial=1) >= 0, "Records not sorted"
assert np.diff(hits['time']).min(initial=1) >= 0, "Hits not sorted"
assert np.all(peaklets['time'][1:]
>= strax.endtime(peaklets)[:-1]), "Peaks not disjoint"
# Update nhits of peaklets:
counts = strax.touching_windows(hits, peaklets)
counts = np.diff(counts, axis=1).flatten()
counts += 1
peaklets['n_hits'] = counts
return dict(peaklets=peaklets,
lone_hits=lone_hits)
def natural_breaks_threshold(self, peaks):
# TODO avoid duplication with PeakBasics somehow?
rise_time = -peaks['area_decile_from_midpoint'][:, 1]
# This is ~1 for a clean S2, ~0 for a clean S1,
# and transitions gradually in between.
f_s2 = 8 * np.log10(rise_time.clip(1, 1e5) / 100)
f_s2 = 1 / (1 + np.exp(-f_s2))
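# Illustrative values (not from the original code): rise_time = 100 gives
# f_s2 = sigmoid(0) = 0.5, a small rise time gives f_s2 ~ 0 (S1-like) and a
# large one gives f_s2 ~ 1 (S2-like), so f_s2 smoothly weights the
# area-dependent threshold interpolation below.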
log_area = np.log10(peaks['area'].clip(1, 1e7))
thresholds = self.config['peak_split_gof_threshold']
return (
f_s2 * np.interp(
log_area,
*np.transpose(thresholds[2])
# model.distributions.py
# copyright 2021 <NAME>
import numpy as np
import pymc3 as pm
from scipy import stats, special
import theano.tensor as tt
from pymc3.distributions.dist_math import bound, logpow, alltrue_elemwise
from pymc3.distributions.continuous import assert_negative_support, PositiveContinuous
from pymc3.distributions.distribution import draw_values, generate_samples
from pymc3.theanof import floatX
RANDOM_SEED = 42
rng = np.random.default_rng(seed=RANDOM_SEED)
# NOTE hack to clip values away from {0, 1} for invcdfs
# Whilst value = {0, 1} is theoretically allowed, it seems to cause a
# numeric computational issue somewhere in tt.erfcinv which throws infs.
# This screws up the downstream, so clip slightly away from {0, 1}
CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS = 1e-15 #1e-18 too small
def boundzero_numpy(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, 0.)
def boundzero_theano(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return tt.switch(alltrue_elemwise(conditions), vals, 0.)
def boundlog_numpy(vals, *conditions):
""" Bound log unit distribution params, return -inf for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, -np.inf)
def logpow_numpy(x, m):
""" Copy from pymc3
Safe calc log(x**m) since m*log(x) will fail when m, x = 0.
"""
return np.where(x == 0, np.where(m == 0, 0.0, -np.inf), m * np.log(x))
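# Quick sanity checks (illustrative, not part of the original module):
#   logpow_numpy(2.0, 3.0) -> 3*log(2)
#   logpow_numpy(0.0, 0.0) -> 0.0   (instead of the nan that 0*log(0) would give)
#   logpow_numpy(0.0, 2.0) -> -inf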
class Gamma(pm.Gamma):
"""Inherit the pymc class, add cdf and invcdf """
def __init__(self):
raise NotImplementedError(
"""Consider that InvCDF is hard to calculate: even scipy uses C functions
Recommend using a different dist in practice""")
class GammaNumpy():
"""Gamma PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations used in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
Ref: https://en.wikipedia.org/wiki/Gamma_distribution
Params: x > 0, u in [0, 1], a (shape) > 0, b (rate) > 0
"""
def __init__(self):
self.name = 'Gamma'
self.notation = {'notation': r'x \sim Gamma(\alpha, \beta)'}
self.dist_natural = {
'pdf': r'f(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \beta^{\alpha} x^{\alpha-1} e^{- \beta x}',
'cdf': r'F(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \gamma(\alpha, \beta x)',
'invcdf': r'F^{-1}(u \mid \alpha, \beta) = '}
self.dist_log = {
'logpdf': r'\log f(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \beta^{\alpha} + \log x^{\alpha-1} - \beta x',
'logcdf': r'\log F(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \gamma(\alpha, \beta x)',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, \beta) = '}
self.conditions = {
'parameters': r'\alpha > 0 \, \text{(shape)}, \; \beta > 0 \, \text{(rate)}',
'support': r'x \in (0, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r'\frac{\alpha}{\beta}',
'mode': r'\frac{\alpha - 1}{\beta}, \; \text{for} \alpha \geq 1',
'variance': r'\frac{\alpha}{\beta^{2}}'
}
def pdf(self, x, a, b):
"""Gamma PDF
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2595
"""
fn = (1 / special.gamma(a)) * np.power(b, a)
"""
What is ALGOPY:
---------------
The purpose of ALGOPY is the efficient evaluation of higher order derivatives
in the forward and reverse mode of Algorithmic Differentiation (AD). A particular
focus is matrix-valued functions, as they often appear in statistically motivated
functions. E.g. the covariance matrix of a least squares problem requires the
computation::
C = inv(dot(J.T,J))
where J(x) is a partial derivative of a function F.
The central idea of ALGOPY is the computation on Taylor polynomials with scalar
coefficients and with matrix coefficients. These algorithms are primarily used for
Algorithmic Differentiation (AD) in the forward and reverse mode.
The focus is univariate Taylor polynomials over matrices (UTPM), implemented in
the class `algopy.utpm.UTPM`.
To allow the use of the reverse mode of AD a simple code tracer has been implemented in
`algopy.tracer`. The idea is to record the computation procedure in a datastructure s.t.
the control flow sequence can be walked in reverse order.
ALGOPY is a research prototype where, to the best of the authors'
knowledge, some algorithms are implemented that cannot be found elsewhere.
Most of ALGOPY is implemented in pure Python. However, some submodules are implemented
in pure C. For these submodules Python bindings using ctypes are provided.
The algorithms are quite well-tested and have been successfully used.
However, since the user-base is currently quite small, it is possible that bugs
may still be present.
Also, not all submodules of ALGOPY are feature complete. The most important parts
`algopy.tracer` and `algopy.utpm.UTPM` are however fairly complete.
Getting Started:
----------------
Consider the following function:
"""
def f(A,x):
for n in range(3):
y = dot(x.T,dot(A,x))
A = A - dot(x,x.T) * y
return trace(A)
"""
and the goal is to compute its gradient w.r.t. A and x.
As one will surely notice, this is not as simple as it seems.
But it's no problem for ALGOPY.
At first, we will find the gradient in the forward mode of AD.
Let A be an (N,N) array, and x an (N,1) array. Therefore, the gradient of f
will be a vector of length `N**2 + N`. In the forward mode one computes
each of those `N**2 + N` partial derivatives in a separate run. This is very similar to the finite differences
approach where each argument is separately perturbed.
As an example, let's compute df/dA_{11}, i.e. the derivative w.r.t. the (1,1) entry of A,
(counting from 1).
"""
import numpy
from algopy import UTPM
from algopy.globalfuncs import dot, trace
D,P,N = 2,1,2
A = UTPM(numpy.zeros((D,P,N,N)))
x = UTPM(numpy.zeros((D,P,N,1)))
A.data[0,:] = numpy.random.rand(N,N)
x.data[0,:] = numpy.random.rand(N,1)
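# A hedged sketch of how this example would typically continue (the exact
# continuation is not part of this excerpt): seed the Taylor direction for the
# (1,1) entry of A, evaluate f, and read the first-order coefficient off the
# UTPM result as the derivative, e.g.
#   A.data[1,0,0,0] = 1.
#   y = f(A, x)
#   print('df/dA_{11} =', y.data[1,0])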
#######################################################################################################################################################
#######################################################################Imports#########################################################################
#######################################################################################################################################################
#from itertools import product # forms cartesian products
from tqdm import tqdm_notebook as tqdm
#import pickle
import numpy as np
from numpy import linspace
import pandas as pd
import scipy as sp
from functools import reduce
from more_itertools import random_product
import operator
import math
import random
from joblib import Parallel, delayed
from collections.abc import Iterable
#from scipy.integrate import quad
import matplotlib.pyplot as plt
#from sklearn.model_selection import cross_val_score, train_test_split, StratifiedKFold, KFold
from sklearn.metrics import accuracy_score, log_loss, roc_auc_score, f1_score, mean_absolute_error, r2_score
from similaritymeasures import frechet_dist, area_between_two_curves, dtw
import time
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from IPython.display import display, Math, Latex, clear_output
import os
import shutil
import pickle
import traceback
#udf import
from utilities.LambdaNet import *
from utilities.metrics import *
#from utilities.utility_functions import *
from scipy.optimize import minimize
from scipy import optimize
import sympy as sym
from sympy import Symbol, sympify, lambdify, abc, SympifyError
# Function Generation 0 1 import
from sympy.sets.sets import Union
from sympy import Number
import math
from numba import jit, njit
import itertools
from interruptingcow import timeout
import time
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
#######################################################################################################################################################
#############################################################Setting relevant parameters from current config###########################################
#######################################################################################################################################################
def initialize_utility_functions_config_from_curent_notebook(config):
try:
globals().update(config['data'])
except KeyError:
print(KeyError)
try:
globals().update(config['lambda_net'])
except KeyError:
print(KeyError)
try:
globals().update(config['i_net'])
except KeyError:
print(KeyError)
try:
globals().update(config['evaluation'])
except KeyError:
print(KeyError)
try:
globals().update(config['computation'])
except KeyError:
print(KeyError)
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
if int(tf.__version__[0]) >= 2:
tf.random.set_seed(RANDOM_SEED)
else:
tf.set_random_seed(RANDOM_SEED)
global list_of_monomial_identifiers
from utilities.utility_functions import flatten, rec_gen, gen_monomial_identifier_list
list_of_monomial_identifiers_extended = []
if laurent:
variable_sets = [list(flatten([[_d for _d in range(d+1)], [-_d for _d in range(1, neg_d+1)]])) for _ in range(n)]
list_of_monomial_identifiers_extended = rec_gen(variable_sets)
if len(list_of_monomial_identifiers_extended) < 500:
print(list_of_monomial_identifiers_extended)
list_of_monomial_identifiers = []
for monomial_identifier in tqdm(list_of_monomial_identifiers_extended):
if np.sum(monomial_identifier) <= d:
if monomial_vars == None or len(list(filter(lambda x: x != 0, monomial_identifier))) <= monomial_vars:
list_of_monomial_identifiers.append(monomial_identifier)
else:
variable_list = ['x'+ str(i) for i in range(n)]
list_of_monomial_identifiers = gen_monomial_identifier_list(variable_list, d, n)
#######################################################################################################################################################
#############################################################General Utility Functions#################################################################
#######################################################################################################################################################
def round_expr(expr, num_digits):
return expr.xreplace({n : round(n, num_digits) for n in expr.atoms(Number)})
def nCr(n,r):
f = math.factorial
return f(n) // f(r) // f(n-r)
def rec_gen(x):
if len(x) == 1:
return [[item] for item in x[0]]
appended = []
for s_el in x[0]:
for next_s in rec_gen(x[1:]):
appended.append([s_el] + next_s)
return appended
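# Illustrative example (not in the original source): rec_gen builds the cartesian
# product of the given per-variable value lists, e.g.
#   rec_gen([[0, 1], [0, 1]]) -> [[0, 0], [0, 1], [1, 0], [1, 1]]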
def gen_monomial_identifier_list(variable_list, degree, number_of_variables):
def get_polynomial(vars, power):
if "c" in vars:
raise Exception("\"c\" cannot be a variable")
vars.append("c") # add dummy variable
# compute all combinations of variables
terms = []
for x in itertools.combinations_with_replacement(vars, power):
terms.append(x)
# get rid of "c" terms
terms = list(map(list, terms))
for i in range(len(terms)):
while "c" in terms[i]:
terms[i].remove("c")
return terms
terms = get_polynomial(variable_list, degree)
monomial_identifier_list = []
for term in terms:
monomial = [0 for i in range(number_of_variables)]
for value in term:
index = int(value[1:])
monomial[index] = monomial[index] + 1
monomial_identifier_list.append(monomial)
return monomial_identifier_list
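# Illustrative example (not in the original source): for two variables and degree 2,
#   gen_monomial_identifier_list(['x0', 'x1'], 2, 2)
# returns the exponent vectors [[2, 0], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]],
# i.e. x0^2, x0*x1, x0, x1^2, x1 and the constant term.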
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
def chunks(lst, chunksize):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), chunksize):
yield lst[i:i + chunksize]
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def return_float_tensor_representation(some_representation, dtype=tf.float32):
if tf.is_tensor(some_representation):
some_representation = tf.dtypes.cast(some_representation, dtype)
else:
some_representation = tf.convert_to_tensor(some_representation)
some_representation = tf.dtypes.cast(some_representation, dtype)
if not tf.is_tensor(some_representation):
raise SystemExit('Given variable is no instance of ' + str(dtype) + ':' + str(some_representation))
return some_representation
def return_numpy_representation(some_representation):
if isinstance(some_representation, pd.DataFrame):
some_representation = some_representation.values
some_representation = np.float32(some_representation)
if isinstance(some_representation, list):
some_representation = np.array(some_representation, dtype=np.float32)
if isinstance(some_representation, np.ndarray):
#print(some_representation)
#print(type(some_representation))
#print(some_representation.dtype)
#print(some_representation[0])
#print(some_representation[0].dtype)
some_representation = np.float32(some_representation)
else:
raise SystemExit('Given variable is no instance of ' + str(np.ndarray) + ':' + str(some_representation))
return some_representation
def mergeDict(dict1, dict2):
#Merge dictionaries and keep values of common keys in list
newDict = {**dict1, **dict2}
for key, value in newDict.items():
if key in dict1 and key in dict2:
if isinstance(dict1[key], list) and isinstance(value, list):
newDict[key] = dict1[key]
newDict[key].extend(value)
elif isinstance(dict1[key], list) and not isinstance(value, list):
newDict[key] = dict1[key]
newDict[key].extend([value])
elif not isinstance(dict1[key], list) and isinstance(value, list):
newDict[key] = [dict1[key]]
newDict[key].extend(value)
else:
newDict[key] = [dict1[key], value]
return newDict
def return_callbacks_from_string(callback_string_list):
callbacks = [] if len(callback_string_list) > 0 else None
#if 'plot_losses_callback' in callback_string_list:
#callbacks.append(PlotLossesCallback())
if 'reduce_lr_loss' in callback_string_list:
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=min(50, epochs//10), verbose=0, min_delta=0, mode='min') #epsilon
callbacks.append(reduce_lr_loss)
if 'early_stopping' in callback_string_list:
try:
patience = early_stopping_patience if early_stopping_patience is not None else min(50, epochs//10)
except:
patience = min(50, epochs//10)
earlyStopping = EarlyStopping(monitor='val_loss', patience=patience, min_delta=0, verbose=0, mode='min', restore_best_weights=True)
callbacks.append(earlyStopping)
#if not multi_epoch_analysis and samples_list == None:
#callbacks.append(TQDMNotebookCallback())
return callbacks
def arreq_in_list(myarr, list_arrays):
return next((True for elem in list_arrays if np.array_equal(elem, myarr)), False)
def flatten(l):
for el in l:
if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
def shape_flat_network_parameters(flat_network_parameters, target_network_parameters):
#from utilities.utility_functions import flatten_list
#def recursive_len(item):
# if type(item) == list:
# return sum(recursive_len(subitem) for subitem in item)
# else:
# return 1
shaped_network_parameters =[]
start = 0
for parameters in target_network_parameters:
target_shape = parameters.shape
size = np.prod(target_shape)#recursive_len(el)#len(list(flatten_list(el)))
shaped_parameters = np.reshape(flat_network_parameters[start:start+size], target_shape)
shaped_network_parameters.append(shaped_parameters)
start += size
return shaped_network_parameters
def shaped_network_parameters_to_array(shaped_network_parameters):
network_parameter_list = []
for layer_weights, biases in pairwise(shaped_network_parameters): #clf.get_weights()
for neuron in layer_weights:
for weight in neuron:
network_parameter_list.append(weight)
for bias in biases:
network_parameter_list.append(bias)
return np.array(network_parameter_list)
#######################################################################################################################################################
#################################################################### Normalization ###################################################################
#######################################################################################################################################################
def get_order_sum(arrays):
arrays = np.array(arrays)
values = [np.sum(arrays[0])]
order = [0]
for i in range(1, len(arrays)):
value = np.sum(arrays[i])
pos = 0
while pos<len(values) and value>=values[pos]:
if value == values[pos]:
print("!!!!!!!!!!!!!!!!KOLLISION!!!!!!!!!!!!!!!!!!")
print(value)
print(arrays[i])
print(arrays[order[pos]])
pos += 1
values.insert(pos, value)
order.insert(pos, i)
return order
## source for sort_array: https://www.geeksforgeeks.org/permute-the-elements-of-an-array-following-given-order/
def sort_array(arr, order):
length = len(order)
#ordered_arr = np.zeros(length)
ordered_arr = [None] * length
for i in range(length):
ordered_arr[i] = arr[order[i]]
arr=ordered_arr
return arr
def normal_neural_net(model_arr):
for i in range(len(lambda_network_layers)):
index = 2*(i)
dense_arr = np.transpose(model_arr[index])
order = get_order_sum(dense_arr)
for j in range(len(model_arr[index])):
model_arr[index][j] = sort_array(model_arr[index][j], order)
model_arr[index+1] = np.array(sort_array(model_arr[index+1], order))
model_arr[index+2] = np.array(sort_array(model_arr[index+2], order))
return model_arr
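# Illustrative example (hypothetical numbers, not from the original source): if the
# incoming-weight sums of the three neurons in a hidden layer are [0.7, -0.2, 0.4],
# get_order_sum returns [1, 2, 0]; the layer's weight columns and biases are then
# permuted into that ascending order and the next layer's weight rows are permuted
# to match, so functionally identical networks map to the same canonical parameters.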
def print_polynomial_from_coefficients(coefficient_array, force_complete_poly_representation=False, round_digits=None):
return display(get_sympy_string_from_coefficients(coefficient_array, force_complete_poly_representation=force_complete_poly_representation, round_digits=round_digits))
def get_polynomial_string_from_coefficients(coefficients, force_complete_poly_representation=False, round_digits=None):
global list_of_monomial_identifiers
global interpretation_net_output_monomials
string = ''
try: #catch if this is lambda-net training
interpretation_net_output_monomials == None
except NameError:
interpretation_net_output_monomials = None
if interpretation_net_output_monomials == None or force_complete_poly_representation:
for identifier, coefficient in zip(list_of_monomial_identifiers, coefficients):
if round_digits != None:
string += str(np.round(coefficient, round_digits))
else:
string += str(coefficient)
for index, variable_identifier in enumerate(identifier):
if int(variable_identifier) == 1:
#string += '*'
string += 'abcdefghijklmnopqrstuvwxyz'[index]
elif int(variable_identifier) > 1:
#string += '*'
string += 'abcdefghijklmnopqrstuvwxyz'[index] + '^' + str(variable_identifier)
string += ' + '
else:
# Convert output array to monomial identifier index and corresponding coefficient
assert coefficient_array.shape[0] == interpretation_net_output_shape or coefficient_array.shape[0] == interpretation_net_output_shape + 1 + len(list_of_monomial_identifiers)
if coefficient_array.shape[0] == interpretation_net_output_shape:
coefficients = coefficient_array[:interpretation_net_output_monomials]
index_array = coefficient_array[interpretation_net_output_monomials:]
assert index_array.shape[0] == interpretation_net_output_monomials*sparsity or index_array.shape[0] == interpretation_net_output_monomials*(d+1)*n
index_list = np.split(index_array, interpretation_net_output_monomials)
assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials
indices = np.argmax(index_list, axis=1)
else:
coefficients = coefficient_array[:interpretation_net_output_monomials+1]
index_array = coefficient_array[interpretation_net_output_monomials+1:]
assert index_array.shape[0] == (interpretation_net_output_monomials+1)*sparsity
index_list = np.split(index_array, interpretation_net_output_monomials+1)
assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials+1
indices = np.argmax(index_list, axis=1)
for monomial_index, monomial_coefficient in zip(indices, coefficients):
if round_digits != None:
string += str(np.round(monomial_coefficient, round_digits))
else:
string += str(monomial_coefficient)
#REPLACE NAN
for i, monomial_exponent in enumerate(list_of_monomial_identifiers[monomial_index]):
if int(monomial_exponent) == 1:
#string += '*'
string += 'abcdefghijklmnopqrstuvwxyz'[i]
elif int(monomial_exponent) > 1:
#string += '*'
string += 'abcdefghijklmnopqrstuvwxyz'[i] + '^' + str(monomial_exponent)
string += ' + '
return string[:-3]
def get_sympy_string_from_coefficients(coefficient_array, force_complete_poly_representation=False, round_digits=None):
global list_of_monomial_identifiers
global interpretation_net_output_monomials
variable_alphabet = "abcdefghijklmnopqrstuvwxyz"
variable_list = []
for i in range(n):
variable_list.append(sym.symbols(variable_alphabet[i]))
try: #catch if this is lambda-net training
interpretation_net_output_monomials == None
except NameError:
interpretation_net_output_monomials = None
if interpretation_net_output_monomials == None or force_complete_poly_representation:
f = 0
for monomial_identifier, monomial_coefficient in zip(list_of_monomial_identifiers, coefficient_array):
if round_digits != None:
subfunction = np.round(monomial_coefficient, round_digits)
else:
subfunction = monomial_coefficient
for i, monomial_exponent in enumerate(monomial_identifier):
subfunction *= variable_list[i]**monomial_exponent
f += subfunction
else:
f = 0
# Convert output array to monomial identifier index and corresponding coefficient
assert coefficient_array.shape[0] == interpretation_net_output_shape or coefficient_array.shape[0] == interpretation_net_output_shape + 1 + len(list_of_monomial_identifiers)
if coefficient_array.shape[0] == interpretation_net_output_shape:
coefficients = coefficient_array[:interpretation_net_output_monomials]
index_array = coefficient_array[interpretation_net_output_monomials:]
assert index_array.shape[0] == interpretation_net_output_monomials*sparsity or index_array.shape[0] == interpretation_net_output_monomials*(d+1)*n
index_list = np.split(index_array, interpretation_net_output_monomials)
assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials
indices = np.argmax(index_list, axis=1)
else:
coefficients = coefficient_array[:interpretation_net_output_monomials+1]
index_array = coefficient_array[interpretation_net_output_monomials+1:]
assert index_array.shape[0] == (interpretation_net_output_monomials+1)*sparsity
index_list = np.split(index_array, interpretation_net_output_monomials+1)
assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials+1
indices = np.argmax(index_list, axis=1)
for monomial_index, monomial_coefficient in zip(indices, coefficients):
if round_digits != None:
subfunction = np.round(monomial_coefficient, round_digits)
else:
subfunction = monomial_coefficient
#REPLACE NAN
for i, monomial_exponent in enumerate(list_of_monomial_identifiers[monomial_index]):
subfunction *= variable_list[i]**monomial_exponent
f += subfunction
return f
def plot_polynomial_from_coefficients(coefficient_array, force_complete_poly_representation=False):
sympy_function_string = get_sympy_string_from_coefficients(coefficient_array, force_complete_poly_representation=False)
variable_alphabet = "abcdefghijklmnopqrstuvwxyz"
variable_list = []
for i in range(n):
variable_list.append(sym.symbols(variable_alphabet[i]))
lam_x = lambdify(variable_list, sympy_function_string, modules=['numpy'])
x_vals = linspace(x_min, x_max, 100)
y_vals = lam_x(x_vals)
plt.plot(x_vals, y_vals)
plt.show()
def get_critical_points_from_polynomial(coefficient_array, force_complete_poly_representation=False):
coefficient_array = return_numpy_representation(coefficient_array)
#assert coefficient_array.shape[0] == interpretation_net_output_shape
f = get_sympy_string_from_coefficients(coefficient_array, force_complete_poly_representation=force_complete_poly_representation)
gradient = sym.derive_by_array(f, tuple(f.free_symbols))
stationary_points = sym.solve(gradient, tuple(f.free_symbols))
return f, gradient, stationary_points
#######################################################################################################################################################
########################################################################JUSTUS CODE####################################################################
#######################################################################################################################################################
# simplified the function for this usecase
def get_sympy_string_from_coefficients_fg(coefficient_array, variable_list):
global list_of_monomial_identifiers
f=0
for i in range(sparsity):
monomial = coefficient_array[i]
for j in range(n):
monomial = monomial*variable_list[j]**int(list_of_monomial_identifiers[i][j])
f += monomial
return f
# Method to shift a function(func) by a given distance(distance) for a given variable(variable)
def shift(func, distance, variable):
a = variable
f = func
# substitude a by a-distance (shifting)
f = f.subs(a, (a-distance))
# expand function returns polynomial funtion as sum of monomials
f = sym.expand(f)
return f
# Method to bulge a function(func) by a given factor(factor) for a given variable(variable)
def bulge(func, factor, variable):
a = variable
f = func
#substitude a by a*factor (bulging)
f = f.subs(a, (factor*a))
#expand function returns polynomial funtion as sum of monomials
f = sym.expand(f)
return f
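# Illustrative examples (not in the original source), using sympy substitution:
#   shift(x**2, 1, x) -> (x - 1)**2, expanded to x**2 - 2*x + 1  (graph moves right by 1)
#   bulge(x**2, 2, x) -> 4*x**2                                   (graph is compressed horizontally)
# adjust_function below combines both to move a polynomial's stationary points
# into the sampled input interval.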
def adjust_function(f, borders, variables):
variables = list(f.free_symbols)
width = [1 - border - random.uniform(border_min, border_max) for border in borders] # space between borders (only left border is saved)
# check if the degree is 1 (there are no stationary points for functions of degree 1. Therefore they can't be adjusted with this function)
higher_degree = False
for variable in variables:
if sym.degree(f, variable) > 1:
higher_degree = True
break;
if not higher_degree:
return f
# special handling for functions with 1 variable (inflection points are also used); functions with more than 1 variable need some extra steps below that the single-variable case does not
if n==1:
g = sym.diff(f, variables[0])
#find extremums ()
ext = sym.solveset(g, domain=sym.Reals)
#find inflection points
inflec = sym.calculus.util.stationary_points(g, variables[0], domain=sym.Reals)
#critical points (joint extremums and inflection points)
critical_points = Union(ext, inflec)
# Test, if there are any critical points (Only case where a polynomial function has no critical point is a straight, which causes no problem)
if not critical_points.is_empty:
# find infimum and supremum of set:
left_critical_point = critical_points.inf
right_critical_point = critical_points.sup
# calculate distance between points:
distance = right_critical_point - left_critical_point
# only one critical point
if distance == 0:
# shift function so that the critical point is between border and 1-border
bulge_factor = random.uniform(bulge_min, bulge_max)
shift_distance = -left_critical_point + bulge_factor * (borders[0] + random.uniform(0, width[0]))
f = shift(f, shift_distance, variables[0])
f = bulge(f, bulge_factor, variables[0])
#f = shift(f, -left_critical_point+random.uniform(borders[0], 1-borders[0]), variables[0])
# check if function needs to be bulged
elif distance <= width[0]:
# shift function so that the critical points are between border and 1-border
f = shift(f, -left_critical_point+borders[0]+random.uniform(0, width[0]-distance), variables[0])
else:
bulge_factor = distance/width[0]
shift_distance = -left_critical_point + bulge_factor * borders[0]
f = shift(f, shift_distance, variables[0])
# bulge the function
f = bulge(f, distance/width[0], variables[0])
return f
# determine the number of variables that are used in the search for stationary points (probabilities in configs)
number_of_used_variables = random.choices([n, random.randint(min_variables_used, max_variables_used)], [global_stationary_prob, 1-global_stationary_prob])[0]
used_variables = []
# helper function to get stationary points
f_copy = f
# select 'number_of_used_variables' many variables
while len(used_variables) < number_of_used_variables and len(used_variables)<len(variables):
variable = variables[random.randint(0, len(variables)-1)]
if not variable in used_variables:
used_variables.append(variable)
# substitute all variables that are not used with constants that are in the intervall
for variable in variables:
if not variable in used_variables:
f_copy = f_copy.subs(variable, random.uniform(x_min, x_max))
# adjustment of the used_variables, because some variables might not be in the function
used_variables = list(f_copy.free_symbols)
number_of_used_variables = len(used_variables)
# special search for the use of only one variable (also uses inflection points)
if number_of_used_variables == 1:
g = sym.diff(f_copy, used_variables[0], domain=sym.Reals)
#find extremums ()
ext = sym.solveset(g, used_variables[0], domain=sym.Reals)
#find inflection points
inflec = sym.calculus.util.stationary_points(g, used_variables[0])
#critical points (joint extremums and inflection points) (called stationary_points to use the same code)
critical_points = Union(ext, inflec)
stationary_points = []
# filter out stationary points that are not real
if not type(critical_points) is sym.sets.fancysets.Reals:
for point in critical_points:
stationary_points.append({used_variables[0]: point})
# get stationary points for the use of more than one variable
else:
f_copy = sym.expand(f_copy)
gradient = sym.derive_by_array(f_copy, tuple(f_copy.free_symbols))
stationary_points = sym.solve(gradient, tuple(f_copy.free_symbols), dict=True)
if len(stationary_points) == 0:
return f;
length_helper = len(stationary_points) - 1
used_variables = list(stationary_points[0].keys())
number_of_used_variables = len(used_variables)
# filter out stationary points that are not real
for i in range(len(stationary_points)):
for j in range(number_of_used_variables):
if not stationary_points[length_helper-i][used_variables[j]].is_real:
stationary_points.pop(length_helper-i)
break;
# no stationary points => nothing can be adjusted => just return functions
if len(stationary_points) == 0:
return f;
# 1 stationary point => shift it inside the intervall for all used variables and bulge it randomly
if len(stationary_points) == 1:
for i in range(number_of_used_variables):
bulge_factor = random.uniform(bulge_min, bulge_max)
shift_distance = -(stationary_points[0][used_variables[i]]) + bulge_factor * (borders[i] + random.uniform(0, width[i]))
f = shift(f, shift_distance, used_variables[i])
f = bulge(f, bulge_factor, used_variables[i])
# minimum of two stationary points => shift them to the border limits
else:
for i in range(len(used_variables)):
critical_values = [stationary_points[j][used_variables[i]] for j in range(len(stationary_points))]
minimum = min(critical_values)
distance = max(critical_values) - minimum
bulge_factor = distance/width[i]
shift_distance = -minimum + bulge_factor * borders[i]
f = shift(f, shift_distance, used_variables[i])
f = bulge(f, bulge_factor, used_variables[i])
return f
def prep_post_polynomial (borders, values):
variable_alphabet = "abcdefghijklmnopqrstuvwxyz"
variable_list = [sym.symbols(variable_alphabet[i]) for i in range(n)]
list_of_monomial_dict_names = []
global list_of_monomial_identifiers
# get dictionary keys to retrieve function
for mono_string in list_of_monomial_identifiers:
helper = 1
for i in range(n):
if mono_string[i] != "0":
if(helper!=1):
helper = helper*variable_list[i]**int(mono_string[i])
else:
helper = variable_list[i]**int(mono_string[i])
list_of_monomial_dict_names.append(helper)
# get sympy string for adjustments
function = get_sympy_string_from_coefficients_fg(values, variable_list)
# adjustment
function_adjusted = adjust_function(function, borders, variable_list)
# get list representation from sympy representation
coeff_dict = function_adjusted.as_coefficients_dict()
coeff_list = [coeff_dict[monomial] for monomial in list_of_monomial_dict_names]
    # possible divisor for the case that coefficient values are too high
divider = abs(max(coeff_list, key=abs) / random.uniform(a_max/4, a_max))
if divider > 1:
coeff_list = [x / divider for x in coeff_list]
    # adjust the y-axis intercept so that functions are spread better
if coeff_list[0] != 0:
multiplier0 = random.uniform(1, a_max / abs(coeff_list[0]))
coeff_list[0] = coeff_list[0] * multiplier0
    # NaN can happen if one coefficient has values of infinity after bulging or shifting
for i in range(sparsity):
if math.isnan(coeff_list[i]):
values = [random.uniform(a_min, a_max) for _ in range(sparsity)]
return prep_post_polynomial(borders, values)
return coeff_list
def get_polynomial_basic(sparsities, change=0):
    # 'change' adjusts the lower-degree probability per degree step. Values other than 0 work better for
    # generating functions of low degree, because with change = 0 a large share of the generated functions
    # ends up having degree 0. Example value:
    # change = -((lower_degree_prob / (d-1)) - (0.01 * d))
values = np.zeros(sparsity)
degree_helper = 1
for i in range(d):
if(random.random() < (lower_degree_prob + i*change)):
degree_helper += 1
else:
break
#return random nonadjusted function
if random.random()<a_random_prob:
for i in range(max_monomials_random-1):
values[random.randint(0, sparsities[-degree_helper]-1)] = random.uniform(a_min, a_max)
values[0] = random.uniform(a_min, a_max)
return values
# degree_helper >= d => maximum degree = 1 => no stationary points => no adjustment possible
if degree_helper >= d:
for i in range(max_monomials-1):
values[random.randint(0, sparsities[-degree_helper]-1)] = random.uniform(a_min, a_max)
values[0] = random.uniform(a_min, a_max)
return values
    # get random borders (minimum spacing between x_min / x_max and the critical points)
borders = [random.uniform(border_min, border_max) for i in range(n)]
try:
with timeout(5, exception=RuntimeError):
coeff_list = prep_post_polynomial(borders, values)
except:
return get_polynomial_basic(sparsities, change = change)
return coeff_list
#######################################################################################################################################################
###########################Manual calculations for comparison of polynomials based on function values (no TF!)#########################################
#######################################################################################################################################################
#@njit#(nopython=True)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!DEPRECATED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def calcualate_function_value(coefficient_list, lambda_input_entry, force_complete_poly_representation=False, list_of_monomial_identifiers=None, interpretation_net_output_monomials=None):
#print('coefficient_list', coefficient_list)
#print('lambda_input_entry', lambda_input_entry)
result = 0
#try: #catch if this is lambda-net training
# config['interpretation_net_output_monomials'] == None
#except NameError:
# config['interpretation_net_output_monomials'] = None
if interpretation_net_output_monomials == None or force_complete_poly_representation:
#print('coefficient_list', coefficient_list)
#print(force_complete_poly_representation)
#print(interpretation_net_output_monomials)
#assert coefficient_list.shape[0] == sparsity, 'Shape of Coefficient List: ' + str(coefficient_list.shape) + str(interpretation_net_output_monomials) + str(coefficient_list)
for coefficient_value, coefficient_multipliers in zip(coefficient_list, list_of_monomial_identifiers):
#print('coefficient_value', coefficient_value)
#print('coefficient_multipliers', coefficient_multipliers)
value_without_coefficient = [lambda_input_value**coefficient_multiplier for coefficient_multiplier, lambda_input_value in zip(coefficient_multipliers, lambda_input_entry)]
#print('value_without_coefficient', value_without_coefficient)
#try:
result += coefficient_value * reduce(lambda x, y: x*y, value_without_coefficient)
#except TypeError:
# print('ERROR')
# print(lambda_input_entry)
# print(coefficient_list)
#
# print(coefficient_value)
# print(value_without_coefficient)
else:
# Convert output array to monomial identifier index and corresponding coefficient
#ASSERT
#assert coefficient_list.shape[0] == interpretation_net_output_shape or coefficient_list.shape[0] == interpretation_net_output_shape + 1 + len(list_of_monomial_identifiers)
if coefficient_list.shape[0] == interpretation_net_output_shape:
coefficients = coefficient_list[:interpretation_net_output_monomials]
index_array = coefficient_list[interpretation_net_output_monomials:]
#ASSERT
#assert index_array.shape[0] == interpretation_net_output_monomials*sparsity
index_list = np.split(index_array, interpretation_net_output_monomials)
#ASSERT
#assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials
indices = np.argmax(index_list, axis=1)
else:
coefficients = coefficient_list[:interpretation_net_output_monomials+1]
index_array = coefficient_list[interpretation_net_output_monomials+1:]
#ASSERT
#assert index_array.shape[0] == (interpretation_net_output_monomials+1)*sparsity
index_list = np.split(index_array, interpretation_net_output_monomials+1)
#ASSERT
#assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials+1
indices = np.argmax(index_list, axis=1)
# Calculate monomial values without coefficient
value_without_coefficient_list = []
for coefficient_multipliers in list_of_monomial_identifiers:
value_without_coefficient = [lambda_input_value**coefficient_multiplier for coefficient_multiplier, lambda_input_value in zip(coefficient_multipliers, lambda_input_entry)]
value_without_coefficient_list.append(reduce(lambda x, y: x*y, value_without_coefficient))
        value_without_coefficient_by_indices = np.array(value_without_coefficient_list)[indices]
# Select relevant monomial values without coefficient and calculate final polynomial
for coefficient, monomial_index in zip(coefficients, indices):
#TODOOOOO
result += coefficient * value_without_coefficient_list[monomial_index]
#print('result', result)
return result
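# Illustrative sketch (not part of the original pipeline): how the dense representation used above
# evaluates a polynomial by hand. The monomial identifiers, coefficients, and input values below are
# made up for demonstration only; identifiers are assumed to hold one exponent digit per variable.
def _example_dense_polynomial_evaluation():
    from functools import reduce
    example_identifiers = ['00', '10', '01', '20', '11', '02']
    example_coefficients = [1.0, 2.0, 0.0, 0.0, 3.0, 0.0]  # encodes 1 + 2*x + 3*x*y
    example_input = [2.0, 5.0]  # x=2, y=5
    result = 0.0
    for coefficient, identifier in zip(example_coefficients, example_identifiers):
        factors = [value**int(degree) for degree, value in zip(identifier, example_input)]
        result += coefficient * reduce(lambda a, b: a*b, factors)
    return result  # expected: 1 + 2*2 + 3*2*5 = 35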
#@jit#@jit(nopython=True)
def calculate_function_values_from_polynomial(polynomial, lambda_input_data, force_complete_poly_representation=False, list_of_monomial_identifiers=None, interpretation_net_output_monomials=None):
#function_value_list = []
#for lambda_input_entry in lambda_input_data:
#function_value = calcualate_function_value(polynomial, lambda_input_entry, force_complete_poly_representation=force_complete_poly_representation, list_of_monomial_identifiers=list_of_monomial_identifiers, interpretation_net_output_monomials=interpretation_net_output_monomials)
#function_value_list.append(function_value)
config = {
'n': n,
#'inet_loss': inet_loss,
'sparsity': sparsity,
#'lambda_network_layers': lambda_network_layers,
#'interpretation_net_output_shape': interpretation_net_output_shape,
'RANDOM_SEED': RANDOM_SEED,
#'nas': nas,
#'number_of_lambda_weights': number_of_lambda_weights,
#'interpretation_net_output_monomials': interpretation_net_output_monomials,
#'list_of_monomial_identifiers': list_of_monomial_identifiers,
'x_min': x_min,
'x_max': x_max,
}
try:
config['interpretation_net_output_monomials'] = interpretation_net_output_monomials
except:
config['interpretation_net_output_monomials'] = None
#print(list_of_monomial_identifiers)
#print(polynomial)
#print(lambda_input_data)
function_value_list = calculate_poly_fv_tf_wrapper_new(return_float_tensor_representation(list_of_monomial_identifiers), return_float_tensor_representation(polynomial), return_float_tensor_representation(lambda_input_data), force_complete_poly_representation=force_complete_poly_representation, config=config)
return np.nan_to_num(np.array(function_value_list))
def parallel_fv_calculation_from_polynomial(polynomial_list, lambda_input_list, force_complete_poly_representation=False, n_jobs_parallel_fv=10, backend='threading'):
print(force_complete_poly_representation)
polynomial_list = return_numpy_representation(polynomial_list)
lambda_input_list = return_numpy_representation(lambda_input_list)
#print(polynomial_list.shape)
#print(type(polynomial_list))
#print(polynomial_list.dtype)
#print(polynomial_list)
#print(polynomial_list[0].shape)
#print(type(polynomial_list[0]))
#print(polynomial_list[0].dtype)
#print(polynomial_list[0])
assert polynomial_list.shape[0] == lambda_input_list.shape[0]
if force_complete_poly_representation:
assert polynomial_list.shape[1] == sparsity
else:
assert polynomial_list.shape[1] == interpretation_net_output_shape or polynomial_list.shape[1] == interpretation_net_output_shape + 1 + len(list_of_monomial_identifiers) , 'Poly Shape ' + str(polynomial_list.shape[1]) +' Output Monomials ' + str(interpretation_net_output_shape) + str(polynomial_list[:2])
assert lambda_input_list.shape[2] == n
config = {'list_of_monomial_identifiers': list_of_monomial_identifiers,
'interpretation_net_output_monomials': interpretation_net_output_monomials}
parallel = Parallel(n_jobs=n_jobs_parallel_fv, verbose=1, backend=backend)
#polynomial_true_fv = parallel(delayed(calculate_function_values_from_polynomial)(polynomial, lambda_inputs, force_complete_poly_representation=force_complete_poly_representation, list_of_monomial_identifiers=list_of_monomial_identifiers, interpretation_net_output_monomials=interpretation_net_output_monomials) for polynomial, lambda_inputs in zip(polynomial_list, lambda_input_list))
config = {
'n': n,
#'inet_loss': inet_loss,
'sparsity': sparsity,
#'lambda_network_layers': lambda_network_layers,
#'interpretation_net_output_shape': interpretation_net_output_shape,
'RANDOM_SEED': RANDOM_SEED,
#'nas': nas,
#'number_of_lambda_weights': number_of_lambda_weights,
#'interpretation_net_output_monomials': interpretation_net_output_monomials,
#'list_of_monomial_identifiers': list_of_monomial_identifiers,
'x_min': x_min,
'x_max': x_max,
'sparse_poly_representation_version': sparse_poly_representation_version,
}
try:
config['interpretation_net_output_monomials'] = interpretation_net_output_monomials
except:
config['interpretation_net_output_monomials'] = None
if use_gpu:
polynomial_true_fv = parallel(delayed(calculate_poly_fv_tf_wrapper_new_no_tfFunction)(return_float_tensor_representation(list_of_monomial_identifiers), return_float_tensor_representation(polynomial), return_float_tensor_representation(lambda_inputs), force_complete_poly_representation=force_complete_poly_representation, config=config) for polynomial, lambda_inputs in zip(polynomial_list, lambda_input_list))
else:
polynomial_true_fv = parallel(delayed(calculate_poly_fv_tf_wrapper_new)(return_float_tensor_representation(list_of_monomial_identifiers), return_float_tensor_representation(polynomial), return_float_tensor_representation(lambda_inputs), force_complete_poly_representation=force_complete_poly_representation, config=config) for polynomial, lambda_inputs in zip(polynomial_list, lambda_input_list))
del parallel
return np.array(polynomial_true_fv)
def calculate_function_values_from_sympy(function, data_points, variable_names=None):
if function is None:
return np.array([np.nan for i in range(data_points.shape[0])])
try:
if variable_names == None:
function_vars = function.atoms(Symbol)
else:
function_vars = [sym.symbols(variable_name) for variable_name in variable_names]
#print('function_vars', function_vars)
lambda_function = lambdify([function_vars], function, modules=["scipy", "numpy"])
#print('lambda_function', lambda_function)
#print('data_points[0]', data_points[0])
if len(function_vars) >= 1:
function_values = [lambda_function(data_point) for data_point in data_points]
else:
function_values = [lambda_function() for i in range(data_points.shape[0])]
except (NameError, KeyError) as e:
#print(e)
function_values = []
for data_point in data_points:
function_value = function.evalf(subs={var: data_point[index] for index, var in enumerate(list(function_vars))})
try:
function_value = float(function_value)
except TypeError as te:
#print(te)
#print(function_value)
function_value = np.inf
function_values.append(function_value)
function_values = np.nan_to_num(function_values).ravel()
return function_values
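# Minimal usage sketch for the sympy evaluation path above (illustrative only; the expression and
# data points are made up). lambdify compiles the expression once and is then applied per data point,
# mirroring what calculate_function_values_from_sympy does on its fast path.
def _example_sympy_function_values():
    a, b = sym.symbols('a b')
    example_function = a**2 + 3*b
    example_points = np.array([[1.0, 2.0], [0.5, -1.0]])
    compiled = lambdify([(a, b)], example_function, modules=["scipy", "numpy"])
    return np.array([compiled(point) for point in example_points])  # [7.0, -2.75]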
def parallel_fv_calculation_from_sympy(function_list, lambda_input_list, n_jobs_parallel_fv=10, backend='threading', variable_names=None):
backend='sequential'
lambda_input_list = return_numpy_representation(lambda_input_list)
assert len(function_list) == lambda_input_list.shape[0], str(len(function_list)) + '\t' + str(lambda_input_list.shape[0])
parallel = Parallel(n_jobs=n_jobs_parallel_fv, verbose=1, backend=backend)
polynomial_true_fv = parallel(delayed(calculate_function_values_from_sympy)(function, lambda_inputs, variable_names=variable_names) for function, lambda_inputs in zip(function_list, lambda_input_list))
del parallel
return np.array(polynomial_true_fv)
def sleep_minutes(minutes):
time.sleep(int(60*minutes))
def sleep_hours(hours):
time.sleep(int(60*60*hours))
def generate_paths(config=None, path_type='interpretation_net'):
if config is not None:
paths_dict = {}
training_string = '_sameX' if config['data']['same_training_all_lambda_nets'] else '_diffX'
laurent_str = '_laurent' if config['data']['laurent'] else ''
monomial_vars_str = '_monvars_' + str(config['data']['monomial_vars']) if config['data']['monomial_vars'] != None else ''
neg_d_str = '_negd_' + str(config['data']['neg_d']) + '_prob_' + str(config['data']['neg_d_prob']) if config['data']['neg_d'] != None else ''
dataset_description_string = ('_var_' + str(config['data']['n']) +
'_d_' + str(config['data']['d']) +
laurent_str +
monomial_vars_str +
neg_d_str +
'_spars_' + str(config['data']['sample_sparsity']) +
'_amin_' + str(config['data']['a_min']) +
'_amax_' + str(config['data']['a_max']) +
#'_xmin_' + str(x_min) +
#'_xmax_' + str(x_max) +
'_xdist_' + str(config['data']['x_distrib']) +
'_noise_' + str(config['data']['noise_distrib']) + '_' + str(config['data']['noise'])
+ '_' + config['data']['function_generation_type']
)
if config['data']['shift_polynomial']:
adjusted_dataset_string = ('bmin' + str(config['data']['border_min']) +
'bmax' + str(config['data']['border_max']) +
'lowd' + str(config['data']['lower_degree_prob']) +
'arand' + str(config['data']['a_random_prob']))
else:
adjusted_dataset_string = ''
if path_type == 'data_creation' or path_type == 'lambda_net': #Data Generation
path_identifier_polynomial_data = ('poly_' + str(config['data']['polynomial_data_size']) +
'_train_' + str(config['lambda_net']['lambda_dataset_size']) +
dataset_description_string +
adjusted_dataset_string +
training_string)
paths_dict['path_identifier_polynomial_data'] = path_identifier_polynomial_data
if path_type == 'lambda_net' or path_type == 'interpretation_net': #Lambda-Net
if config['data']['fixed_seed_lambda_training'] and config['data']['fixed_initialization_lambda_training']:
seed_init_string = '_' + str(config['data']['number_different_lambda_trainings']) + '-FixSeedInit'
elif config['data']['fixed_seed_lambda_training'] and not config['data']['fixed_initialization_lambda_training']:
seed_init_string = '_' + str(config['data']['number_different_lambda_trainings']) + '-FixSeed'
elif not config['data']['fixed_seed_lambda_training'] and config['data']['fixed_initialization_lambda_training']:
seed_init_string = '_' + str(config['data']['number_different_lambda_trainings']) + '-FixInit'
elif not config['data']['fixed_seed_lambda_training'] and not config['data']['fixed_initialization_lambda_training']:
seed_init_string = '_NoFixSeedInit'
early_stopping_string = '_ES' + str(config['lambda_net']['early_stopping_min_delta_lambda']) + '_' if config['lambda_net']['early_stopping_lambda'] else ''
lambda_layer_str = ''.join([str(neurons) + '-' for neurons in config['lambda_net']['lambda_network_layers']])
lambda_net_identifier = '_' + lambda_layer_str + str(config['lambda_net']['epochs_lambda']) + 'e' + early_stopping_string + str(config['lambda_net']['batch_lambda']) + 'b' + '_' + config['lambda_net']['optimizer_lambda'] + '_' + config['lambda_net']['loss_lambda']
path_identifier_lambda_net_data = ('lnets_' + str(config['data']['lambda_nets_total']) +
lambda_net_identifier +
                                           '_train_' + str(config['lambda_net']['lambda_dataset_size']) +
training_string +
seed_init_string + '_' + str(config['computation']['RANDOM_SEED']) +
'/' +
dataset_description_string[1:] +
adjusted_dataset_string)
paths_dict['path_identifier_lambda_net_data'] = path_identifier_lambda_net_data
if path_type == 'interpretation_net': #Interpretation-Net
interpretation_network_layers_string = 'dense' + ''.join([str(neurons) + '-' for neurons in config['i_net']['dense_layers']])
if config['i_net']['convolution_layers'] != None:
interpretation_network_layers_string += 'conv' + str(config['i_net']['convolution_layers'])
if config['i_net']['lstm_layers'] != None:
interpretation_network_layers_string += 'lstm' + str(config['i_net']['lstm_layers'])
interpretation_net_identifier = '_' + interpretation_network_layers_string + 'output_' + str(config['i_net']['interpretation_net_output_shape']) + '_drop' + str(config['i_net']['dropout']) + 'e' + str(config['i_net']['epochs']) + 'b' + str(config['i_net']['batch_size']) + '_' + config['i_net']['optimizer']
path_identifier_interpretation_net_data = ('inet' + interpretation_net_identifier +
'/lnets_' + str(config['i_net']['interpretation_dataset_size']) +
lambda_net_identifier +
'_train_' + str(config['lambda_net']['lambda_dataset_size']) +
training_string +
seed_init_string + '_' + str(config['computation']['RANDOM_SEED']) +
'/' +
dataset_description_string[1:] +
adjusted_dataset_string)
paths_dict['path_identifier_interpretation_net_data'] = path_identifier_interpretation_net_data
else:
paths_dict = {}
training_string = '_sameX' if same_training_all_lambda_nets else '_diffX'
laurent_str = '_laurent' if laurent else ''
monomial_vars_str = '_monvars_' + str(monomial_vars) if monomial_vars != None else ''
neg_d_str = '_negd_' + str(neg_d) + '_prob_' + str(neg_d_prob) if neg_d != None else ''
dataset_description_string = ('_var_' + str(n) +
'_d_' + str(d) +
laurent_str +
monomial_vars_str +
neg_d_str +
'_spars_' + str(sample_sparsity) +
'_amin_' + str(a_min) +
'_amax_' + str(a_max) +
#'_xmin_' + str(x_min) +
#'_xmax_' + str(x_max) +
'_xdist_' + str(x_distrib) +
'_noise_' + str(noise_distrib) + '_' + str(noise)
+ '_' + function_generation_type
)
if shift_polynomial:
adjusted_dataset_string = ('bmin' + str(border_min) +
'bmax' + str(border_max) +
'lowd' + str(lower_degree_prob) +
'arand' + str(a_random_prob))
else:
adjusted_dataset_string = ''
if path_type == 'data_creation' or path_type == 'lambda_net': #Data Generation
path_identifier_polynomial_data = ('poly_' + str(polynomial_data_size) +
'_train_' + str(lambda_dataset_size) +
dataset_description_string +
adjusted_dataset_string +
training_string)
paths_dict['path_identifier_polynomial_data'] = path_identifier_polynomial_data
if path_type == 'lambda_net' or path_type == 'interpretation_net': #Lambda-Net
if fixed_seed_lambda_training and fixed_initialization_lambda_training:
seed_init_string = '_' + str(number_different_lambda_trainings) + '-FixSeedInit'
elif fixed_seed_lambda_training and not fixed_initialization_lambda_training:
seed_init_string = '_' + str(number_different_lambda_trainings) + '-FixSeed'
elif not fixed_seed_lambda_training and fixed_initialization_lambda_training:
seed_init_string = '_' + str(number_different_lambda_trainings) + '-FixInit'
elif not fixed_seed_lambda_training and not fixed_initialization_lambda_training:
seed_init_string = '_NoFixSeedInit'
early_stopping_string = '_ES' + str(early_stopping_min_delta_lambda) + '_' if early_stopping_lambda else ''
lambda_layer_str = ''.join([str(neurons) + '-' for neurons in lambda_network_layers])
lambda_net_identifier = '_' + lambda_layer_str + str(epochs_lambda) + 'e' + early_stopping_string + str(batch_lambda) + 'b' + '_' + optimizer_lambda + '_' + loss_lambda
path_identifier_lambda_net_data = ('lnets_' + str(lambda_nets_total) +
lambda_net_identifier +
'_train_' + str(lambda_dataset_size) +
training_string +
seed_init_string + '_' + str(RANDOM_SEED) +
'/' +
dataset_description_string[1:] +
adjusted_dataset_string)
paths_dict['path_identifier_lambda_net_data'] = path_identifier_lambda_net_data
if path_type == 'interpretation_net': #Interpretation-Net
interpretation_network_layers_string = 'dense' + ''.join([str(neurons) + '-' for neurons in dense_layers])
if convolution_layers != None:
interpretation_network_layers_string += 'conv' + str(convolution_layers)
if lstm_layers != None:
interpretation_network_layers_string += 'lstm' + str(lstm_layers)
interpretation_net_identifier = '_' + interpretation_network_layers_string + 'output_' + str(interpretation_net_output_shape) + '_drop' + str(dropout) + 'e' + str(epochs) + 'b' + str(batch_size) + '_' + optimizer
path_identifier_interpretation_net_data = ('inet' + interpretation_net_identifier +
'/lnets_' + str(interpretation_dataset_size) +
lambda_net_identifier +
'_train_' + str(lambda_dataset_size) +
training_string +
seed_init_string + '_' + str(RANDOM_SEED) +
'/' +
dataset_description_string[1:] +
adjusted_dataset_string)
paths_dict['path_identifier_interpretation_net_data'] = path_identifier_interpretation_net_data
return paths_dict
def create_folders_inet():
paths_dict = generate_paths(path_type = 'interpretation_net')
try:
# Create target Directory
os.makedirs('./data/plotting/' + paths_dict['path_identifier_interpretation_net_data'] + '/')
os.makedirs('./data/results/' + paths_dict['path_identifier_interpretation_net_data'] + '/')
except FileExistsError:
pass
def generate_directory_structure():
directory_names = ['plotting', 'saved_polynomial_lists', 'results', 'saved_models', 'weights']
if not os.path.exists('./data'):
os.makedirs('./data')
text_file = open('./data/.gitignore', 'w')
text_file.write('*')
text_file.close()
for directory_name in directory_names:
path = './data/' + directory_name
if not os.path.exists(path):
os.makedirs(path)
def generate_lambda_net_directory():
paths_dict = generate_paths(path_type = 'lambda_net')
#clear files
try:
# Create target Directory
os.makedirs('./data/weights/weights_' + paths_dict['path_identifier_lambda_net_data'])
except FileExistsError:
folder = './data/weights/weights_' + paths_dict['path_identifier_lambda_net_data']
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
try:
# Create target Directory
os.makedirs('./data/results/weights_' + paths_dict['path_identifier_lambda_net_data'])
except FileExistsError:
pass
######################################################################################################################################################################################################################
######################################################################################## RANDOM FUNCTION GENERATION FROM ############################################################################################
################################# code adjusted, originally from: https://github.com/tirthajyoti/Machine-Learning-with-Python/tree/master/Random%20Function%20Generator ##############################################
######################################################################################################################################################################################################################
def symbolize(s):
"""
    Converts a string (equation) to a SymPy expression object
"""
s1=s.replace(',','.')
s2=s1.replace('^','**')
s3=sympify(s2)
return(s3)
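# Usage sketch for symbolize (illustrative string only): decimal commas and '^' powers are
# normalized before sympify, so '2,5*x^2 + 3*x' becomes the SymPy expression 2.5*x**2 + 3*x.
def _example_symbolize():
    return symbolize('2,5*x^2 + 3*x')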
def eval_multinomial(s,vals=None,symbolic_eval=False):
"""
Evaluates polynomial at vals.
    vals can be a simple list, dictionary, or tuple of values.
    vals can also contain symbols instead of real values, provided those symbols have been declared beforehand using SymPy
"""
sym_s=symbolize(s)
sym_set=sym_s.atoms(Symbol)
sym_lst=[]
for s in sym_set:
sym_lst.append(str(s))
sym_lst.sort()
if symbolic_eval==False and len(sym_set)!=len(vals):
print("Length of the input values did not match number of variables and symbolic evaluation is not selected")
return None
else:
if type(vals)==list:
sub=list(zip(sym_lst,vals))
elif type(vals)==dict:
l=list(vals.keys())
l.sort()
lst=[]
for i in l:
lst.append(vals[i])
sub=list(zip(sym_lst,lst))
elif type(vals)==tuple:
sub=list(zip(sym_lst,list(vals)))
result=sym_s.subs(sub)
return result
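# Usage sketch for eval_multinomial (illustrative values only): the same polynomial can be evaluated
# with a list (values assigned to the alphabetically sorted symbols) or with a dict of symbol names.
def _example_eval_multinomial():
    as_list = eval_multinomial('x1^2 + 3*x2', vals=[2, 1])               # x1=2, x2=1 -> 7
    as_dict = eval_multinomial('x1^2 + 3*x2', vals={'x1': 2, 'x2': 1})   # -> 7
    return as_list, as_dict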
def flip(y,p):
lst=[]
for i in range(len(y)):
f=np.random.choice([1,0],p=[p,1-p])
lst.append(f)
lst=np.array(lst)
return np.array(np.logical_xor(y,lst),dtype=int)
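# Usage sketch for flip (illustrative labels only): randomly inverts a fraction p of binary labels,
# e.g. to inject label noise into a classification target.
def _example_flip_labels():
    clean_labels = np.array([0, 1, 1, 0, 1])
    noisy_labels = flip(clean_labels, 0.2)  # on average ~20% of the entries are inverted
    return noisy_labels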
@tf.function
def calculate_poly_fv_tf_wrapper_new_no_tfFunction(list_of_monomial_identifiers, polynomial, evaluation_entry_list, force_complete_poly_representation=False, config=None):
if config != None:
globals().update(config)
def calculate_poly_fv_tf(evaluation_entry):
def calculate_monomial_with_coefficient_degree_by_var_wrapper(evaluation_entry):
def calculate_monomial_with_coefficient_degree_by_var(input_list):
degree_by_var_per_monomial = input_list[0]
coefficient = input_list[1]
                # degree_by_var_per_monomial = the chosen degree for each variable of the monomial
monomial_value_without_coefficient = tf.math.reduce_prod(tf.vectorized_map(lambda x: x[0]**tf.dtypes.cast(x[1], tf.float32), (evaluation_entry, degree_by_var_per_monomial)))
return coefficient*monomial_value_without_coefficient
return calculate_monomial_with_coefficient_degree_by_var
if interpretation_net_output_monomials == None or force_complete_poly_representation:
monomials_without_coefficient = tf.vectorized_map(calculate_monomial_without_coefficient_tf_wrapper(evaluation_entry), (list_of_monomial_identifiers))
monomial_values = tf.vectorized_map(lambda x: x[0]*x[1], (monomials_without_coefficient, polynomial))
else:
if sparse_poly_representation_version == 1:
monomials_without_coefficient = tf.vectorized_map(calculate_monomial_without_coefficient_tf_wrapper(evaluation_entry), (list_of_monomial_identifiers))
coefficients = polynomial[:interpretation_net_output_monomials]
index_array = polynomial[interpretation_net_output_monomials:]
assert index_array.shape[0] == interpretation_net_output_monomials*sparsity, 'Shape of Coefficient Indices : ' + str(index_array.shape)
index_list = tf.split(index_array, interpretation_net_output_monomials)
assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials, 'Shape of Coefficient Indices Split: ' + str(len(index_list))
indices = tf.argmax(index_list, axis=1)
monomial_values = tf.vectorized_map(lambda x: tf.gather(monomials_without_coefficient, x[0])*x[1], (indices, coefficients))
elif sparse_poly_representation_version == 2:
coefficients = polynomial[:interpretation_net_output_monomials]
index_array = polynomial[interpretation_net_output_monomials:]
#tf.print('index_array.shape', index_array)
assert index_array.shape[0] == interpretation_net_output_monomials*n*(d+1), 'Shape of Coefficient Indices : ' + str(index_array.shape)
if False:
index_list_by_monomial = tf.split(index_array, n)
assert len(index_list_by_monomial) == coefficients.shape[0] == interpretation_net_output_monomials, 'Shape of Coefficient Indices Split: ' + str(len(index_list))
index_list_by_monomial_by_var = tf.split(index_list_by_monomial, d+1, axis=1)
degree_by_var_per_monomial_list = tf.argmax(index_list_by_monomial_by_var, axis=2)
else:
index_list_by_monomial = tf.transpose(tf.split(index_array, interpretation_net_output_monomials))
index_list_by_monomial_by_var = tf.split(index_list_by_monomial, n, axis=0)
index_list_by_monomial_by_var_new = []
for tensor in index_list_by_monomial_by_var:
index_list_by_monomial_by_var_new.append(tf.transpose(tensor))
index_list_by_monomial_by_var = index_list_by_monomial_by_var_new
#tf.print('index_list_by_monomial_by_var', index_list_by_monomial_by_var)
degree_by_var_per_monomial_list = tf.transpose(tf.argmax(index_list_by_monomial_by_var, axis=2))
#tf.print('degree_by_var_per_monomial_list', degree_by_var_per_monomial_list)
#tf.print('evaluation_entry', evaluation_entry)
#tf.print('coefficients', coefficients)
monomial_values = tf.vectorized_map(calculate_monomial_with_coefficient_degree_by_var_wrapper(evaluation_entry), (degree_by_var_per_monomial_list, coefficients))
#tf.print('monomial_values', monomial_values)
polynomial_fv = tf.reduce_sum(monomial_values)
#tf.print(polynomial_fv)
return polynomial_fv
return tf.vectorized_map(calculate_poly_fv_tf, (evaluation_entry_list))
@tf.function(jit_compile=True)
def calculate_poly_fv_tf_wrapper_new(list_of_monomial_identifiers, polynomial, evaluation_entry_list, force_complete_poly_representation=False, config=None):
if config != None:
globals().update(config)
def calculate_poly_fv_tf(evaluation_entry):
def calculate_monomial_with_coefficient_degree_by_var_wrapper(evaluation_entry):
def calculate_monomial_with_coefficient_degree_by_var(input_list):
degree_by_var_per_monomial = input_list[0]
coefficient = input_list[1]
                # degree_by_var_per_monomial = the chosen degree for each variable of the monomial
monomial_value_without_coefficient = tf.math.reduce_prod(tf.vectorized_map(lambda x: x[0]**tf.dtypes.cast(x[1], tf.float32), (evaluation_entry, degree_by_var_per_monomial)))
return coefficient*monomial_value_without_coefficient
return calculate_monomial_with_coefficient_degree_by_var
if interpretation_net_output_monomials == None or force_complete_poly_representation:
monomials_without_coefficient = tf.vectorized_map(calculate_monomial_without_coefficient_tf_wrapper(evaluation_entry), (list_of_monomial_identifiers))
monomial_values = tf.vectorized_map(lambda x: x[0]*x[1], (monomials_without_coefficient, polynomial))
else:
if sparse_poly_representation_version == 1:
monomials_without_coefficient = tf.vectorized_map(calculate_monomial_without_coefficient_tf_wrapper(evaluation_entry), (list_of_monomial_identifiers))
coefficients = polynomial[:interpretation_net_output_monomials]
index_array = polynomial[interpretation_net_output_monomials:]
assert index_array.shape[0] == interpretation_net_output_monomials*sparsity, 'Shape of Coefficient Indices : ' + str(index_array.shape)
index_list = tf.split(index_array, interpretation_net_output_monomials)
assert len(index_list) == coefficients.shape[0] == interpretation_net_output_monomials, 'Shape of Coefficient Indices Split: ' + str(len(index_list))
indices = tf.argmax(index_list, axis=1)
monomial_values = tf.vectorized_map(lambda x: tf.gather(monomials_without_coefficient, x[0])*x[1], (indices, coefficients))
elif sparse_poly_representation_version == 2:
coefficients = polynomial[:interpretation_net_output_monomials]
index_array = polynomial[interpretation_net_output_monomials:]
#tf.print('index_array.shape', index_array)
assert index_array.shape[0] == interpretation_net_output_monomials*n*(d+1), 'Shape of Coefficient Indices : ' + str(index_array.shape)
if False:
index_list_by_monomial = tf.split(index_array, n)
assert len(index_list_by_monomial) == coefficients.shape[0] == interpretation_net_output_monomials, 'Shape of Coefficient Indices Split: ' + str(len(index_list))
index_list_by_monomial_by_var = tf.split(index_list_by_monomial, d+1, axis=1)
degree_by_var_per_monomial_list = tf.argmax(index_list_by_monomial_by_var, axis=2)
else:
index_list_by_monomial = tf.transpose(tf.split(index_array, interpretation_net_output_monomials))
index_list_by_monomial_by_var = tf.split(index_list_by_monomial, n, axis=0)
index_list_by_monomial_by_var_new = []
for tensor in index_list_by_monomial_by_var:
index_list_by_monomial_by_var_new.append(tf.transpose(tensor))
index_list_by_monomial_by_var = index_list_by_monomial_by_var_new
#tf.print('index_list_by_monomial_by_var', index_list_by_monomial_by_var)
degree_by_var_per_monomial_list = tf.transpose(tf.argmax(index_list_by_monomial_by_var, axis=2))
#tf.print('degree_by_var_per_monomial_list', degree_by_var_per_monomial_list)
#tf.print('evaluation_entry', evaluation_entry)
#tf.print('coefficients', coefficients)
monomial_values = tf.vectorized_map(calculate_monomial_with_coefficient_degree_by_var_wrapper(evaluation_entry), (degree_by_var_per_monomial_list, coefficients))
#tf.print('monomial_values', monomial_values)
polynomial_fv = tf.reduce_sum(monomial_values)
#tf.print(polynomial_fv)
return polynomial_fv
return tf.vectorized_map(calculate_poly_fv_tf, (evaluation_entry_list))
#calculate intermediate term (without coefficient multiplication)
def calculate_monomial_without_coefficient_tf_wrapper(evaluation_entry):
def calculate_monomial_without_coefficient_tf(coefficient_multiplier_term):
return tf.math.reduce_prod(tf.vectorized_map(lambda x: x[0]**x[1], (evaluation_entry, coefficient_multiplier_term)))
return calculate_monomial_without_coefficient_tf
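# Illustrative sketch (made-up values): evaluating one monomial term with the helper above.
# For an evaluation entry [x, y] = [2, 3] and the exponent term [2, 1] this computes x**2 * y = 12.
def _example_monomial_without_coefficient():
    evaluation_entry = tf.constant([2.0, 3.0])
    exponent_term = tf.constant([2.0, 1.0])
    return calculate_monomial_without_coefficient_tf_wrapper(evaluation_entry)(exponent_term)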
def gen_regression_symbolic(polynomial_array=None,
n_samples=100,
noise=0.0,
noise_dist='normal',
seed=42,
sympy_calculation=True):
from sklearn.datasets import make_friedman1, make_friedman2, make_friedman3
from sklearn.preprocessing import MinMaxScaler
np.random.seed(seed)
random.seed(seed)
if function_generation_type == 'polynomial':
if polynomial_array is not None:
sympy_string = get_sympy_string_from_coefficients(polynomial_array)
sympy_function=sympify(sympy_string)
        if polynomial_array is None:
            # fall back to a simple random linear function (random signs) when no coefficients are
            # given; this assumes the global variable count n, since n_features is only assigned below
            sympy_string = ''
            for i in range(1, n+1):
                sympy_string += 'x' + str(i) + np.random.choice(['+', '-'], p=[0.5, 0.5])
            sympy_string = sympy_string[:-1]
            sympy_function = sympify(sympy_string)
n_features=len(sympy_function.atoms(Symbol))
eval_results=[]
eval_dataset = generate_random_data_points(low=x_min, high=x_max, size=n_samples, variables=max(1, n), distrib=x_distrib)
config = {'list_of_monomial_identifiers': list_of_monomial_identifiers,
'interpretation_net_output_monomials': interpretation_net_output_monomials}
if sympy_calculation:
for i in range(n_samples):
eval_results.append(eval_multinomial(sympy_string, vals=list(eval_dataset[i])))
elif not sympy_calculation and polynomial_array is not None:
config = {
'n': n,
#'inet_loss': inet_loss,
'sparsity': sparsity,
#'lambda_network_layers': lambda_network_layers,
#'interpretation_net_output_shape': interpretation_net_output_shape,
'RANDOM_SEED': RANDOM_SEED,
#'nas': nas,
#'number_of_lambda_weights': number_of_lambda_weights,
'interpretation_net_output_monomials': interpretation_net_output_monomials,
#'list_of_monomial_identifiers': list_of_monomial_identifiers,
'x_min': x_min,
'x_max': x_max,
}
try:
config['interpretation_net_output_monomials'] = interpretation_net_output_monomials
except:
config['interpretation_net_output_monomials'] = None
eval_results = calculate_poly_fv_tf_wrapper_new(return_float_tensor_representation(list_of_monomial_identifiers), return_float_tensor_representation(polynomial_array), return_float_tensor_representation(eval_dataset), force_complete_poly_representation=True, config=config)
eval_results=np.array(eval_results)
eval_results=eval_results.reshape(n_samples,1)
if noise_dist=='normal':
        # the remainder of this line was truncated in the source; drawing one noise value per sample
        # (size=n_samples) is an assumption
        noise_sample = noise*np.random.normal(loc=0, scale=np.max(eval_results)-np.min(eval_results), size=n_samples)
# -*- coding: utf-8 -*-
import json
import math
import os
import platform
import random
import re
import sys
import time
from collections import OrderedDict
from io import StringIO
import requests
import numpy as np
from scipy import optimize
__author__ = "<NAME> and <NAME>"
__version__ = "2022.06.14"
__license__ = "MIT"
def removeHTMLTags(s):
"""Remove HTML tags, notably for use as page title"""
return re.sub('<[^<]+?>', '', s)
def finishWebPage(outbuffer):
""" Write the footer and finish the page """
print('<div id="footer" class="content">')
print('Code version: ' + __version__ + ' (running on Python ' + platform.python_version() + ')<br/>')
print('<script type="text/javascript">var endTime = %g;' % time.perf_counter())
print('document.write("Execution time: " + (endTime-startTime).toFixed(3) + " seconds<br/>");')
print('if(typeof isOrtho !== \'undefined\') document.write("Specific (faster) code for orthorhombic case was used.");')
print('</script></div>')
print('</div>')
print('</body></html>')
return outbuffer.getvalue()
def writeHeader(outbuffer, title="Elastic Tensor Analysis"):
""" Write the header of the HTML page """
print("""
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<title>%s</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<link rel="stylesheet" type="text/css" href="/default.css" />
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/[email protected]/distrib/jsxgraph.css" />
<script src="https://cdn.jsdelivr.net/npm/[email protected]/distrib/jsxgraphcore.js"></script>
<script src="http://cdn.plot.ly/plotly-latest.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.12.4/jquery.min.js"></script>
</head>
""" % (title))
# printTitle writes the introduction of Elate
def printTitle(outbuffer, title="Elastic Tensor Analysis"):
writeHeader(outbuffer, title)
print("""
<body>
<div class="content">
<h1><a href="/elate">ELATE: Elastic tensor analysis</a></h1>
<p>Welcome to ELATE, the online tool for analysis of elastic tensors, developed by <b><NAME></b> and <b><a
href="http://coudert.name"><NAME></a></b> at <a href="http://www.chimie-paristech.fr/molsim/">CNRS / Chimie
ParisTech</a>. <br/> If you use the software in published results (paper, conference, etc.), please cite the <a
href="http://dx.doi.org/10.1088/0953-8984/28/27/275201">corresponding paper</a> (<em><NAME></em>, 2016, 28, 275201) and give the
website URL.</p>
<p>ELATE is <a href="https://github.com/fxcoudert/elate">open source software</a>. Any queries or comments are welcome at
<script type="text/javascript">
//<![CDATA[
var c_="";for(var o5=0;o5<411;o5++)c_+=String.fromCharCode(("s%oz65j5>oJ.~~vs!Kt00}.~|}{\\"$s~%}!s0Kv#\\"wv<s!~tjjK{j5wo#zH}<j5s!z~qo6s~=u=i:00ikk>97a6!#|w<u!t{}vQ!o}Qsr?6F8G9:B8D9>@?7>a9!#|w<u!t{}vQ!o}QsrB67Dj59}qr$!s8#vq{wsw~;!oAA\\"wA#qsj5v!<~sozsq=6=A:u00970i0<ikk>a9!#|w<u!t{}vQ!o}QsrA69DDD>:E\\'7@<7s!z~qo6sjj==8:uN070j59j5jj.0|}}{\\"$}s#$0Kv#\\"wv<s!Ktj5jjj5jjL0\\'t14>O>>DBqI$}sr#!14>>>>BDqIwvw{sO~;!o\\"ws#vq14>>B>ID!t=JLo<j5s!z~qo6sO=u=0:705<!s~zoqs6=6<76<7=u:02@2?07<\\"$p\\"#!6?77".charCodeAt(o5)-(14)+0x3f)%(2*6+83)+64-32);document.write(eval(c_))
//]]>
</script>
</p>
""")
# 3D plot functions
################################################################################################
def write3DPlotData(dataX, dataY, dataZ, dataR, n, opacity=1.0):
showcont = "true"
if (opacity != 1.0):
showcont = "false"
if (n == 1):
js = OrderedDict([
("x", dataX),
("y", dataY),
("z", dataZ),
("text", dataR),
("showscale", "false"),
("colorscale", "[[\'0\',\'rgb(22,136,51)\'],[\'0.125\',\'rgb(61,153,85)\'],[\'0.25\',\'rgb(121,178,136)\'],[\'0.375\',\'rgb(181,204,187)\'],[\'0.5\',\'rgb(195,230,200)\'],[\'0.625\',\'rgb(181,204,187)\'],[\'0.75\',\'rgb(121,178,136)\'],[\'0.875\',\'rgb(61,153,85)\'],[\'1\',\'rgb(22,136,51)\']]"),
("zsmooth", "'fast'"),
("type", "'surface'"),
("hoverinfo", "'text'"),
("opacity", opacity),
("contours", "{x :{ show:"+showcont+", color: 'rgb(192,192,192)'},y :{ show:"+showcont+", color: 'rgb(192,192,192)'},z :{ show:"+showcont+", color: 'rgb(192,192,192)'}}")
])
if (n == 2):
js = OrderedDict([
("x", dataX),
("y", dataY),
("z", dataZ),
("text", dataR),
("showscale", "false"),
("colorscale", "[[\'0\',\'rgb(180,4,38)\'],[\'0.125\',\'rgb(222,96,77)\'],[\'0.25\',\'rgb(244,154,123)\'],[\'0.375\',\'rgb(245,196,173)\'],[\'0.5\',\'rgb(246,216,201)\'],[\'0.625\',\'rgb(245,196,173)\'],[\'0.75\',\'rgb(244,154,123)\'],[\'0.875\',\'rgb(222,96,77)\'],[\'1\',\'rgb(180,4,38)\']]"),
("zsmooth", "'fast'"),
("type", "'surface'"),
("hoverinfo", "'text'"),
("opacity", opacity),
("contours", "{x :{ show:"+showcont+", color: 'rgb(192,192,192)'},y :{ show:"+showcont+", color: 'rgb(192,192,192)'},z :{ show:"+showcont+", color: 'rgb(192,192,192)'}}")
])
if (n == 3):
js = OrderedDict([
("x", dataX),
("y", dataY),
("z", dataZ),
("text", dataR),
("showscale", "false"),
("colorscale", "[[\'0\',\'rgb(59,76,192)\'],[\'0.125\',\'rgb(98,130,234)\'],[\'0.25\',\'rgb(141,176,254)\'],[\'0.375\',\'rgb(184,208,249)\'],[\'0.5\',\'rgb(207,223,250)\'],[\'0.625\',\'rgb(184,208,249)\'],[\'0.75\',\'rgb(141,176,254)\'],[\'0.875\',\'rgb(98,130,234)\'],[\'1\',\'rgb(59,76,192)\']]"),
("zsmooth", "'fast'"),
("type", "'surface'"),
("hoverinfo", "'text'"),
("opacity", opacity),
("contours", "{x :{ show:"+showcont+", color: 'rgb(192,192,192)'},y :{ show:"+showcont+", color: 'rgb(192,192,192)'},z :{ show:"+showcont+", color: 'rgb(192,192,192)'}}")
])
print(json.dumps(js, indent=3).replace('\"', '') + ";")
def make3DPlot(func, legend='', width=600, height=600, npoints=200):
str1 = legend.split("\'")[0]
str2 = legend.split("\'")[1]
u = np.linspace(0, np.pi, npoints)
v = np.linspace(0, 2*np.pi, 2*npoints)
r = np.zeros(len(u)*len(v))
dataX = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataY = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataZ = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataR = [["0.0" for i in range(len(v))] for j in range(len(u))]
count = 0
for cu in range(len(u)):
for cv in range(len(v)):
r_tmp = func(u[cu], v[cv])
z = r_tmp * np.cos(u[cu])
x = r_tmp * np.sin(u[cu]) * np.cos(v[cv])
y = r_tmp * np.sin(u[cu]) * np.sin(v[cv])
dataX[cu][cv] = x
dataY[cu][cv] = y
dataZ[cu][cv] = z
dataR[cu][cv] = "'E = "+str(float(int(10*r_tmp))/10.0)+" GPa, "+"\u03B8 = "+str(float(int(10*u[cu]*180/np.pi))/10.0)+"\u00B0, "+"\u03c6 = "+str(float(int(10*v[cv]*180/np.pi))/10.0)+"\u00B0'"
count = count+1
i = random.randint(0, 100000)
print('<div class="plot3D">')
print('<div id="box%d" style="width: %dpx; height: %dpx; display:block;"></div>' % (i, width, height))
print('</div>')
print('<script type="text/javascript">')
print("var trace =")
write3DPlotData(dataX, dataY, dataZ, dataR, 1)
print("var data = [trace]")
print("var layout =")
layout = {"title": "\'"+str1+"\\"+"\'"+str2+"\'", "width": "650", "height": "700", "autosize": "false", "autorange": "true", "margin": "{l: 65, r: 50, b: 65, t: 90}"}
print(json.dumps(layout, indent=3).replace('\\\\', '\\').replace('\"', '') + ";")
print("Plotly.newPlot('box%d',data,layout);" % (i))
print('</script>')
def make3DPlotPosNeg(func, legend='', width=600, height=600, npoints=200):
u = np.linspace(0, np.pi, npoints)
v = np.linspace(0, 2*np.pi, 2*npoints)
dataX1 = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataY1 = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataZ1 = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataR1 = [["0.0" for i in range(len(v))] for j in range(len(u))]
count = 0
for cu in range(len(u)):
for cv in range(len(v)):
r_tmp = max(0, func(u[cu], v[cv]))
z = r_tmp * np.cos(u[cu])
x = r_tmp * np.sin(u[cu]) * np.cos(v[cv])
y = r_tmp * np.sin(u[cu]) * np.sin(v[cv])
dataX1[cu][cv] = x
dataY1[cu][cv] = y
dataZ1[cu][cv] = z
dataR1[cu][cv] = "'"+"\u03B2 = "+str(float(int(10*r_tmp))/10.0)+" TPa'"+"+'-1'.sup()+"+"', \u03B8 = "+str(float(int(10*u[cu]*180/np.pi))/10.0)+"\u00B0, "+"\u03c6 = "+str(float(int(10*v[cv]*180/np.pi))/10.0)+"\u00B0'"
count = count+1
dataX2 = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataY2 = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataZ2 = [[0.0 for i in range(len(v))] for j in range(len(u))]
dataR2 = [["0.0" for i in range(len(v))] for j in range(len(u))]
count = 0
for cu in range(len(u)):
for cv in range(len(v)):
r_tmp = max(0, -func(u[cu], v[cv]))
            z = r_tmp * np.cos(u[cu])
#!/usr/bin/env python3
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
from scipy.signal import hann
from scipy import fft
from LoLIM.utilities import v_air, RTD, natural_sort
import LoLIM.utilities as util
from LoLIM.antenna_response import getGalaxyCalibrationData, calibrated_AARTFAAC_model
from LoLIM.signal_processing import locate_data_loss, data_cut_inspan
from LoLIM.pol_beamforming.intensity_plotting import image_plotter
from LoLIM.pol_beamforming import cython_beamforming_tools_centerT as cyt
###### tools for picking antennas #####
class antenna_picker_parent:
def choose(self, TBB_file, X_antNames, X_XYZs, X_antStartTimes, X_cal,
Y_antNames, Y_XYZs, Y_antStartTimes, Y_cal):
""" return two lists. First list is indeces of X_antNames to use, second list is indeces of Y_antNames to use"""
print("NOT IMPLEMENTED")
quit()
class outermost_antPicker( antenna_picker_parent ):
"""pick N outermost antennas. Insures equal number X and Y"""
def __init__(self, N):
self.N = N
        if N == 0:
            raise ValueError("outermost_antPicker requires N > 0")
def choose(self, TBB_file, X_antNames, X_XYZs, X_antStartTimes, X_cal,
Y_antNames, Y_XYZs, Y_antStartTimes, Y_cal):
X_out = []
if len(X_antNames) > 0:
sorter = np.argsort( [int(n) for n in X_antNames] )
i = min( len(X_antNames), self.N )
X_out = sorter[-i:]
Y_out = []
if len(Y_antNames) > 0:
sorter = np.argsort( [int(n) for n in Y_antNames] )
i = min( len(Y_antNames), self.N )
Y_out = sorter[-i:]
N_X = len(X_out)
N_Y = len(Y_out)
if N_X != N_Y:
N = min(N_X, N_Y)
            # truncate both polarizations to the same count, keeping the outermost antennas
            X_out = X_out[-N:]
            Y_out = Y_out[-N:]
return X_out, Y_out
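# Usage sketch for outermost_antPicker (made-up antenna names; arguments not used by choose are passed
# as None). The picker keeps the highest-numbered, i.e. outermost, dipoles of each polarization and
# truncates both index lists to the same length.
def _example_antenna_picker():
    picker = outermost_antPicker(2)
    X_idx, Y_idx = picker.choose(TBB_file=None,
                                 X_antNames=['001001', '001003', '001005'], X_XYZs=None,
                                 X_antStartTimes=None, X_cal=None,
                                 Y_antNames=['001000', '001002'], Y_XYZs=None,
                                 Y_antStartTimes=None, Y_cal=None)
    return X_idx, Y_idx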
### TODO: increase wing size along with the hann window width
class beamformer_3D:
def __init__(self, center_XYZ, voxelDelta_XYZ, numVoxels_XYZ, minTraceLength_samples, imaging_half_hann_length_samples,
TBB_file_dict, RFI_data_filters_dict, frequency_width_factor, antenna_picker=None, store_antenna_data=False):
## basic input options setting
center_XYZ = np.array( center_XYZ, dtype=np.double )
voxelDelta_XYZ = np.array( voxelDelta_XYZ, dtype=np.double )
numVoxels_XYZ = np.array( numVoxels_XYZ, dtype=np.int )
# if minTraceLength_samples < 100:
# print("WARNING: should probably not beamform less than 100 points at a time")
self.center_XYZ = center_XYZ
self.voxelDelta_XYZ = voxelDelta_XYZ
self.numVoxels_XYZ = numVoxels_XYZ
self.minTraceLength_samples = minTraceLength_samples
self.half_minTrace = int(round( self.minTraceLength_samples/2 ))
self.imaging_half_hann_length_samples = imaging_half_hann_length_samples
self.TBB_file_dict = TBB_file_dict
self.RFI_data_filters_dict = RFI_data_filters_dict
self.frequency_width_factor = frequency_width_factor
self.store_antenna_data = store_antenna_data
## set location arrays
self.X_array = np.arange(numVoxels_XYZ[0], dtype=np.double)
self.X_array -= int(numVoxels_XYZ[0]/2)
centerX_voxel = np.where(self.X_array==0)[0][0]
self.X_array *= voxelDelta_XYZ[0]
self.X_array += center_XYZ[0]
self.Y_array = np.arange(numVoxels_XYZ[1], dtype=np.double)
self.Y_array -= int(numVoxels_XYZ[1]/2)
centerY_voxel = np.where(self.Y_array==0)[0][0]
self.Y_array *= voxelDelta_XYZ[1]
self.Y_array += center_XYZ[1]
self.Z_array = np.arange(numVoxels_XYZ[2], dtype=np.double)
self.Z_array -= int(numVoxels_XYZ[2]/2)
centerZ_voxel = np.where(self.Z_array==0)[0][0]
self.Z_array *= voxelDelta_XYZ[2]
self.Z_array += center_XYZ[2]
self.center_voxel = np.array([centerX_voxel, centerY_voxel, centerZ_voxel], dtype=np.int)
if antenna_picker is None:
antenna_picker = outermost_antPicker(3)
elif isinstance(antenna_picker, int):
antenna_picker = outermost_antPicker( antenna_picker )
### organize antennas and stations ###
self.station_names = natural_sort( [ sname for sname in TBB_file_dict.keys()] )
self.station_TBBfiles = [ TBB_file_dict[sname] for sname in self.station_names ]
self.station_filters = [ RFI_data_filters_dict[ sname ] for sname in self.station_names]
self.num_stations = len( self.station_names )
# collect antenna names
self.stationi_to_antRange = []
self.stationi_to_anti = [0]
self.anti_to_stati = []
self.all_antnames = []
self.all_antXYZs = []
self.all_antStartTimes = []
self.antenna_polarization = [] ## 0 for X-dipole, 1 for Y-dipole
self.amplitude_calibrations = []
for stat_i, stat_TBB in enumerate(self.station_TBBfiles):
ant_names = stat_TBB.get_antenna_names()
ant_times = stat_TBB.get_time_from_second()
ant_locs = stat_TBB.get_LOFAR_centered_positions()
freq_filter_info = self.station_filters[stat_i].RFI_data
early_N = len(self.all_antnames)
cal_antenna_names = freq_filter_info["antenna_names"]
cleaned_power = freq_filter_info["cleaned_power"]
timestamp = freq_filter_info["timestamp"]
# analyzed_blocksize = freq_filter_info["blocksize"]
even_cal_factors, odd_cal_factors = getGalaxyCalibrationData(cleaned_power, timestamp, antenna_type="outer" )
new_X_antNames = []
new_X_XYZs = []
new_X_antStartTimes = []
new_X_cal = []
new_Y_antNames = []
new_Y_XYZs = []
new_Y_antStartTimes = []
new_Y_cal = []
for cal_ant_i in range(0, int(len(cal_antenna_names)/2)):
even_ant_name = cal_antenna_names[ cal_ant_i*2 ]
odd_ant_name = cal_antenna_names[ cal_ant_i*2 + 1 ]
if np.isfinite( even_cal_factors[cal_ant_i] ) and (even_ant_name in ant_names):
even_ant_i = ant_names.index( even_ant_name )
new_Y_antNames.append( even_ant_name )
new_Y_XYZs.append( ant_locs[even_ant_i] )
new_Y_antStartTimes.append( ant_times[even_ant_i] )
new_Y_cal.append( even_cal_factors[cal_ant_i] )
if np.isfinite( odd_cal_factors[cal_ant_i] ) and (odd_ant_name in ant_names):
odd_ant_i = ant_names.index( odd_ant_name )
new_X_antNames.append( odd_ant_name )
new_X_XYZs.append( ant_locs[odd_ant_i] )
new_X_antStartTimes.append( ant_times[odd_ant_i] )
new_X_cal.append( odd_cal_factors[cal_ant_i] )
X_indeces, Y_indeces = antenna_picker.choose( TBB_file=stat_TBB,
X_antNames=new_X_antNames, X_XYZs=new_X_XYZs, X_antStartTimes=new_X_antStartTimes, X_cal=new_X_cal,
Y_antNames=new_Y_antNames, Y_XYZs=new_Y_XYZs, Y_antStartTimes=new_Y_antStartTimes, Y_cal=new_Y_cal)
self.anti_to_stati += [stat_i]*( len(X_indeces) + len(Y_indeces) )
self.antenna_polarization += [0]*len(X_indeces)
self.antenna_polarization += [1]*len(Y_indeces)
for i in X_indeces:
self.all_antnames.append( new_X_antNames[i] )
self.all_antXYZs.append( new_X_XYZs[i] )
self.all_antStartTimes.append( new_X_antStartTimes[i] )
self.amplitude_calibrations.append( new_X_cal[i] )
for i in Y_indeces:
self.all_antnames.append( new_Y_antNames[i] )
self.all_antXYZs.append( new_Y_XYZs[i] )
self.all_antStartTimes.append( new_Y_antStartTimes[i] )
self.amplitude_calibrations.append( new_Y_cal[i] )
self.stationi_to_antRange.append( slice(early_N, len(self.all_antnames) ) )
self.stationi_to_anti.append( len(self.all_antnames) )
self.all_antXYZs = np.array(self.all_antXYZs , dtype=np.double)
        ## antenna start times (uncomment the zeros line below to turn off the calibration delays)
self.all_antStartTimes = np.array(self.all_antStartTimes , dtype=np.double)
# self.all_antStartTimes = np.zeros( len(self.all_antStartTimes), dtype=np.double )
self.antenna_polarization = np.array(self.antenna_polarization , dtype=np.intc)
self.anti_to_stati = np.array(self.anti_to_stati, dtype=np.int)
        ## amplitude calibrations (uncomment the ones line below to turn them off)
self.amplitude_calibrations = np.array(self.amplitude_calibrations , dtype=np.double)
# self.amplitude_calibrations = np.ones(len(self.amplitude_calibrations) , dtype=np.double)
self.stationi_to_anti = np.array(self.stationi_to_anti, dtype=np.int)
self.num_antennas = len(self.all_antnames)
print(self.num_antennas, 'antennas')
#### initial engine setup
self.geometric_delays = np.empty( self.num_antennas, dtype=np.double ) ## time delay from center voxel to antenna (positive)
self.min_DTs = np.empty( self.num_antennas, dtype=np.double ) ## minimum difference of geo delay over all voxels from center (negative!)
self.max_DTs = np.empty( self.num_antennas, dtype=np.double ) ## maximum difference of geo delay over all voxels from center (positive!)
self.index_shifts = np.empty(self.num_antennas, dtype=np.int) ## index shifts such that if a pulse from center arrives at this index difference on all antennas
self.cal_shifts = np.empty( self.num_antennas, dtype=np.double )
self.reference_XYZ = np.array([0.0 ,0.0, 0.0], dtype=np.double)
self.engine = cyt.beamform_engine3D(
X_array=self.X_array, Y_array=self.Y_array, Z_array=self.Z_array, center_XYZ=center_XYZ, reference_XYZ=self.reference_XYZ,
antenna_locs=self.all_antXYZs, ant_startTimes=self.all_antStartTimes,
antenna_polarizations=self.antenna_polarization, anti_to_stat_i=self.anti_to_stati, stati_to_anti=self.stationi_to_anti,
geometric_delays_memory=self.geometric_delays, min_DTs_memory=self.min_DTs, max_DTs_memory=self.max_DTs,
index_shifts_memory=self.index_shifts,
cal_shifts_memory=self.cal_shifts)
earliest_ant_i = np.where( self.index_shifts==0 )[0][0]
self.center_delay = self.all_antStartTimes[earliest_ant_i] - self.geometric_delays[earliest_ant_i]
        # defined so that arrival_index = (emission_T-self.center_delay)/5.0e-9 + self.index_shifts
print('setup frequencies')
#### calculate trace lengths
self.earlyHalf_lengths = np.empty( self.num_antennas, dtype=np.int )
self.lateHalf_lengths = np.empty( self.num_antennas, dtype=np.int )
for ant_i in range(self.num_antennas):
self.earlyHalf_lengths[ ant_i ] = int(abs( self.min_DTs[ant_i]/(5.0e-9) )) + 1
self.lateHalf_lengths[ ant_i ] = int(abs( self.max_DTs[ant_i]/(5.0e-9) )) + 1
self.max_earlyHalf_length = np.max( self.earlyHalf_lengths )
self.max_lateHalf_length = np.max( self.lateHalf_lengths )
self.total_trace_length = fft.next_fast_len( self.max_earlyHalf_length + self.max_lateHalf_length + minTraceLength_samples + 2*imaging_half_hann_length_samples )
self.starting_edge_length = self.max_earlyHalf_length + imaging_half_hann_length_samples
print('total trace length', self.total_trace_length)
        self.trace_loadBuffer_length = self.total_trace_length # buffer before the arrival sample; a little long, probably only half of this is needed
self.frequencies = np.fft.fftfreq(self.total_trace_length, d=5.0e-9)
print('Jones Matrices')
#### jones matrices
## first used JM pointing upwards to get frequency range
antenna_model = calibrated_AARTFAAC_model()
# upwards_JM = antenna_model.Jones_Matrices(self.frequencies, zenith=0.0, azimuth=0.0)
upwards_JM = antenna_model.Jones_ONLY(self.frequencies, zenith=0.0, azimuth=0.0)
half_F = int( len(self.frequencies)/2 )
lowest_Fi = np.where( self.frequencies[:half_F]>30e6 )[0][0]
highest_Fi = np.where( self.frequencies[:half_F]<80e6 )[0][-1]
self.F30MHZ_i = lowest_Fi
self.F80MHZ_i = highest_Fi
# posFreq_amps = np.abs( upwards_JM[lowest_Fi:highest_Fi, 0,0] )
posFreq_amps = np.array( [ np.linalg.norm(upwards_JM[fi,:,:],ord=2) for fi in range(lowest_Fi,highest_Fi) ] )
max_freq_index = np.argmax( posFreq_amps ) + lowest_Fi
self.max_freq_index = max_freq_index
ref_amp = np.max(posFreq_amps)*frequency_width_factor
if posFreq_amps[0] <= ref_amp:
self.start_freq_index = np.where( np.logical_and( posFreq_amps[:-1]<=ref_amp, posFreq_amps[1:]>ref_amp) )[0][0]
else:
self.start_freq_index = 0
if posFreq_amps[-1] <= ref_amp:
self.end_freq_index = np.where( np.logical_and( posFreq_amps[:-1]>=ref_amp, posFreq_amps[1:]<ref_amp) )[0][0]
else:
self.end_freq_index = len( posFreq_amps )
self.antenna_norms_in_range = np.array( posFreq_amps[self.start_freq_index:self.end_freq_index ] )
self.start_freq_index += lowest_Fi
self.end_freq_index += lowest_Fi
self.beamformed_freqs = self.frequencies[ self.start_freq_index:self.end_freq_index ]
self.num_freqs = self.end_freq_index-self.start_freq_index
print('frequency range:', self.frequencies[self.start_freq_index], self.frequencies[self.end_freq_index])
print(' response amps (start, peak, end)', posFreq_amps[self.start_freq_index-lowest_Fi], np.max(posFreq_amps), posFreq_amps[self.end_freq_index-lowest_Fi-1 ])
print(' number frequency points:', self.num_freqs )
## ALL jones matrices!
self.cut_jones_matrices = np.empty( (self.num_stations, self.num_freqs,2,2), dtype=np.cdouble )
self.JM_condition_numbers = np.empty(self.num_stations, dtype=np.double) ## both at peak frequency
self.JM_magnitudes = np.empty(self.num_stations, dtype=np.double)
self.station_R = np.empty(self.num_stations, dtype=np.double) ## distance to center pixel
for stat_i in range(self.num_stations):
ant_XYZs = self.all_antXYZs[ self.stationi_to_antRange[ stat_i ] ]
stat_XYZ = np.average( ant_XYZs, axis=0 )
## from station to source!
delta_XYZ = center_XYZ - stat_XYZ
center_R = np.linalg.norm( delta_XYZ )
center_zenith = np.arccos(delta_XYZ[2]/center_R)*RTD
center_azimuth = np.arctan2( delta_XYZ[1], delta_XYZ[0] )*RTD
# self.cut_jones_matrices[stat_i, :,:,:] = antenna_model.Jones_Matrices(self.beamformed_freqs, zenith=center_zenith, azimuth=center_azimuth)
self.cut_jones_matrices[stat_i, :,:,:] = antenna_model.Jones_ONLY(self.beamformed_freqs, zenith=center_zenith, azimuth=center_azimuth)
self.JM_condition_numbers[stat_i] = np.linalg.cond( self.cut_jones_matrices[stat_i, max_freq_index-self.start_freq_index, :,:] )
self.JM_magnitudes[stat_i] = np.linalg.norm( self.cut_jones_matrices[stat_i, max_freq_index-self.start_freq_index, :,:], ord=2 )
self.station_R[stat_i] = center_R
#### windowing matrices!
self.engine.set_antenna_functions( self.total_trace_length, self.start_freq_index, self.end_freq_index,
self.frequencies, self.cut_jones_matrices)
self.engine.turn_on_all_antennas()
self.set_weights_by_station()
### some memory
self.blocksize = self.station_filters[0].blocksize
self.hann_sample_length = int( self.station_filters[0].half_window_percent * self.blocksize )
## loading
self.loading_temp = np.empty(self.blocksize, dtype=np.double)
self.loaded_data = np.empty( (self.num_antennas, self.blocksize-2*self.hann_sample_length), dtype=np.cdouble )
        self.loaded_samples = np.empty( self.num_antennas, dtype=np.int64 )
self.data_loss_spans = [ [] ]*self.num_antennas
self.loaded_indexRange = [np.inf, -np.inf]
## windowing
self.temp_window = np.empty( self.total_trace_length, dtype=np.cdouble )
        self.antenna_windowed = np.empty( self.num_antennas, dtype=np.int64 ) ## false if data loss, true otherwise
self.imaging_hann = hann(2*imaging_half_hann_length_samples)
## this will include the data that was loaded into the imager
if self.store_antenna_data :
self.antenna_data = np.zeros( (self.num_antennas,self.total_trace_length), dtype=np.cdouble )
self.correction_matrix = None
self.temp_inversion_matrix = np.empty((3, 3), dtype=np.cdouble)
self.inverted_matrix = np.empty((3, 3), dtype=np.cdouble)
self.invertrix = cyt.SVD_psuedoinversion(3, 3)
self.ifft_full_tmp = self.get_empty_partial_inverse_FFT()
### weights and condition numbers
def set_weights_by_station(self, station_weights=None):
if station_weights is None:
station_weights = np.ones( self.num_stations, dtype=np.double )
station_weights /= np.sum(station_weights)
station_weights *= self.num_stations
self.used_station_weights = station_weights
for stat_i in range( self.num_stations ):
for ant_i in range(self.stationi_to_anti[stat_i], self.stationi_to_anti[stat_i+1]):
self.engine.set_antennaWeight(ant_i, station_weights[stat_i] )
def calc_CN(self, station_weights=None):
if station_weights is not None:
self.set_weights_by_station( station_weights )
self.TMP_oPol_matrix = self.engine.get_correctionMatrix( self.TMP_oPol_matrix )
ACN = np.linalg.cond( self.TMP_oPol_matrix )
A_mag = np.linalg.norm(self.TMP_oPol_matrix , ord=2)
part_B = 0
part_C = 0
for stat_i in range( self.num_stations ):
B = self.JM_condition_numbers[stat_i]*self.used_station_weights[stat_i]/( self.JM_magnitudes[stat_i]*self.station_R[stat_i] )
part_B += B*B
C = self.JM_magnitudes[stat_i]/self.station_R[stat_i]
part_C += C*C
return ACN*np.sqrt(part_B/self.num_stations)*np.sqrt(part_C)/A_mag
def calc_set_weights(self):
F = self.calc_CN
self.TMP_oPol_matrix = None ## temporary memory needed for the function
station_weights_guess = np.ones( self.num_stations, dtype=np.double )
bound = [[0,np.inf] for i in range(self.num_stations)]
ret = minimize( F, station_weights_guess, method='powell', bounds=bound,
options={'maxiter': 1000, 'xtol':1e-30, 'ftol':1e-30})
self.set_weights_by_station( ret.x )
self.correction_matrix = self.engine.get_correctionMatrix()
return ret
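    # Usage sketch (illustrative only; `BF` stands for an already-constructed instance of
    # this imager class): the station weights are typically tuned once per region of interest.
    #   ret = BF.calc_set_weights()              # Powell-minimises the combined condition number
    #   print( ret.fun, BF.used_station_weights )
    # Only the per-station weights are rescaled; the windowed antenna data is untouched.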
### for loading and manipulating data ###
def load_raw_data(self, sky_T):
print('loading')
self.loaded_skyT = sky_T
first_index = int(round( (sky_T - self.center_delay)/5.0e-9 )) - (self.hann_sample_length + self.trace_loadBuffer_length)
self.loaded_indexRange = [ first_index+self.hann_sample_length, first_index+self.blocksize-self.hann_sample_length ]
for ant_i in range(self.num_antennas):
ant_name = self.all_antnames[ ant_i ]
stat_i = self.anti_to_stati[ ant_i ]
TBB_file = self.station_TBBfiles[ stat_i ]
freq_filter = self.station_filters[ stat_i ]
start_sample = self.index_shifts[ant_i] + first_index
self.loading_temp[:] = TBB_file.get_data(start_sample, self.blocksize, antenna_ID=ant_name )
dataLoss, number = locate_data_loss( self.loading_temp[ self.hann_sample_length:-self.hann_sample_length ], 5 )
self.data_loss_spans[ ant_i ] = dataLoss
self.loaded_data[ant_i, :] = freq_filter.filter( self.loading_temp )[ self.hann_sample_length:-self.hann_sample_length ]
self.loaded_data[ant_i, :] *= self.amplitude_calibrations[ant_i]
self.loaded_samples[ant_i] = start_sample+self.hann_sample_length
def window_data(self, sky_T, average_station=None):
if average_station is not None:
ave_stat_i = self.station_names.index( average_station )
amp_ave = 0
num_amp_ave = 0
sample_center = int(round( (sky_T - self.center_delay)/5.0e-9 ))
earliest_sample = sample_center - self.max_earlyHalf_length - self.half_minTrace
latest_sample = sample_center + self.max_lateHalf_length + self.half_minTrace
if earliest_sample<self.loaded_indexRange[0] or latest_sample>self.loaded_indexRange[1]:
self.load_raw_data( sky_T )
# print('windowing')
n = self.imaging_half_hann_length_samples
for ant_i in range(self.num_antennas):
ant_center_sample = sample_center + self.index_shifts[ant_i] - self.loaded_samples[ant_i]
ant_first_sample = ant_center_sample - self.earlyHalf_lengths[ant_i] - self.half_minTrace
ant_final_sample = ant_center_sample + self.lateHalf_lengths[ant_i] + self.half_minTrace
width = ant_final_sample - ant_first_sample
has_data_loss = data_cut_inspan( self.data_loss_spans[ ant_i ], ant_first_sample, ant_final_sample )
if has_data_loss:
# self.windowed_data[ant_i] = 0.0
self.engine.set_antennaData_zero( ant_i )
self.antenna_windowed[ ant_i ] = 0
continue
self.antenna_windowed[ant_i] = 1
delay_samples = self.max_earlyHalf_length - self.earlyHalf_lengths[ant_i]
self.temp_window[:] = 0.0
self.temp_window[delay_samples:delay_samples+width] = self.loaded_data[ant_i, ant_first_sample:ant_final_sample]
self.temp_window[delay_samples: delay_samples+n] *= self.imaging_hann[:n]
self.temp_window[delay_samples+width-n: delay_samples+width] *= self.imaging_hann[n:]
if (average_station is not None) and self.anti_to_stati[ant_i] == ave_stat_i:
amp_ave += np.max( np.abs( self.temp_window ) )/self.amplitude_calibrations[ant_i] ## DE-calibrate
num_amp_ave += 1
if self.store_antenna_data :
self.antenna_data[ant_i, :] = self.temp_window
self.engine.set_antennaData(ant_i, self.temp_window )#.view(np.double) )
if (average_station is not None):
self.ave_stat_i = ave_stat_i
self.station_ave_amp = amp_ave/num_amp_ave
else:
self.ave_stat_i = None
def plot_data(self, sky_T, source_XYZT=None):
self.plotted_sky_T = sky_T
sample_center = int(round( (sky_T - self.center_delay)/5.0e-9 ))
earliest_sample = sample_center - self.max_earlyHalf_length - self.half_minTrace
latest_sample = sample_center + self.max_lateHalf_length + self.half_minTrace
# if earliest_sample<self.loaded_indexRange[0] or latest_sample>self.loaded_indexRange[1]:
# self.load_raw_data( sky_T )
# n = self.imaging_half_hann_length_samples
for stat_i in range( self.num_stations ):
signal_dt = []
# cal_sum = 0
# n_ants = 0
max_amp = 0
for ant_i in range(self.stationi_to_anti[stat_i], self.stationi_to_anti[stat_i+1]):
ant_center_sample = sample_center + self.index_shifts[ant_i] - self.loaded_samples[ant_i]
ant_first_sample = ant_center_sample - self.earlyHalf_lengths[ant_i] - self.half_minTrace
data = self.antenna_data[ant_i, :]
abs_window = np.abs( data )
max_ant_amp = np.max( abs_window )
if max_ant_amp > max_amp:
max_amp = max_ant_amp
# cal_sum += self.amplitude_calibrations[ant_i]
# n_ants += 1
if source_XYZT is not None:
ant_XYZ = self.all_antXYZs[ant_i]
reception_time = np.linalg.norm(ant_XYZ - source_XYZT[:3])/v_air
first_sample_time = self.all_antStartTimes[ant_i] + (self.loaded_samples[ant_i] + ant_first_sample)*(5.0e-9)
if not np.isfinite(source_XYZT[3]):
signal_t = np.argmax( abs_window )*(5.0e-9)
source_XYZT[3] = first_sample_time+signal_t - reception_time
reception_time += source_XYZT[3]
signal_dt.append( reception_time-first_sample_time )
else:
signal_dt.append( None )
# print(stat_i, max_amp*n_ants/cal_sum)
for ant_i, sdt in zip(range(self.stationi_to_anti[stat_i], self.stationi_to_anti[stat_i+1]), signal_dt):
p = self.antenna_polarization[ant_i]
data = np.array( self.antenna_data[ant_i, :] )
data *= 1.0/max_amp
offset = stat_i*3 + p*0.75
plt.plot( np.abs(data) + offset )
plt.plot( np.real(data) + offset )
if source_XYZT is not None:
sdt_samples = sdt/(5.0e-9)
plt.plot( [sdt_samples,sdt_samples], [offset,offset+1] )
plt.annotate( self.station_names[stat_i], (0, stat_i*3) )
def plt_sourceLines(self, source_XYZT, color):
sample_center = int(round( (self.plotted_sky_T - self.center_delay)/5.0e-9 ))
# earliest_sample = sample_center - self.max_earlyHalf_length - self.half_minTrace
# latest_sample = sample_center + self.max_lateHalf_length + self.half_minTrace
# n = self.imaging_half_hann_length_samples
for stat_i in range( self.num_stations ):
for ant_i in range(self.stationi_to_anti[stat_i], self.stationi_to_anti[stat_i+1]):
ant_center_sample = sample_center + self.index_shifts[ant_i] - self.loaded_samples[ant_i]
ant_first_sample = ant_center_sample - self.earlyHalf_lengths[ant_i] - self.half_minTrace
# ant_final_sample = ant_center_sample + self.lateHalf_lengths[ant_i] + self.half_minTrace
# width = ant_final_sample - ant_first_sample
# has_data_loss = data_cut_inspan( self.data_loss_spans[ ant_i ], ant_first_sample, ant_final_sample )
# if has_data_loss:
# continue
abs_window = np.abs( self.antenna_data[ant_i, :] )
ant_XYZ = self.all_antXYZs[ant_i]
reception_time = np.linalg.norm(ant_XYZ - source_XYZT[:3])/v_air
first_sample_time = self.all_antStartTimes[ant_i] + (self.loaded_samples[ant_i] + ant_first_sample)*(5.0e-9)
if not np.isfinite(source_XYZT[3]):
signal_t = np.argmax( abs_window )*(5.0e-9)
source_XYZT[3] = first_sample_time+signal_t - reception_time
reception_time += source_XYZT[3]
sdt = reception_time-first_sample_time
sdt_samples = sdt/(5.0e-9)
offset = stat_i*3 + self.antenna_polarization[ant_i] *0.75
plt.plot( [sdt_samples,sdt_samples], [offset,offset+1], c=color )
def load_PSF(self, X_val, Y_val, Z_val, average_station=None):
""" make a point source at center voxel with XYZ polarization. average_station calculates the average peak amplitude for that station"""
stat_X_dipole = np.empty( len(self.beamformed_freqs), dtype=np.cdouble )
stat_Y_dipole = np.empty( len(self.beamformed_freqs), dtype=np.cdouble )
TMP = np.zeros( len(self.frequencies), dtype=np.cdouble )
shifter_TMP = np.zeros( len(self.beamformed_freqs), dtype=np.cdouble )
if average_station is not None:
ave_stat_i = self.station_names.index( average_station )
amp_ave = 0
num_amp_ave = 0
for stat_i in range( self.num_stations ):
ant_range = self.stationi_to_antRange[stat_i]
# print('station', stat_i, 'Fi:', self.max_freq_index-self.start_freq_index)
### get jones matrices
J_00 = self.cut_jones_matrices[stat_i, :, 0,0]
J_01 = self.cut_jones_matrices[stat_i, :, 0,1]
J_10 = self.cut_jones_matrices[stat_i, :, 1,0]
J_11 = self.cut_jones_matrices[stat_i, :, 1,1]
### get angles
## from station to source!!
stat_X = self.center_XYZ[0] - np.average( self.all_antXYZs[ant_range, 0] )
stat_Y = self.center_XYZ[1] - np.average( self.all_antXYZs[ant_range, 1] )
stat_Z = self.center_XYZ[2] - np.average( self.all_antXYZs[ant_range, 2] )
stat_R = np.sqrt( stat_X*stat_X + stat_Y*stat_Y + stat_Z*stat_Z )
stat_zenith = np.arccos( stat_Z/stat_R )
stat_azimuth = np.arctan2( stat_Y, stat_X)
sin_stat_azimuth = np.sin( stat_azimuth )
cos_stat_azimuth = np.cos( stat_azimuth )
sin_stat_zenith = np.sin( stat_zenith )
cos_stat_zenith = np.cos( stat_zenith )
stat_X_dipole[:] = 0.0
stat_Y_dipole[:] = 0.0
## X dipole
            ## X-oriented field
## zenithal
T = cos_stat_azimuth*cos_stat_zenith*J_00
## azimuthal
T += -sin_stat_azimuth*J_01
stat_X_dipole += T*X_val
            ## Y-oriented field
## zenithal
T = cos_stat_zenith*sin_stat_azimuth*J_00
## azimuthal
T += cos_stat_azimuth*J_01
stat_X_dipole += T*Y_val
            ## Z-oriented field
## zenithal
T = -sin_stat_zenith*J_00
## no azimuthal!!
stat_X_dipole += T*Z_val
## Y dipole
            ## X-oriented field
## zenithal
T = cos_stat_azimuth*cos_stat_zenith*J_10
## azimuthal
T += -sin_stat_azimuth*J_11
stat_Y_dipole += T*X_val
            ## Y-oriented field
## zenithal
T = cos_stat_zenith*sin_stat_azimuth*J_10
## azimuthal
T += cos_stat_azimuth*J_11
stat_Y_dipole += T*Y_val
            ## Z-oriented field
## zenithal
T = -sin_stat_zenith*J_10
## no azimuthal!!
stat_Y_dipole += T*Z_val
# datums = []
# pol = []
# signal_dt = []
# max_amp = 0.0
if (average_station is not None) and ave_stat_i==stat_i:
do_amp_average = True
else:
do_amp_average = False
for ant_i in range(self.stationi_to_anti[stat_i], self.stationi_to_anti[stat_i+1] ):
sub_sample_shift = self.geometric_delays[ant_i] + self.cal_shifts[ant_i]
shifter_TMP[:] = self.beamformed_freqs
shifter_TMP *= -2j*np.pi*sub_sample_shift
np.exp( shifter_TMP, out=shifter_TMP )
R = np.linalg.norm( self.all_antXYZs[ant_i] - self.center_XYZ )
if self.antenna_polarization[ant_i] == 0: ## X-dipole
shifter_TMP *= stat_X_dipole
else: ## Y-dipole
shifter_TMP *= stat_Y_dipole
shifter_TMP *= 1.0/R
TMP[self.start_freq_index:self.end_freq_index] = shifter_TMP
ifft = np.fft.ifft( TMP )
if self.store_antenna_data:
self.antenna_data[ant_i, :] = ifft
if do_amp_average:
amp_ave += np.max(np.abs(ifft))
num_amp_ave += 1
self.engine.set_antennaData(ant_i, ifft) ## note this modifies in place!!
if self.store_antenna_data:
self.antenna_data_magnitude = np.sqrt( cyt.total_2dnorm_squared( self.antenna_data ) )
if average_station is not None:
self.ave_stat_i = ave_stat_i
self.station_ave_amp = amp_ave/num_amp_ave
else:
self.ave_stat_i = None
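    # Illustrative call pattern (assumes the imager was built with store_antenna_data=True,
    # which inject_noise needs; the station name is only an example):
    #   BF.load_PSF( 0.0, 0.0, 1.0, average_station='CS002' )   # vertically polarised point source
    #   ratios = BF.inject_noise( 0.1 )                         # add ~10% broadband noise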
def inject_noise(self, ratio, station_to_plot=None):
noise_sigma = 2*self.antenna_data_magnitude*ratio/np.sqrt( self.antenna_data.shape[0]*self.antenna_data.shape[1] )
total_norm_sq = 0.0
ave_amp = 0
num_ave_amp = 0
plot_Y = 0
FTMP = np.zeros( len(self.frequencies) , dtype=np.cdouble)
for ant_i in range(self.num_antennas):
rand = np.random.normal(size=2*( self.F80MHZ_i - self.F30MHZ_i ), scale=noise_sigma).view(np.cdouble)
FTMP[self.F30MHZ_i:self.F80MHZ_i] = rand
rand = np.fft.ifft( FTMP )
total_norm_sq += cyt.total_1dnorm_squared( rand )
if (self.ave_stat_i is not None) and self.anti_to_stati[ant_i]==self.ave_stat_i:
ave_amp += np.average( np.abs( rand ) )
num_ave_amp += 1
rand += self.antenna_data[ant_i]
if (station_to_plot is not None) and self.anti_to_stati[ant_i]==station_to_plot:
A = np.array( rand )
ABS = np.abs(A)
ABS_max = np.max(ABS)
plt.plot(ABS+plot_Y)
plt.plot(A.real + plot_Y)
plot_Y += ABS_max
self.engine.set_antennaData(ant_i, rand)
if (station_to_plot is not None) :
if self.ave_stat_i is not None:
plt.axhline(ave_amp/num_ave_amp, c='r')
                plt.axhline(self.station_ave_amp, c='b')
plt.show()
total_norm = np.sqrt( total_norm_sq )
if (self.ave_stat_i is None):
return total_norm/self.antenna_data_magnitude
else:
            return total_norm/self.antenna_data_magnitude, self.station_ave_amp/(ave_amp/num_ave_amp)
def get_empty_image(self):
return np.empty( (self.numVoxels_XYZ[0], self.numVoxels_XYZ[1], self.numVoxels_XYZ[2], self.num_freqs, 3) , dtype=np.cdouble)
def get_image(self, out_image=None, print_progress=False, weighting=None):
if weighting is not None:
if weighting is True:
weighting = self.antenna_norms_in_range
A = self.engine.full_image( out_image, print_progress, frequency_weights=weighting )
return A
def get_empty_chuncked_image(self, num_chunks):
if num_chunks == 0: ## see behavior below
num_chunks = 1
return np.empty( (self.numVoxels_XYZ[0], self.numVoxels_XYZ[1], self.numVoxels_XYZ[2], num_chunks) , dtype=np.cdouble)
def get_chunked_intesity_image(self, num_chunks, out_image=None, print_progress=False, weighting=None):
""" set num_chunks to 0 to sum over whole length. """
if weighting is not None:
if weighting is True:
weighting = self.antenna_norms_in_range
if num_chunks == 0:
starting_i = 0
chunck_size = self.total_trace_length
num_chunks = 1
else:
starting_i = self.starting_edge_length
chunck_size = int(self.minTraceLength_samples/num_chunks)
RET = self.engine.ChunkedIntensity_Image( starting_i, chunck_size, num_chunks,
image = out_image, print_progress = print_progress, frequency_weights = weighting)
return RET
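    # Sketch of the chunked-intensity usage (BF is an assumed imager instance; num_chunks=0
    # would instead integrate over the whole trace):
    #   BF.window_data( sky_T )
    #   movie = BF.get_chunked_intesity_image( 4, print_progress=True, weighting=True )
    #   # movie[x,y,z,c] is the beamformed intensity of time-chunk c at voxel (x,y,z)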
def get_timeDiff_at_loc(self, XYZloc):
"""given a source at XYZ, return time diff, that should be added to sky_T, to get the time at that location"""
return ( np.linalg.norm( self.reference_XYZ - XYZloc ) - np.linalg.norm( self.reference_XYZ - self.center_XYZ) )/v_air
def get_correctionMatrix(self, out=None, loc_to_use=None ):
return self.engine.get_correctionMatrix(out, loc_to_use)
def get_empty_partial_inverse_FFT(self):
return np.empty(self.total_trace_length, dtype=np.cdouble)
def partial_inverse_FFT(self, in_data, out_data=None):
return self.engine.partial_inverse_FFT( in_data, out_data)
def get_empty_full_inverse_FFT(self, mode='wingless'):
""" mode can be 'full' or 'wingless'. wingless has time traces minTraceLength_samples long.
full is total_trace_length long.
hannless simply cuts-off the tukey windows."""
if mode == 'full':
T = self.total_trace_length
elif mode == 'wingless':
T = self.minTraceLength_samples
elif mode == 'hannless':
T = self.total_trace_length-2*self.imaging_half_hann_length_samples
return np.empty( ( self.numVoxels_XYZ[0], self.numVoxels_XYZ[1], self.numVoxels_XYZ[2], T, 3),
dtype = np.cdouble)
def full_inverse_FFT(self, in_image, out_data=None, mode='wingless'):
""" mode can be 'full', or 'wingless'. wingless has time traces minTraceLength_samples long, else full is total_trace_length long"""
if out_data is None:
out_data = self.get_empty_full_inverse_FFT(mode)
#out_data[:] = 0.0
# TMP = np.empty(self.total_trace_length, dtype=np.cdouble)
if mode == 'wingless':
# dN = self.total_trace_length - self.minTraceLength_samples
# hdN = int(dN/2)
hdN = self.starting_edge_length
L = self.minTraceLength_samples
for xi in range(self.numVoxels_XYZ[0]):
for yi in range(self.numVoxels_XYZ[1]):
for zi in range(self.numVoxels_XYZ[2]):
for pi in range(3):
self.engine.partial_inverse_FFT(in_image[xi,yi,zi,:,pi], self.ifft_full_tmp)
if mode == 'full':
out_data[xi,yi,zi,:,pi] = self.ifft_full_tmp
else:
out_data[xi, yi, zi, :, pi] = self.ifft_full_tmp[ hdN : hdN+L ]
return out_data
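    # Typical frequency-domain to time-domain flow (instance and variable names are assumed):
    #   img = BF.get_empty_image()
    #   BF.get_image( out_image=img, weighting=True )
    #   E_of_t = BF.full_inverse_FFT( img, mode='wingless' )   # (nx,ny,nz,samples,3) time traces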
def get_empty_SpotImage(self):
return np.empty((self.num_freqs, 3), dtype=np.cdouble)
def get_SpotImage(self, loc, out_image=None, weighting=None, do_matrix=None):
if weighting is not None:
if weighting is True:
weighting = self.antenna_norms_in_range
if do_matrix is not None:
if do_matrix is True:
self.temp_inversion_matrix = self.get_correctionMatrix(out=self.temp_inversion_matrix,
loc_to_use=loc)
self.invertrix.set_matrix(self.temp_inversion_matrix)
self.invertrix.get_psuedoinverse(self.inverted_matrix, override_cond=0)
do_matrix = self.inverted_matrix
A = self.engine.Image_at_Spot(loc[0], loc[1], loc[2], out_image,
frequency_weights=weighting, freq_mode=1, matrix=do_matrix)
return A
def get_empty_polarized_inverse_FFT(self, mode='full'):
if mode == 'full':
return np.empty((self.total_trace_length, 3), dtype=np.cdouble)
elif mode == 'wingless':
return np.empty((self.minTraceLength_samples, 3), dtype=np.cdouble)
def polarized_inverse_FFT(self, in_data, out_data=None, mode='full'):
if out_data is None:
out_data = self.get_empty_polarized_inverse_FFT(mode)
for pi in range(3):
TMP = self.engine.partial_inverse_FFT(in_data[:, pi], self.ifft_full_tmp, freq_mode=1)
if mode == 'full':
out_data[:, pi] = TMP
elif mode == 'wingless':
out_data[:, pi] = TMP[
self.starting_edge_length: self.starting_edge_length + self.minTraceLength_samples]
return out_data
def get_secondaryLength_beamformer(self, trace_length):
if trace_length > self.minTraceLength_samples:
print('ERROR: secondary length must be smaller than initial length')
quit()
return self.secondary_length_beamformer( trace_length, self )
class secondary_length_beamformer:
def __init__(self, trace_length, parent):
self.minTraceLength_samples = trace_length
self.parent = parent
self.half_minTrace = int(round(self.minTraceLength_samples / 2))
            self.total_trace_length = fft.next_fast_len( parent.max_earlyHalf_length + parent.max_lateHalf_length + trace_length + 2*parent.imaging_half_hann_length_samples )
self.starting_edge_length = self.parent.max_earlyHalf_length + parent.imaging_half_hann_length_samples
# self.trace_loadBuffer_length = self.total_trace_length # this is buffer before arrival sample. this is a little long, probably only need half this!
self.frequencies = np.fft.fftfreq(self.total_trace_length, d=5.0e-9)
antenna_model = calibrated_AARTFAAC_model()
upwards_JM = antenna_model.Jones_ONLY(self.frequencies, zenith=0.0, azimuth=0.0)
half_F = int(len(self.frequencies) / 2)
lowest_Fi = np.where(self.frequencies[:half_F] > 30e6)[0][0]
highest_Fi = np.where(self.frequencies[:half_F] < 80e6)[0][-1]
self.F30MHZ_i = lowest_Fi
self.F80MHZ_i = highest_Fi
posFreq_amps = np.array(
[np.linalg.norm(upwards_JM[fi, :, :], ord=2) for fi in range(lowest_Fi, highest_Fi)])
self.max_freq_index = np.argmax(posFreq_amps) + lowest_Fi
ref_amp = np.max(posFreq_amps) * parent.frequency_width_factor
if posFreq_amps[0] <= ref_amp:
self.start_freq_index = \
np.where(np.logical_and(posFreq_amps[:-1] <= ref_amp, posFreq_amps[1:] > ref_amp))[0][0]
else:
self.start_freq_index = 0
if posFreq_amps[-1] <= ref_amp:
self.end_freq_index = \
np.where(np.logical_and(posFreq_amps[:-1] >= ref_amp, posFreq_amps[1:] < ref_amp))[0][0]
else:
self.end_freq_index = len(posFreq_amps)
self.antenna_norms_in_range = np.array(posFreq_amps[self.start_freq_index:self.end_freq_index])
self.start_freq_index += lowest_Fi
self.end_freq_index += lowest_Fi
self.beamformed_freqs = self.frequencies[self.start_freq_index:self.end_freq_index]
self.num_freqs = self.end_freq_index - self.start_freq_index
## ALL jones matrices!
self.cut_jones_matrices = np.empty((self.parent.num_stations, self.num_freqs, 2, 2), dtype=np.cdouble)
self.JM_condition_numbers = np.empty(self.parent.num_stations, dtype=np.double) ## both at peak frequency
self.JM_magnitudes = np.empty(self.parent.num_stations, dtype=np.double)
# self.station_R = np.empty(self.num_stations, dtype=np.double) ## distance to center pixel
for stat_i in range(self.parent.num_stations):
ant_XYZs = parent.all_antXYZs[parent.stationi_to_antRange[stat_i]]
stat_XYZ = np.average(ant_XYZs, axis=0)
## from station to source!
delta_XYZ = parent.center_XYZ - stat_XYZ
center_R = np.linalg.norm(delta_XYZ)
center_zenith = np.arccos(delta_XYZ[2] / center_R) * RTD
center_azimuth = np.arctan2(delta_XYZ[1], delta_XYZ[0]) * RTD
# self.cut_jones_matrices[stat_i, :,:,:] = antenna_model.Jones_Matrices(self.beamformed_freqs, zenith=center_zenith, azimuth=center_azimuth)
self.cut_jones_matrices[stat_i, :, :, :] = antenna_model.Jones_ONLY(self.beamformed_freqs,
zenith=center_zenith,
azimuth=center_azimuth)
self.JM_condition_numbers[stat_i] = np.linalg.cond(
self.cut_jones_matrices[stat_i, self.max_freq_index - self.start_freq_index, :, :])
self.JM_magnitudes[stat_i] = np.linalg.norm(
self.cut_jones_matrices[stat_i, self.max_freq_index - self.start_freq_index, :, :], ord=2)
# self.station_R[stat_i] = center_R
#### windowing matrices!
self.parent.engine.set_antenna_functions(self.total_trace_length, self.start_freq_index, self.end_freq_index,
self.frequencies, self.cut_jones_matrices, freq_mode=2)
### memory
self.temp_window = np.empty(self.total_trace_length, dtype=np.cdouble)
self.temp_inversion_matrix = np.empty((3, 3), dtype=np.cdouble)
self.inverted_matrix = np.empty((3, 3), dtype=np.cdouble)
self.invertrix = parent.invertrix#cyt.SVD_psuedoinversion(3, 3)
self.ifft_full_tmp = self.get_empty_partial_inverse_FFT()
def window_data(self, sky_T, average_station=None):
# if average_station is not None:
# ave_stat_i = self.parent.station_names.index(average_station)
amp_ave = 0
num_amp_ave = 0
sample_center = int(round((sky_T - self.parent.center_delay) / 5.0e-9))
earliest_sample = sample_center - self.parent.max_earlyHalf_length - self.half_minTrace
latest_sample = sample_center + self.parent.max_lateHalf_length + self.half_minTrace
if earliest_sample < self.parent.loaded_indexRange[0] or latest_sample > self.parent.loaded_indexRange[1]:
self.parent.load_raw_data(sky_T)
# print('windowing')
n = self.parent.imaging_half_hann_length_samples
for ant_i in range(self.parent.num_antennas):
ant_center_sample = sample_center + self.parent.index_shifts[ant_i] - self.parent.loaded_samples[ant_i]
ant_first_sample = ant_center_sample - self.parent.earlyHalf_lengths[ant_i] - self.half_minTrace
ant_final_sample = ant_center_sample + self.parent.lateHalf_lengths[ant_i] + self.half_minTrace
width = ant_final_sample - ant_first_sample
has_data_loss = data_cut_inspan(self.parent.data_loss_spans[ant_i], ant_first_sample, ant_final_sample)
if has_data_loss:
# self.windowed_data[ant_i] = 0.0
self.parent.engine.set_antennaData_zero(ant_i)
# self.antenna_windowed[ant_i] = 0
continue
# self.antenna_windowed[ant_i] = 1
delay_samples = self.parent.max_earlyHalf_length - self.parent.earlyHalf_lengths[ant_i]
self.temp_window[:] = 0.0
self.temp_window[delay_samples:delay_samples + width] = self.parent.loaded_data[ant_i,
ant_first_sample:ant_final_sample]
self.temp_window[delay_samples: delay_samples + n] *= self.parent.imaging_hann[:n]
self.temp_window[delay_samples + width - n: delay_samples + width] *= self.parent.imaging_hann[n:]
# if (average_station is not None) and self.anti_to_stati[ant_i] == ave_stat_i:
# amp_ave += np.max(np.abs(self.temp_window)) / self.amplitude_calibrations[ant_i] ## DE-calibrate
# num_amp_ave += 1
# if self.store_antenna_data:
# self.antenna_data[ant_i, :] = self.temp_window
self.parent.engine.set_antennaData(ant_i, self.temp_window, freq_mode=2) # .view(np.double) )
# if (average_station is not None):
# self.ave_stat_i = ave_stat_i
# self.station_ave_amp = amp_ave / num_amp_ave
# else:
# self.ave_stat_i = None
def get_empty_SpotImage(self):
return np.empty(( self.num_freqs, 3), dtype=np.cdouble)
def get_SpotImage(self, loc, out_image=None, weighting=None, do_matrix=None):
if weighting is not None:
if weighting is True:
weighting = self.antenna_norms_in_range
if do_matrix is not None:
if do_matrix is True:
self.temp_inversion_matrix = self.parent.get_correctionMatrix(out=self.temp_inversion_matrix, loc_to_use=loc )
self.invertrix.set_matrix(self.temp_inversion_matrix )
self.invertrix.get_psuedoinverse(self.inverted_matrix, override_cond=0)
do_matrix = self.inverted_matrix
A = self.parent.engine.Image_at_Spot(loc[0], loc[1], loc[2], out_image,
frequency_weights=weighting, freq_mode=2, matrix=do_matrix)
return A
def get_empty_partial_inverse_FFT(self):
return np.empty(self.total_trace_length, dtype=np.cdouble)
def partial_inverse_FFT(self, in_data, out_data=None):
return self.parent.engine.partial_inverse_FFT(in_data, out_data, freq_mode=2)
def get_empty_polarized_inverse_FFT(self, mode='full'):
if mode == 'full':
return np.empty( (self.total_trace_length,3), dtype=np.cdouble)
elif mode == 'wingless':
return np.empty( (self.minTraceLength_samples,3), dtype=np.cdouble)
def polarized_inverse_FFT(self, in_data, out_data=None, mode='full'):
if out_data is None:
out_data = self.get_empty_polarized_inverse_FFT( mode )
for pi in range(3):
TMP = self.parent.engine.partial_inverse_FFT(in_data[:,pi], self.ifft_full_tmp, freq_mode=2)
if mode == 'full':
out_data[:,pi] = TMP
elif mode == 'wingless':
out_data[:, pi] = TMP[ self.starting_edge_length : self.starting_edge_length + self.minTraceLength_samples ]
return out_data
#### now we need code to interpret the beamformer
## first, 3D stokes
def simple_coherency( vec ):
return np.outer( vec, np.conj( vec ) )
class stokes_3D:
def __init__(self, coherency_matrix):
self.coherency_matrix = np.array( coherency_matrix )
self.Rreal_eigvals, self.Rreal_eigvecs = np.linalg.eig( np.real(self.coherency_matrix) )
sorter = np.argsort(self.Rreal_eigvals)[::-1]
self.Rreal_eigvals = self.Rreal_eigvals[sorter]
self.Rreal_eigvecs = self.Rreal_eigvecs[:, sorter]
self.R_total_eigvals = None
self.Rreal_eigvecs_inverse = np.linalg.inv( self.Rreal_eigvecs )
# A = np.dot(self.Rreal_eigvecs_inverse, np.dot(np.real(coherency_matrix), self.Rreal_eigvecs) )
# print(A)
# print(self.Rreal_eigvals)
self.transformed_coherency_matrix = np.dot(self.Rreal_eigvecs_inverse, np.dot( self.coherency_matrix, self.Rreal_eigvecs) )
# print("TCM")
# print(self.transformed_coherency_matrix)
# print()
SM = np.zeros((3,3), dtype=np.double)
SM[0,0] = np.real( self.transformed_coherency_matrix[0,0] + self.transformed_coherency_matrix[1,1] + self.transformed_coherency_matrix[2,2] )
# SM[0,1] = np.real( self.transformed_coherency_matrix[0,1] + self.transformed_coherency_matrix[1,0] )
# SM[0,2] = np.real( self.transformed_coherency_matrix[0,2] + self.transformed_coherency_matrix[2,0] )
SM[1,0] = np.real( 1j*(self.transformed_coherency_matrix[0,1] - self.transformed_coherency_matrix[1,0]) )
SM[1,1] = np.real( self.transformed_coherency_matrix[0,0] - self.transformed_coherency_matrix[1,1] )
# SM[1,2] = np.real( self.transformed_coherency_matrix[0,2] + self.transformed_coherency_matrix[2,0] )
SM[2,0] = np.real( 1j*(self.transformed_coherency_matrix[0,2] - self.transformed_coherency_matrix[2,0]) )
SM[2,1] = np.real( 1j*(self.transformed_coherency_matrix[1,2] - self.transformed_coherency_matrix[2,1]) )
SM[2,2] = np.real( (self.transformed_coherency_matrix[0,0] + self.transformed_coherency_matrix[1,1] - 2*self.transformed_coherency_matrix[2,2])/np.sqrt(3) )
# print('SM')
# print(SM)
self.stokes_matrix = SM
self.intensity = SM[0,0]
self.linear_polarization = SM[1,1]
self.degree_of_directionality = SM[2,2] = SM[2,2]*np.sqrt(3)/self.intensity
self.angular_momentum = np.array( [ SM[2,1]*0.5, -SM[2,0]*0.5, SM[1,0]*0.5 ] )
def get_axis(self, i=0):
"""return axis of the 3D elipse. axis 0 (default) is direction of linear polarization"""
return self.Rreal_eigvecs[:,i]
def get_intensity(self):
"""return total intensity"""
return self.intensity
def get_linear_intensity(self):
"""return linear polarization intensity (in direction of axis 0)"""
return self.linear_polarization
def get_circular_intensity(self):
"""return intensity of ciruclar polarization"""
return np.linalg.norm(self.angular_momentum)*2
def get_angular_momentum_normal(self):
"""return angular momentum vector"""
R = np.dot( self.Rreal_eigvecs, self.angular_momentum)
R *= 1.0/self.intensity
return R
def get_degrees_polarized(self):
"""return fraction linear polarized, circular polarized, and fraction directional. Closely related to degree of polarmetric purity"""
A = np.array([ self.linear_polarization/self.intensity, self.get_circular_intensity()/self.intensity, self.degree_of_directionality ])
return A
def get_degree_of_directionality(self):
return self.degree_of_directionality
def get_degree_of_polarimetric_purity(self):
S = self.stokes_matrix[1,0]*self.stokes_matrix[1,0]
S += self.stokes_matrix[1,1]*self.stokes_matrix[1,1]
S += self.stokes_matrix[2,0]*self.stokes_matrix[2,0]
S += self.stokes_matrix[2,1]*self.stokes_matrix[2,1]
S += self.stokes_matrix[2,2]*self.stokes_matrix[2,2]
return np.sqrt(3*S)/(2*self.intensity)
def get_degree_of_polarization(self):
if self.R_total_eigvals is None:
self.R_total_eigvals = np.linalg.eigvals( self.coherency_matrix )
sorter = np.argsort(self.R_total_eigvals)[::-1]
self.R_total_eigvals = self.R_total_eigvals[sorter]
return np.real( (self.R_total_eigvals[0] - self.R_total_eigvals[1])/self.intensity )
def get_indeces_of_polarametric_purity(self):
"""return array of three values. First is ration of power of completely polarized wave over total power, i.e., amount of polarized power (could be degree of polarization)
No idea what the second one is. Probably something about directionality.
Last index is the degree of polarimetric purity, and is a combination of first two. It includes polarized energy, and how much the polarization plane wobbles"""
P1 = self.get_degree_of_polarization()
P2 = np.real( (self.R_total_eigvals[0] + self.R_total_eigvals[1] - 2*self.R_total_eigvals[2])/self.intensity )
return np.array( [P1,P2, self.get_degree_of_polarimetric_purity()] )
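# Minimal usage sketch for the 3D-Stokes helper above. The field vector is a made-up
# example (circular polarization in the x-y plane), not output from the imager.
def _example_stokes_usage():
    ''' illustrative only: not called anywhere in the imaging pipeline '''
    E_field = np.array([1.0, 1.0j, 0.0])
    S = stokes_3D( simple_coherency(E_field) )
    return S.get_intensity(), S.get_degrees_polarized(), S.get_angular_momentum_normal()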
### 3D parabolic fitter
class parabola_3D:
def __init__(self, half_N, X_array, Y_array, Z_array):
self.X_array = X_array
self.Y_array = Y_array
self.Z_array = Z_array
self.dx = X_array[1] - X_array[0]
self.dy = Y_array[1] - Y_array[0]
self.dz = Z_array[1] - Z_array[0]
self.half_N = half_N
self.N_1D = 2*half_N + 1
self.num_points = self.N_1D*self.N_1D*self.N_1D
self.matrix = | np.empty( (self.num_points,10),dtype=np.double ) | numpy.empty |
import pytest
import os
import sys
import tempfile
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_raises
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed
from keras.layers import Input
from keras import optimizers
from keras import losses
from keras import metrics
from keras.utils.test_utils import keras_test
if sys.version_info[0] == 3:
import pickle
else:
import cPickle as pickle
skipif_no_tf_gpu = pytest.mark.skipif(
(K.backend() != 'tensorflow') or
(not K.tensorflow_backend._get_available_gpus()),
reason='Requires TensorFlow backend and a GPU')
@keras_test
def test_sequential_model_pickling():
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(RepeatVector(3))
model.add(TimeDistributed(Dense(3)))
model.compile(loss=losses.MSE,
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=[metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
state = pickle.dumps(model)
new_model = pickle.loads(state)
out2 = new_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
@keras_test
def test_sequential_model_pickling_2():
# test with custom optimizer, loss
custom_opt = optimizers.rmsprop
custom_loss = losses.mse
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(Dense(3))
model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
state = pickle.dumps(model)
model = pickle.loads(state)
out2 = model.predict(x)
assert_allclose(out, out2, atol=1e-05)
@keras_test
def test_functional_model_pickling():
inputs = Input(shape=(3,))
x = Dense(2)(inputs)
outputs = Dense(3)(x)
model = Model(inputs, outputs)
model.compile(loss=losses.MSE,
optimizer=optimizers.Adam(),
metrics=[metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
state = pickle.dumps(model)
model = pickle.loads(state)
out2 = model.predict(x)
assert_allclose(out, out2, atol=1e-05)
@keras_test
def test_pickling_multiple_metrics_outputs():
inputs = Input(shape=(5,))
x = Dense(5)(inputs)
output1 = Dense(1, name='output1')(x)
output2 = Dense(1, name='output2')(x)
model = Model(inputs=inputs, outputs=[output1, output2])
metrics = {'output1': ['mse', 'binary_accuracy'],
'output2': ['mse', 'binary_accuracy']
}
loss = {'output1': 'mse', 'output2': 'mse'}
model.compile(loss=loss, optimizer='sgd', metrics=metrics)
# assure that model is working
x = | np.array([[1, 1, 1, 1, 1]]) | numpy.array |
name = "attitudes"
''' Attitude Control Module
Ok, so the principle behind this module is the base class "att", which represents an attitude
description, by default of type "DCM". Can also be created from Euler Angles, PRV, Quaternions,
CRPs, and MRPs. Can also be transformed into these others by a method as well.
For simplicity's sake, I'm going to treat these classes as kind of a "dual number" where the DCM
representation is stored, but for all other types, the representation of that type is also stored.
This should allow for direct quaternion addition and so forth.
This should also allow me to simplify the addition/subtraction functions into a single function,
that reads the types of the inputs and acts accordingly.
There will probably also be an angular acceleration vector class, but I'll get there when I get
there.
Author: <NAME>
'''
#standard imports
import numpy as np
from numpy import linalg as LA
# ------------------------------------------------------------------------------------------------
# CONSTANTS
# ------------------------------------------------------------------------------------------------
# Nothing here yet
# -----------------------------------------------------------------------------------------------
# BASE CLASS "ATT"
# -----------------------------------------------------------------------------------------------
class att():
''' Attitude Description Class
Defines an attitude, by default from a DCM description. Also contains a whole bunch of class
methods for defining by other means (CRP, quaternions, etc).
Arguments:
        DCM: (ndarray [3x3]) General 3x3 DCM of the attitude description
'''
def __init__(self, DCM, type='DCM', angle_vec=np.array([]),units='rad',
euler_type=None,phi=None,path=None):
''' Standard Definition from a DCM '''
if np.max(np.abs((DCM.T @ DCM) - np.eye(3))) > 1e-3:
raise ValueError('DCM doesn\'t appear to be orthonormal')
self.DCM = DCM
self.type = type
self.units = units
if euler_type:
self.order = euler_type
        if np.size(angle_vec):
self.vec = angle_vec
        if phi is not None:
self.phi = phi
if path:
self.path = path
def __repr__(self):
if self.type == 'DCM':
return 'DCM Attitude description is \n {}'.format(self.DCM)
elif self.type == 'PRV':
statement = ''' \n
{} Attitude description is: \n e = {} \n Phi = {} {} \n
\n DCM description is: \n {} \n
'''
return statement.format(self.type,list(self.vec),self.phi,self.units,self.DCM)
elif self.type == 'Euler Angle':
statement = '\n {} {} Attitude description is: \n {} {} \n \n DCM description is: \n {} \n'
return statement.format(self.order,self.type,list(self.vec),self.units,self.DCM)
else:
statement = '\n {} Attitude description is: \n {} \n \n DCM description is: \n {} \n'
return statement.format(self.type,np.array(self.vec).flatten(),self.DCM)
@classmethod
def _from_eul_ang(cls,type,ang1,ang2,ang3,units='deg'):
''' Definition from Euler Angles
Takes a type, 3 angles, and units to determine a DCM, then records both sets
Arguments:
type: (int) int of order of rotation axes
ang1: (float) angle of rotation about first axis
ang2: (float) angle of rotation about second axis
ang3: (float) angle of rotation about third axis
units: (string) either 'rad' or 'deg'
'''
if units=='deg':
ang1, ang2, ang3 = np.radians(ang1),np.radians(ang2),np.radians(ang3)
if type not in (123,132,213,231,312,321,131,121,212,232,313,323):
raise ValueError('Euler angle type definition is incorrect')
angle_vec = np.array([ang1,ang2,ang3])
type = str(type)
DCM = eul_to_DCM(int(type[0]),ang1,int(type[1]),ang2,int(type[2]),ang3,'rad')
if units=='deg':
angle_vec = np.degrees(angle_vec)
return cls(DCM,'Euler Angle',angle_vec=angle_vec,units=units,euler_type=type)
@classmethod
def _from_PRV(cls,vec,phi=None,units='rad'):
''' Definition from Principle Rotation Vector
Takes either a vector with norm != 1 or a normalized vector and a phi rotation magnitude
Internally, the normalized vector and the phi rotation are used
Arguments:
vec: (list) principle rotation vector
phi: (float) optional, rotation magnitude
units: (string) either 'rad' or 'deg' to specify units for phi
'''
if not phi:
phi = LA.norm(vec)
vec = vec/LA.norm(vec)
if units=='deg':
phi = np.radians(phi)
e1,e2,e3 = vec
sigma = 1 - np.cos(phi)
cphi = np.cos(phi)
sphi = np.sin(phi)
C = np.array([[e1*e1*sigma+cphi,e1*e2*sigma+e3*sphi,e1*e3*sigma - e2*sphi],
[e2*e1*sigma - e3*sphi,e2**2*sigma+cphi,e2*e3*sigma+e1*sphi],
[e3*e1*sigma+e2*sphi,e3*e2*sigma-e1*sphi,e3**2*sigma+cphi]])
if units=='deg':
phi = np.degrees(phi)
return cls(C,'PRV', units=units, angle_vec=np.array(vec), phi=phi)
@classmethod
def _from_quat(cls,vec):
'''Definition from Quaternions
Takes in a quaternion and spits out an attitude object (DCM). Checks first for a valid
quaternion
Arguments:
vec: (list) of quaternion values
'''
if np.abs(LA.norm(vec)-1) > 1e-13:
raise ValueError('Quaternions must have norm of 1')
b0,b1,b2,b3 = vec
C = np.array([[b0**2+b1**2-b2**2-b3**2, 2*(b1*b2+b0*b3), 2*(b1*b3-b0*b2)],
[2*(b1*b2-b0*b3), b0**2-b1**2+b2**2-b3**2, 2*(b2*b3+b0*b1)],
[2*(b1*b3+b0*b2), 2*(b2*b3-b0*b1), b0**2-b1**2-b2**2+b3**2]])
return cls(C,'Quaternion', angle_vec=vec)
@classmethod
def _from_CRP(cls,vec):
'''Definition from Classical Rodriguez Parameters
Uses the vector definition of the DCM to convert CRPs into a valid attitude object (element
option also available in comments)
Arguments:
vec: (list) of CRP values
'''
q = np.atleast_2d(vec).reshape(3,1)
C = (1/(1+q.T@q))*((1-q.T@q)*np.eye(3) + 2 * q @ q.T - 2 * tilde(q))
# q1,q2,q3 = q.reshape(np.size(vec))
# C = np.array([[1+q1**2-q2**2-q3**2, 2*(q1*q2+q3), 2*(q1*q3-q2)],
# [2*(q1*q2-q3), 1-q1**2+q2**2-q3**2, 2*(q2*q3+q1)],
# [2*(q1*q3+q2), 2*(q2*q3-q1), 1-q1**2-q2**2+q3**2]])
# C = (1/(1 + q.T @ q)) * C
return cls(C,'CRP',angle_vec=np.array(vec))
@classmethod
def _from_MRP(cls,vec):
'''Definition from Modified Rodriguez Parameters
Uses the vector definition of the DCM to convert MRPs into a valid attitude object. Returns
the path whether it's long (norm > 1) or short (norm < 1) with norm==1 taken to be short
Arguments:
vec: (list) of MRP values
'''
s = np.atleast_2d(vec).T
C = np.eye(3) + (8*tilde(s)@tilde(s) - 4*(1-s.T@s)*tilde(s))/(1+s.T@s)**2
if LA.norm(vec) > 1:
path = 'long'
else:
path = 'short'
return cls(C,'MRP',angle_vec=np.array(vec),path=path)
def _to_eul_ang(self,type,units='deg'):
'''Conversion to Euler Angles. There's no easy way to do this, so it's always just done
from the DCM. Which is fine, it's still quick.
Arguments:
type: (int) currently must be 321 or 313 since those are common. Will expand
units: (str) optional, units to output the angles
'''
C = self.DCM
if type == 321:
ang1 = np.arctan2(C[0,1],C[0,0])
ang2 = -np.arcsin(C[0,2])
ang3 = np.arctan2(C[1,2],C[2,2])
elif type == 313:
ang1 = np.arctan2(C[2,0],-C[2,1])
ang2 = np.arccos(C[2,2])
ang3 = np.arctan2(C[0,2],C[1,2])
if units == 'deg':
ang1,ang2,ang3 = np.degrees([ang1,ang2,ang3])
return self._from_eul_ang(type,ang1,ang2,ang3,units=units)
def _to_PRV(self, units='rad'):
        '''Conversion to Principle Rotation Vector. Always done from the DCM. Only takes an
        optional units argument for phi.
Outputs the short version of the PRV (using arccos function) and the positive output
for e_hat
'''
C = self.DCM
phi = np.arccos(0.5*(C[0,0]+C[1,1]+C[2,2]-1))
e = (1/(2*np.sin(phi)))*np.array([C[1,2]-C[2,1],C[2,0]-C[0,2],C[0,1]-C[1,0]])
if units=='deg':
phi = np.degrees(phi)
return self._from_PRV(e,phi=phi,units=units)
def _to_quat(self, path='short'):
'''If the object is a classical or modified Rodriguez parameter object, directly converts
to quaternions via known relations. Otherwise, uses sheppard's method to determine the
quaternions from the DCM.
Arguments:
path: (str) optional, tells the function whether you'd like the short way or the
long way
'''
if self.type == 'CRP':
q = self.vec
b0 = 1/np.sqrt(1+LA.norm(q)**2)
b1 = q[0]*b0
b2 = q[1]*b0
b3 = q[2]*b0
elif self.type == 'MRP':
s = self.vec
b0 = (1-LA.norm(s)**2)/(1+LA.norm(s)**2)
b1 = 2*s[0]/(1+LA.norm(s)**2)
b2 = 2*s[1]/(1+LA.norm(s)**2)
b3 = 2*s[2]/(1+LA.norm(s)**2)
else:
#the annoying way...
C = self.DCM
[[C11,C12,C13],
[C21,C22,C23],
[C31,C32,C33]] = C
trC = C[0,0]+C[1,1]+C[2,2]
b02 = 0.25*(1+trC)
b12 = 0.25*(1+2*C[0,0]-trC)
b22 = 0.25*(1+2*C[1,1]-trC)
b32 = 0.25*(1+2*C[2,2]-trC)
b0b1 = (C23 - C32)/4
b0b2 = (C31 - C13)/4
b0b3 = (C12 - C21)/4
b1b2 = (C12 + C21)/4
b3b1 = (C31 + C13)/4
b2b3 = (C23 + C32)/4
squares = [b02,b12,b22,b32]
if b02 == np.max(squares):
b0 = np.sqrt(b02)
b1 = b0b1/b0
b2 = b0b2/b0
b3 = b0b3/b0
elif b12 == np.max(squares):
b1 = np.sqrt(b12)
b0 = b0b1/b1
b2 = b1b2/b1
b3 = b3b1/b1
elif b22 == np.max(squares):
b2 = np.sqrt(b22)
b0 = b0b2/b2
b1 = b1b2/b2
b3 = b2b3/b2
else:
b3 = np.sqrt(b32)
b0 = b0b3/b3
b1 = b3b1/b3
b2 = b2b3/b3
quats = np.array([b0,b1,b2,b3])
if b0 > 0 and path == 'long':
quats = -quats
elif b0 < 0 and path == 'short':
quats = -quats
return self._from_quat(quats)
def _to_CRP(self):
        '''Conversion to Classical Rodriguez Parameters. If the initial attitude is in quaternions,
then it converts directly, because that's very easy. Otherwise, it converts from the DCM,
which is actually still pretty easy. No arguments because the shadow set doesn't really
exist.
'''
if self.type == 'Quaternion':
b0,b1,b2,b3 = self.vec
q = np.array([b1/b0,b2/b0,b3/b0])
else:
C = self.DCM
[[C11,C12,C13],
[C21,C22,C23],
[C31,C32,C33]] = C
zeta = np.sqrt(C11+C22+C33+1)
q = (1/zeta**2)*np.array([C23-C32,C31-C13,C12-C21])
return self._from_CRP(q)
def _to_MRP(self,path='short'):
'''Conversion to Modified Rodriguez Parameters
Similar to CRPs, if the input attitude is a quaternion, it'll just do the output directly,
otherwise, it'll compute the CRP from the DCM. This function does have an input for the
short rotation or the long rotation, though.
'''
if self.type == 'Quaternion':
b0,b1,b2,b3 = self.vec
s = np.array([b1/(1+b0),b2/(1+b0),b3/(1+b0)])
else:
C = self.DCM
[[C11,C12,C13],
[C21,C22,C23],
[C31,C32,C33]] = C
zeta = np.sqrt(C11+C22+C33+1)
s = (1/(zeta*(zeta+2)))*np.array([C23-C32,C31-C13,C12-C21])
if LA.norm(s) > 1 and path=='short':
s = -s/LA.norm(s)
elif LA.norm(s) < 1 and path=='long':
s = -s/LA.norm(s)
return self._from_MRP(s)
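# Usage sketch (illustrative only): round-trip a 3-2-1 Euler-angle attitude through the
# other parameter sets. Relies on the module's eul_to_DCM helper used by _from_eul_ang.
def _example_att_roundtrip():
    ''' build a 321 Euler-angle attitude, then read it back as quaternions and MRPs '''
    BN = att._from_eul_ang(321, 30, -20, 10, units='deg')
    quats = BN._to_quat(path='short')
    mrps = BN._to_MRP(path='short')
    return quats.vec, mrps.vec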
# ------------------------------------------------------------------------------------------------
# INTERNAL FUNCTIONS (designed to be used by module, not user)
# ------------------------------------------------------------------------------------------------
def rot(angle,axis,radordeg):
''' Defines a single axis rotation'''
mat = np.array([])
if radordeg == 'rad':
angle = angle
elif radordeg == 'deg':
angle = np.radians(angle)
else:
        raise ValueError('radordeg must be either "rad" or "deg"')
if axis==1:
mat = np.array( [[ 1, 0, 0 ],
[ 0, np.cos(angle), np.sin(angle) ],
[ 0, -np.sin(angle), np.cos(angle) ]])
elif axis==2:
mat = np.array( [[ np.cos(angle), 0, -np.sin(angle) ],
[ 0, 1, 0 ],
[ np.sin(angle), 0, np.cos(angle) ]])
elif axis==3:
mat = np.array([[ np.cos(angle), np.sin(angle), 0 ],
[ - | np.sin(angle) | numpy.sin |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
"""
This module contains the feature calculators that take time series as input and calculate the values of the feature.
There are two types of features:
1. feature calculators which calculate a single number (simple)
2. feature calculators which calculate a bunch of features for a list of parameters at once,
to use e.g. cached results (combiner). They return a list of (key, value) pairs for each input parameter.
They are specified using the "fctype" parameter of each feature calculator, which is added using the
set_property function. Only functions in this python module, which have a parameter called "fctype" are
seen by tsfresh as a feature calculator. Others will not be calculated.
"""
from __future__ import absolute_import, division
import itertools
import warnings
from builtins import range
import numpy as np
import pandas as pd
from numpy.linalg import LinAlgError
from scipy.signal import cwt, find_peaks_cwt, ricker, welch
from scipy.stats import linregress
from statsmodels.tools.sm_exceptions import MissingDataError
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.stattools import acf, adfuller, pacf
# todo: make sure '_' works in parameter names in all cases, add a warning if not
def _roll(a, shift):
"""
Roll 1D array elements. Improves the performance of numpy.roll() by reducing the overhead introduced from the
flexibility of the numpy.roll() method such as the support for rolling over multiple dimensions.
Elements that roll beyond the last position are re-introduced at the beginning. Similarly, elements that roll
back beyond the first position are re-introduced at the end (with negative shift).
Examples
--------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=2)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=-2)
>>> array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=12)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
Benchmark
---------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit _roll(x, shift=2)
>>> 1.89 µs ± 341 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit np.roll(x, shift=2)
>>> 11.4 µs ± 776 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
:param a: the input array
:type a: array_like
:param shift: the number of places by which elements are shifted
:type shift: int
:return: shifted array with the same shape as a
:return type: ndarray
"""
if not isinstance(a, np.ndarray):
a = np.asarray(a)
idx = shift % len(a)
return np.concatenate([a[-idx:], a[:-idx]])
def _get_length_sequences_where(x):
"""
This method calculates the length of all sub-sequences where the array x is either True or 1.
Examples
--------
>>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
:param x: An iterable containing only 1, True, 0 and False values
:return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues
contained, the list [0] is returned.
"""
if len(x) == 0:
return [0]
else:
res = [len(list(group)) for value, group in itertools.groupby(x) if value == 1]
return res if len(res) > 0 else [0]
def _estimate_friedrich_coefficients(x, m, r):
"""
Coefficients of polynomial :math:`h(x)`, which has been fitted to
the deterministic dynamics of Langevin model
.. math::
\dot{x}(t) = h(x(t)) + \mathcal{N}(0,R)
As described by
Friedrich et al. (2000): Physics Letters A 271, p. 217-222
*Extracting model equations from experimental data*
For short time-series this method is highly dependent on the parameters.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param m: order of polynom to fit for estimating fixed points of dynamics
:type m: int
:param r: number of quantils to use for averaging
:type r: float
:return: coefficients of polynomial of deterministic dynamics
:return type: ndarray
"""
assert m > 0, "Order of polynomial need to be positive integer, found {}".format(m)
df = pd.DataFrame({'signal': x[:-1], 'delta': np.diff(x)})
try:
df['quantiles'] = pd.qcut(df.signal, r)
except ValueError:
return [np.NaN] * (m + 1)
quantiles = df.groupby('quantiles')
result = pd.DataFrame({'x_mean': quantiles.signal.mean(), 'y_mean': quantiles.delta.mean()})
result.dropna(inplace=True)
try:
return np.polyfit(result.x_mean, result.y_mean, deg=m)
except (np.linalg.LinAlgError, ValueError):
return [np.NaN] * (m + 1)
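# Sketch of what the helper above does: for a series x, _estimate_friedrich_coefficients(x, m=3, r=30)
# bins the (x_t, x_{t+1}-x_t) pairs into 30 quantiles of x_t and fits a cubic polynomial to the
# per-bin means via np.polyfit.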
def _aggregate_on_chunks(x, f_agg, chunk_len):
"""
Takes the time series x and constructs a lower sampled version of it by applying the aggregation function f_agg on
consecutive chunks of length chunk_len
:param x: the time series to calculate the aggregation of
:type x: numpy.ndarray
:param f_agg: The name of the aggregation function that should be an attribute of the pandas.Series
:type f_agg: str
:param chunk_len: The size of the chunks where to aggregate the time series
:type chunk_len: int
:return: A list of the aggregation function over the chunks
:return type: list
"""
return [getattr(x[i * chunk_len: (i + 1) * chunk_len], f_agg)() for i in range(int(np.ceil(len(x) / chunk_len)))]
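# For instance, _aggregate_on_chunks(pd.Series([0, 1, 2, 3, 4]), "max", 2) evaluates "max"
# on the chunks [0, 1], [2, 3] and [4], returning [1, 3, 4]; a trailing partial chunk is kept.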
def set_property(key, value):
"""
This method returns a decorator that sets the property key of the function to value
"""
def decorate_func(func):
setattr(func, key, value)
if func.__doc__ and key == "fctype":
func.__doc__ = func.__doc__ + "\n\n *This function is of type: " + value + "*\n"
return func
return decorate_func
@set_property("fctype", "simple")
def variance_larger_than_standard_deviation(x):
"""
Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x
being larger than 1
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
y = np.var(x)
return y > np.sqrt(y)
@set_property("fctype", "simple")
def ratio_beyond_r_sigma(x, r):
"""
Ratio of values that are more than r*std(x) (so r sigma) away from the mean of x.
:param x: the time series to calculate the feature of
    :type x: iterable
    :param r: the number of standard deviations that defines the band around the mean
    :type r: float
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.sum(np.abs(x - np.mean(x)) > r * np.std(x))/x.size
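# Worked example: for x = [0, 0, 0, 0, 10] the mean is 2 and the standard deviation is 4,
# so ratio_beyond_r_sigma(x, 1) counts only the value 10 and returns 1/5 = 0.2.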
@set_property("fctype", "simple")
def large_standard_deviation(x, r):
"""
Boolean variable denoting if the standard dev of x is higher
than 'r' times the range = difference between max and min of x.
Hence it checks if
.. math::
std(x) > r * (max(X)-min(X))
    According to a rule of thumb, the standard deviation should be a fourth of the range of the values.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param r: the percentage of the range to compare with
:type r: float
:return: the value of this feature
:return type: bool
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.std(x) > (r * (np.max(x) - np.min(x)))
@set_property("fctype", "combiner")
def symmetry_looking(x, param):
"""
Boolean variable denoting if the distribution of x *looks symmetric*. This is the case if
.. math::
| mean(X)-median(X)| < r * (max(X)-min(X))
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
    :param param: contains dictionaries {"r": x} with x (float) being the percentage of the range to compare with
    :type param: list
:return: the value of this feature
:return type: bool
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
mean_median_difference = np.abs(np.mean(x) - np.median(x))
max_min_difference = np.max(x) - np.min(x)
return [("r_{}".format(r["r"]), mean_median_difference < (r["r"] * max_min_difference))
for r in param]
@set_property("fctype", "simple")
def has_duplicate_max(x):
"""
Checks if the maximum value of x is observed more than once
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.sum(x == np.max(x)) >= 2
@set_property("fctype", "simple")
def has_duplicate_min(x):
"""
Checks if the minimal value of x is observed more than once
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.sum(x == np.min(x)) >= 2
@set_property("fctype", "simple")
def has_duplicate(x):
"""
Checks if any value in x occurs more than once
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return x.size != np.unique(x).size
@set_property("fctype", "simple")
@set_property("minimal", True)
def sum_values(x):
"""
Calculates the sum over the time series values
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
if len(x) == 0:
return 0
return np.sum(x)
@set_property("fctype", "combiner")
def agg_autocorrelation(x, param):
r"""
Calculates the value of an aggregation function :math:`f_{agg}` (e.g. the variance or the mean) over the
autocorrelation :math:`R(l)` for different lags. The autocorrelation :math:`R(l)` for lag :math:`l` is defined as
.. math::
R(l) = \frac{1}{(n-l)\sigma^{2}} \sum_{t=1}^{n-l}(X_{t}-\mu )(X_{t+l}-\mu)
where :math:`X_i` are the values of the time series, :math:`n` its length. Finally, :math:`\sigma^2` and
:math:`\mu` are estimators for its variance and mean
(See `Estimation of the Autocorrelation function <http://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_).
The :math:`R(l)` for different lags :math:`l` form a vector. This feature calculator applies the aggregation
function :math:`f_{agg}` to this vector and returns
.. math::
f_{agg} \left( R(1), \ldots, R(m)\right) \quad \text{for} \quad m = max(n, maxlag).
Here :math:`maxlag` is the second parameter passed to this function.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
    :param param: contains dictionaries {"f_agg": x, "maxlag": n} with x str, the name of a numpy function
        (e.g. "mean", "var", "std", "median") used as the aggregator function that is applied to the
        autocorrelations. Further, n is an int, the maximal number of lags to consider.
:type param: list
:return: the value of this feature
:return type: float
"""
# if the time series is longer than the following threshold, we use fft to calculate the acf
THRESHOLD_TO_USE_FFT = 1250
var = np.var(x)
n = len(x)
max_maxlag = max([config["maxlag"] for config in param])
if np.abs(var) < 10**-10 or n == 1:
a = [0] * len(x)
else:
a = acf(x, unbiased=True, fft=n > THRESHOLD_TO_USE_FFT, nlags=max_maxlag)[1:]
return [("f_agg_\"{}\"__maxlag_{}".format(config["f_agg"], config["maxlag"]),
getattr(np, config["f_agg"])(a[:int(config["maxlag"])])) for config in param]
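# Example call (a sketch; each dictionary in `param` follows the format documented above):
#
#   x = np.random.randn(200)
#   agg_autocorrelation(x, param=[{"f_agg": "mean", "maxlag": 10},
#                                 {"f_agg": "std", "maxlag": 10}])
#   # -> [('f_agg_"mean"__maxlag_10', <mean of R(1)..R(10)>),
#   #     ('f_agg_"std"__maxlag_10',  <std of R(1)..R(10)>)]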
@set_property("fctype", "combiner")
def partial_autocorrelation(x, param):
"""
Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation
of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and
:math:`x_{t-k}`, adjusted for the intermediate variables
:math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]).
Following [2], it can be defined as
.. math::
\\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})}
{\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}}
with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})`
being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to
predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`.
It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p`
and zero for `k>p`."
With this property, it is used to determine the lag of an AR-Process.
.. rubric:: References
| [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2015).
| Time series analysis: forecasting and control. <NAME> & Sons.
| [2] https://onlinecourses.science.psu.edu/stat510/node/62
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned
:type param: list
:return: the value of this feature
:return type: float
"""
# Check the difference between demanded lags by param and possible lags to calculate (depends on len(x))
max_demanded_lag = max([lag["lag"] for lag in param])
n = len(x)
# Check if list is too short to make calculations
if n <= 1:
pacf_coeffs = [np.nan] * (max_demanded_lag + 1)
else:
if (n <= max_demanded_lag):
max_lag = n - 1
else:
max_lag = max_demanded_lag
pacf_coeffs = list(pacf(x, method="ld", nlags=max_lag))
pacf_coeffs = pacf_coeffs + [np.nan] * max(0, (max_demanded_lag - max_lag))
return [("lag_{}".format(lag["lag"]), pacf_coeffs[lag["lag"]]) for lag in param]
@set_property("fctype", "combiner")
def augmented_dickey_fuller(x, param):
"""
The Augmented Dickey-Fuller test is a hypothesis test which checks whether a unit root is present in a time
series sample. This feature calculator returns the value of the respective test statistic.
See the statsmodels implementation for references and more details.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"attr": x} with x str, either "teststat", "pvalue" or "usedlag"
:type param: list
:return: the value of this feature
:return type: float
"""
res = None
try:
res = adfuller(x)
except LinAlgError:
res = np.NaN, np.NaN, np.NaN
except ValueError: # occurs if sample size is too small
res = np.NaN, np.NaN, np.NaN
except MissingDataError: # is thrown for e.g. inf or nan in the data
res = np.NaN, np.NaN, np.NaN
return [('attr_"{}"'.format(config["attr"]),
res[0] if config["attr"] == "teststat"
else res[1] if config["attr"] == "pvalue"
else res[2] if config["attr"] == "usedlag" else np.NaN)
for config in param]
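# Example call (sketch): request the test statistic and p-value of the ADF test in one pass.
#
#   augmented_dickey_fuller(x, param=[{"attr": "teststat"}, {"attr": "pvalue"}])
#   # -> [('attr_"teststat"', <float>), ('attr_"pvalue"', <float>)]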
@set_property("fctype", "simple")
def abs_energy(x):
"""
Returns the absolute energy of the time series which is the sum over the squared values
.. math::
E = \\sum_{i=1,\ldots, n} x_i^2
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.dot(x, x)
@set_property("fctype", "simple")
def cid_ce(x, normalize):
"""
This function calculator is an estimate for a time series complexity [1] (A more complex time series has more peaks,
valleys etc.). It calculates the value of
.. math::
        \\sqrt{ \\sum_{i=1}^{n-1} ( x_{i} - x_{i+1})^2 }
.. rubric:: References
| [1] Batista, <NAME>, et al (2014).
| CID: an efficient complexity-invariant distance for time series.
| Data Mining and Knowledge Discovery 28.3 (2014): 634-669.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param normalize: should the time series be z-transformed?
:type normalize: bool
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
if normalize:
s = np.std(x)
if s!=0:
x = (x - np.mean(x))/s
else:
return 0.0
x = np.diff(x)
return np.sqrt(np.dot(x, x))
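# Example (sketch): without normalization the raw difference magnitudes dominate;
# with normalize=True the series is z-scored first, so the estimate is scale-invariant.
#
#   cid_ce(np.array([0., 1., 0., 1., 0.]), normalize=False)    # sqrt(4)   = 2.0
#   cid_ce(np.array([0., 10., 0., 10., 0.]), normalize=False)  # sqrt(400) = 20.0
#   # after z-normalisation both series give the same complexity estimate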
@set_property("fctype", "simple")
def mean_abs_change(x):
"""
Returns the mean over the absolute differences between subsequent time series values which is
.. math::
\\frac{1}{n} \\sum_{i=1,\ldots, n-1} | x_{i+1} - x_{i}|
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
return np.mean(np.abs(np.diff(x)))
@set_property("fctype", "simple")
def mean_change(x):
"""
Returns the mean over the differences between subsequent time series values which is
.. math::
\\frac{1}{n} \\sum_{i=1,\ldots, n-1} x_{i+1} - x_{i}
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
return np.mean(np.diff(x))
@set_property("fctype", "simple")
def mean_second_derivative_central(x):
"""
Returns the mean value of a central approximation of the second derivative
.. math::
\\frac{1}{n} \\sum_{i=1,\ldots, n-1} \\frac{1}{2} (x_{i+2} - 2 \\cdot x_{i+1} + x_i)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
diff = (_roll(x, 1) - 2 * np.array(x) + _roll(x, -1)) / 2.0
return np.mean(diff[1:-1])
@set_property("fctype", "simple")
@set_property("minimal", True)
def median(x):
"""
Returns the median of x
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
return np.median(x)
@set_property("fctype", "simple")
@set_property("minimal", True)
def mean(x):
"""
Returns the mean of x
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
return np.mean(x)
@set_property("fctype", "simple")
@set_property("minimal", True)
def length(x):
"""
Returns the length of x
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: int
"""
return len(x)
@set_property("fctype", "simple")
@set_property("minimal", True)
def standard_deviation(x):
"""
Returns the standard deviation of x
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
return np.std(x)
@set_property("fctype", "simple")
@set_property("minimal", True)
def variance(x):
"""
Returns the variance of x
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
return np.var(x)
@set_property("fctype", "simple")
def skewness(x):
"""
Returns the sample skewness of x (calculated with the adjusted Fisher-Pearson standardized
moment coefficient G1).
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
if not isinstance(x, pd.Series):
x = pd.Series(x)
return pd.Series.skew(x)
@set_property("fctype", "simple")
def kurtosis(x):
"""
Returns the kurtosis of x (calculated with the adjusted Fisher-Pearson standardized
moment coefficient G2).
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
if not isinstance(x, pd.Series):
x = pd.Series(x)
return pd.Series.kurtosis(x)
@set_property("fctype", "simple")
def absolute_sum_of_changes(x):
"""
Returns the sum over the absolute value of consecutive changes in the series x
.. math::
\\sum_{i=1, \ldots, n-1} \\mid x_{i+1}- x_i \\mid
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
    return np.sum(np.abs(np.diff(x)))
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
import os.path as osp
from PIL import Image
import logging
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class VOCDatasets:
"""Pascal VOC dataset load
Parameters
----------------
root: str, example:'./data/VOCdevkit'.
splits_names: tuple, ((year, trainval)).
    classes: list[str], if you are using a custom VOC dataset, \
        you need to configure the names of the custom objects.
    difficult: bool, if False, objects marked as difficult in the VOC xml are ignored.
"""
def __init__(self, root="VOCdevkit",
splits_names=[(2007, "trainval")],
classes=None,
difficult=False) -> None:
self.CLASSES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
if classes:
self.CLASSES = classes
self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}
self._root = osp.abspath(osp.expanduser(root))
self._diff = difficult
self._imgid_items = self._load_items(splits_names)
self._anno_path = osp.join('{}', 'Annotations', '{}.xml')
self._image_path = osp.join('{}', 'JPEGImages', '{}.jpg')
self._im_shapes = {}
self._im_anno = [self._load_label(idx) for idx in range(len(self))]
self._im_cache = {}
def _load_items(self, splits_names):
img_ids = []
for year, txtname in splits_names:
vocfolder = osp.join(self._root, "VOC{}".format(year))
txtpath = osp.join(vocfolder, 'ImageSets', 'Main', txtname + '.txt')
try:
with open(txtpath, 'r', encoding='utf-8') as f:
img_ids += [(vocfolder, line.strip()) for line in f.readlines()]
except:
continue
return img_ids
def __len__(self):
return len(self._imgid_items)
def __iter__(self):
img_path = [self._image_path.format(*img_id) for img_id in self._imgid_items]
return zip(img_path, self._im_anno)
def __getitem__(self, idx):
img_id = self._imgid_items[idx]
img_path = self._image_path.format(*img_id)
if img_path in self._im_cache:
            img = self._im_cache[img_path]
else:
img = self._read_image(img_path)
return img, self._im_anno[idx]
def _load_label(self, idx):
img_id = self._imgid_items[idx]
anno_path = self._anno_path.format(*img_id)
root = ET.parse(anno_path).getroot()
width = 0
height = 0
size = root.find('size')
if size is not None:
width = int(size.find('width').text)
height = int(size.find('height').text)
else:
img_path = self._image_path.format(*img_id)
img = self._read_image(img_path)
width, height = img.size
self._im_cache[img_path] = img
if idx not in self._im_shapes:
# store the shapes for later usage
self._im_shapes[idx] = (width, height)
# load label [[x1, y1, x2, y2, cls, difficult]]
label = []
for obj in root.iter('object'):
try:
difficult = int(obj.find('difficult').text)
except ValueError:
difficult = 0
cls_name = obj.find('name').text.strip().lower()
if cls_name not in self.CLASSES:
logging.warning(f"{cls_name} isn't included in {self.CLASSES}")
continue
cls_id = self.cat2label[cls_name]
xml_box = obj.find('bndbox')
xmin = float(int(xml_box.find('xmin').text) / width)
ymin = float(int(xml_box.find('ymin').text) / height)
xmax = float(int(xml_box.find('xmax').text) / width)
ymax = float(int(xml_box.find('ymax').text) / height)
label.append([xmin, ymin, xmax, ymax, cls_id, difficult])
label = np.array(label).astype(np.float32)
if not self._diff:
label = label[..., :5]
try:
self._check_label(label, width, height)
except AssertionError as e:
logging.warning("Invalid label at %s, %s", anno_path, e)
return label
def _check_label(self, label, width, height):
"""Check if label is correct."""
xmin = label[:, 0]
ymin = label[:, 1]
xmax = label[:, 2]
ymax = label[:, 3]
assert ((0 <= xmin) & (xmin < width)).any(), \
"xmin must in [0, {}), given {}".format(width, xmin)
assert ((0 <= ymin) & (ymin < height)).any(), \
"ymin must in [0, {}), given {}".format(height, ymin)
assert ((xmin < xmax) & (xmax <= width)).any(), \
"xmax must in ({}, {}], given {}".format(xmin, width, xmax)
assert ((ymin < ymax) & (ymax <= height)).any(), \
"ymax must in ({}, {}], given {}".format(ymin, height, ymax)
def _read_image(self, image_path):
try:
img = Image.open(image_path)
            img = np.array(img)
            return img
        except Exception as err:
            # log unreadable image files before re-raising
            logging.warning("Failed to read image %s: %s", image_path, err)
            raise
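# Usage sketch (paths are hypothetical; assumes the standard VOCdevkit directory layout):
#
#   dataset = VOCDatasets(root="./data/VOCdevkit", splits_names=[(2007, "trainval")])
#   for img_path, anno in dataset:
#       # anno is an N x 5 (or N x 6 with the difficult flag) float32 array of
#       # [xmin, ymin, xmax, ymax, cls_id(, difficult)], coordinates normalised by
#       # the image width/height
#       pass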
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = __author__
import pytest
import numpy as np
import pandas as pd
from pylife.materiallaws import WoehlerCurve
wc_data = pd.Series({
'k_1': 7.,
'TN': 1.75,
'ND': 1e6,
'SD': 300.0
})
def test_woehler_accessor():
wc = wc_data.drop('TN')
for key in wc.index:
wc_miss = wc.drop(key)
with pytest.raises(AttributeError):
wc_miss.woehler
def test_woehler_transform_probability():
wc_50 = pd.Series({
'k_1': 2,
'k_2': np.inf,
'TS': 2.,
'TN': 9.,
'ND': 3e6,
'SD': 300 * np.sqrt(2.),
'failure_probability': 0.5
}).sort_index()
transformed_90 = wc_50.woehler.transform_to_failure_probability(0.9).to_pandas()
pd.testing.assert_series_equal(transformed_90[['SD', 'ND', 'failure_probability']],
pd.Series({'SD': 600.0, 'ND': 4.5e6, 'failure_probability': 0.9}))
transformed_back = transformed_90.woehler.transform_to_failure_probability(0.5).to_pandas()
pd.testing.assert_series_equal(transformed_back, wc_50)
transformed_10 = wc_50.woehler.transform_to_failure_probability(0.1).to_pandas()
pd.testing.assert_series_equal(transformed_10[['SD', 'ND', 'failure_probability']],
pd.Series({'SD': 300.0, 'ND': 2e6, 'failure_probability': 0.1}))
transformed_back = transformed_10.woehler.transform_to_failure_probability(0.5).to_pandas()
pd.testing.assert_series_equal(transformed_back, wc_50)
def test_woehler_transform_probability_multiple():
wc_50 = pd.Series({
'k_1': 2,
'k_2': np.inf,
'TS': 2.,
'TN': 9.,
'ND': 3e6,
'SD': 300 * np.sqrt(2.),
'failure_probability': 0.5
}).sort_index()
transformed = wc_50.woehler.transform_to_failure_probability([.1, .9]).to_pandas()
expected = pd.DataFrame({
'k_1': [2., 2.],
'k_2': [np.inf, np.inf],
'TS': [2., 2.],
'TN': [9., 9.],
'ND': [2e6, 4.5e6],
'SD': [300., 600.],
'failure_probability': [0.1, 0.9]
})
pd.testing.assert_frame_equal(transformed, expected, check_like=True)
transformed_back = transformed.woehler.transform_to_failure_probability([0.5, 0.5]).to_pandas()
expected = pd.DataFrame({
'k_1': [2., 2.],
'k_2': [np.inf, np.inf],
'TS': [2., 2.],
'TN': [9., 9.],
'ND': [3e6, 3e6],
        'SD': [300. * np.sqrt(2.), 300. * np.sqrt(2.)],
        'failure_probability': [0.5, 0.5]
    })
    pd.testing.assert_frame_equal(transformed_back, expected, check_like=True)
# Implementation of: https://www.geeksforgeeks.org/ml-k-means-algorithm/
import numpy as np
import sys
def distance(p1, p2):
return np.sum((p1 - p2) ** 2)
def initialize(data, k):
'''
initialized the centroids for K-means++
inputs:
data - numpy array of data points having shape (200, 2)
k - number of clusters
'''
## initialize the centroids list and add
## a randomly selected data point to the list
centroids = []
centroids.append(data[np.random.randint(
data.shape[0]), :])
## compute remaining k - 1 centroids
for c_id in range(k - 1):
## initialize a list to store distances of data
## points from nearest centroid
dist = []
for i in range(data.shape[0]):
point = data[i, :]
d = sys.maxsize
## compute distance of 'point' from each of the previously
## selected centroid and store the minimum distance
for j in range(len(centroids)):
temp_dist = distance(point, centroids[j])
d = min(d, temp_dist)
dist.append(d)
## select data point with maximum distance as our next centroid
        dist = np.array(dist)
        next_centroid = data[np.argmax(dist), :]
        centroids.append(next_centroid)
    return centroids
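# Usage sketch (illustrative data; k is chosen arbitrarily):
#
#   data = np.random.rand(200, 2)
#   centroids = initialize(data, k=4)
#   # `centroids` is a list of k points spread out by the K-means++ seeding rule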
#!/usr/bin/python
"""calculate_skystats.py -- Mask stars aggressively, together with input mask, determine sky background stats in remaining pixels. Stats are
Average
Median
Standard deviation of x by x pixels's average values
(y boxes are placed randomly)
Note that when using aplpy to plot the locations of randomly placed sky areas, the vmin and vmax for the fits image are currently hard coded, so if you get black png files, that's why. For randomly placed boxes, vmin and vmax are taken from command line options, but not for annuli. A remove-created-files option should still be added, and the script needs updating to be an importable module.
Required input
fitsimage - image for which background stats are desired.
Usage:
calculate_skystats.py [-h] [-v] [-b STRING] [-a STRING] [--annulusallover STRING] [-n INT] [-m FILE] [-s SEXLOC] [--vmin FLOAT] [--vmax FLOAT] <fitsimage>
Options:
-h, --help Print this screen.
-v, --verbose Print extra information [default: False]
-b STRING, --box STRING Input box parameters: size of box (pixels), number of random boxes to be placed. E.g. 8,1000.
-a STRING, --annulus STRING Select annulus with params 'xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2', angle in degrees, counter clockwise rotation; place random annuli around galaxy.
--annulusallover STRING Select annulus with params 'xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2', angle in degrees, counter clockwise rotation; place annulus around galaxy, but let it move around a little more than above option.
-n INT, --niterations INT Input number of random annuli to be placed. (not used for boxes). [default: 100]
-m FILE, --mask FILE Required. Input mask to be combined with grown sextractor mask. Important to input a mask that masks out the galaxy and its extended low surface brightness features!
-s SEXLOC, --sex SEXLOC SExtractor location [default: /opt/local/bin/sex]
--vmin FLOAT For plotting up box/ annuli locations [default: 4.25]
--vmax FLOAT For plotting up box/ annuli locations [default: 4.32]
Example:
python calculate_skystats.py -v fitsimage.fits
"""
import docopt
import numpy as np
import astropy.io.fits as fits
from scipy import ndimage
import subprocess
import os, sys, copy, errno
import matplotlib.pyplot as plt
import aplpy
sextractor_params = """NUMBER
FLUX_AUTO
FLUXERR_AUTO
FLUX_APER
FLUXERR_APER
X_IMAGE
Y_IMAGE
X_WORLD
Y_WORLD
FLUX_RADIUS
FLAGS
CLASS_STAR
BACKGROUND
ELLIPTICITY
FWHM_IMAGE
"""
sextractor_config = """
ANALYSIS_THRESH 3
BACK_FILTERSIZE 3
BACKPHOTO_TYPE LOCAL
BACK_SIZE 32
CATALOG_NAME test.cat
CATALOG_TYPE ASCII_HEAD
CHECKIMAGE_TYPE SEGMENTATION
CHECKIMAGE_NAME {check_name}
CLEAN Y
CLEAN_PARAM 1.
DEBLEND_MINCONT 0.001
DEBLEND_NTHRESH 32
DETECT_MINAREA 5
DETECT_THRESH 3
DETECT_TYPE CCD
FILTER Y
FILTER_NAME {filter_name}
FLAG_IMAGE flag.fits
GAIN 1.0
MAG_GAMMA 4.
MAG_ZEROPOINT 0.0
MASK_TYPE CORRECT
MEMORY_BUFSIZE 1024
MEMORY_OBJSTACK 3000
MEMORY_PIXSTACK 300000
PARAMETERS_NAME {parameters_name}
PHOT_APERTURES 5
PHOT_AUTOPARAMS 2.5, 3.5
PIXEL_SCALE 2.85
SATUR_LEVEL 50000.
SEEING_FWHM 2.5
STARNNW_NAME {starnnw_name}
VERBOSE_TYPE {verbose_type}
"""
default_conv = """CONV NORM
# 3x3 ``all-ground'' convolution mask with FWHM = 2 pixels.
1 2 1
2 4 2
1 2 1
"""
default_nnw = """NNW
# Neural Network Weights for the SExtractor star/galaxy classifier (V1.3)
# inputs: 9 for profile parameters + 1 for seeing.
# outputs: ``Stellarity index'' (0.0 to 1.0)
# Seeing FWHM range: from 0.025 to 5.5'' (images must have 1.5 < FWHM < 5 pixels)
# Optimized for Moffat profiles with 2<= beta <= 4.
3 10 10 1
-1.56604e+00 -2.48265e+00 -1.44564e+00 -1.24675e+00 -9.44913e-01 -5.22453e-01 4.61342e-02 8.31957e-01 2.15505e+00 2.64769e-01
3.03477e+00 2.69561e+00 3.16188e+00 3.34497e+00 3.51885e+00 3.65570e+00 3.74856e+00 3.84541e+00 4.22811e+00 3.27734e+00
-3.22480e-01 -2.12804e+00 6.50750e-01 -1.11242e+00 -1.40683e+00 -1.55944e+00 -1.84558e+00 -1.18946e-01 5.52395e-01 -4.36564e-01 -5.30052e+00
4.62594e-01 -3.29127e+00 1.10950e+00 -6.01857e-01 1.29492e-01 1.42290e+00 2.90741e+00 2.44058e+00 -9.19118e-01 8.42851e-01 -4.69824e+00
-2.57424e+00 8.96469e-01 8.34775e-01 2.18845e+00 2.46526e+00 8.60878e-02 -6.88080e-01 -1.33623e-02 9.30403e-02 1.64942e+00 -1.01231e+00
4.81041e+00 1.53747e+00 -1.12216e+00 -3.16008e+00 -1.67404e+00 -1.75767e+00 -1.29310e+00 5.59549e-01 8.08468e-01 -1.01592e-02 -7.54052e+00
1.01933e+01 -2.09484e+01 -1.07426e+00 9.87912e-01 6.05210e-01 -6.04535e-02 -5.87826e-01 -7.94117e-01 -4.89190e-01 -8.12710e-02 -2.07067e+01
-5.31793e+00 7.94240e+00 -4.64165e+00 -4.37436e+00 -1.55417e+00 7.54368e-01 1.09608e+00 1.45967e+00 1.62946e+00 -1.01301e+00 1.13514e-01
2.20336e-01 1.70056e+00 -5.20105e-01 -4.28330e-01 1.57258e-03 -3.36502e-01 -8.18568e-02 -7.16163e+00 8.23195e+00 -1.71561e-02 -1.13749e+01
3.75075e+00 7.25399e+00 -1.75325e+00 -2.68814e+00 -3.71128e+00 -4.62933e+00 -2.13747e+00 -1.89186e-01 1.29122e+00 -7.49380e-01 6.71712e-01
-8.41923e-01 4.64997e+00 5.65808e-01 -3.08277e-01 -1.01687e+00 1.73127e-01 -8.92130e-01 1.89044e+00 -2.75543e-01 -7.72828e-01 5.36745e-01
-3.65598e+00 7.56997e+00 -3.76373e+00 -1.74542e+00 -1.37540e-01 -5.55400e-01 -1.59195e-01 1.27910e-01 1.91906e+00 1.42119e+00 -4.35502e+00
-1.70059e+00 -3.65695e+00 1.22367e+00 -5.74367e-01 -3.29571e+00 2.46316e+00 5.22353e+00 2.42038e+00 1.22919e+00 -9.22250e-01 -2.32028e+00
0.00000e+00
1.00000e+00
"""
def run_SExtractor(image_name):
'Create temporary directory'
if not os.path.exists('tmpSkystats'):
os.makedirs('tmpSkystats')
else:
print('./tmpSkystats directory existed already')
'Names of required config files'
sextractor_config_name = './tmpSkystats/scamp.sex'
params_name = './tmpSkystats/scamp.param'
conv_name = './tmpSkystats/default.conv'
nnw_name = './tmpSkystats/default.nnw'
catalog_name = image_name.split('.fits')[0]+'_bkg.cat'
check_name = image_name.split('.fits')[0]+'_bkg_segmap.fits'
if verbose:
verbose_type = 'NORMAL'
else:
verbose_type = 'QUIET'
'Stick content in config files'
configs = zip([sextractor_config_name,params_name,conv_name,nnw_name],[sextractor_config,sextractor_params,default_conv,default_nnw])
for fname,fcontent in configs:
fout = open(fname,'w')
if 'scamp.sex' in fname:
fout.write(fcontent.format(filter_name=conv_name,
parameters_name=params_name,starnnw_name=nnw_name,
verbose_type=verbose_type,check_name=check_name))
else:
fout.write(fcontent)
fout.close()
if verbose:
print('SExtracting...')
'SExtractor command'
command = sexloc + ' -c {config} -CATALOG_NAME {catalog} {image}'.format(config=sextractor_config_name,catalog=catalog_name,image=image_name)
if verbose:
print('Running this command:')
print(command+'\n')
'Run SExtractor'
subprocess.call(command,shell=True)
'Clear unnecessary files'
for fname in [sextractor_config_name,params_name,conv_name,nnw_name]:
clearit(fname)
'Remove temp directory if its not empty'
try:
os.rmdir('tmpSkystats')
except OSError as ex:
if ex.errno == errno.ENOTEMPTY:
print("directory not empty")
return check_name
def clearit(fname):
if os.path.isfile(fname):
os.remove(fname)
return None
def writeFITS(im,saveAs,header=None):
    if header is not None:
hdu = fits.PrimaryHDU(data=im,header=header)
else:
hdu = fits.PrimaryHDU(data=im)
hdulist = fits.HDUList([hdu])
hdulist.writeto(saveAs,overwrite=True)
hdulist.close()
return None
def calculate_sky_box(fitsimage,image,total_mask,boxsize_pix,nboxes,vmin=4.25,vmax=4.32):
'''Place nboxes boxsize_pix sized boxes randomly in image with total_mask, calculate average in each box and standard deviation of averages'''
sky_counts = []
pix_counts = []
n_counter = 0
n_notfinite = 0
# Read in image size and set boxes
# to be placed not too near edge
h = fits.getheader(fitsimage)
xmin = 1.5*boxsize_pix
ymin = 1.5*boxsize_pix
xmax = float(h['NAXIS1'])-1.5*boxsize_pix
ymax = float(h['NAXIS2'])-1.5*boxsize_pix
# Start figure to plot up box locations
fig = plt.figure(figsize=(48, 36))
f1 = aplpy.FITSFigure(fitsimage,figure=fig)
#f1.set_tick_labels_font(size='xx-small')
f1.ticks.hide()
f1.tick_labels.hide_x()
f1.tick_labels.hide_y()
f1.axis_labels.hide()
f1.show_grayscale(invert=True, stretch='linear', vmin=vmin, vmax=vmax)
xlen = int(h['NAXIS2'])
ylen = int(h['NAXIS1'])
xtomesh = np.arange(0, ylen, 1)
ytomesh = np.arange(0, xlen, 1)
X, Y = np.meshgrid(xtomesh, ytomesh)
while n_counter <= nboxes:
# Choose a random spot
row = np.random.randint(low=ymin,high=ymax)
col = np.random.randint(low=xmin,high=xmax)
# Make a box
image_box = image[row-int(boxsize_pix/2):row+int(boxsize_pix/2)+1,col-int(boxsize_pix/2):col+int(boxsize_pix/2)+1]
mask_box = total_mask[row-int(boxsize_pix/2):row+int(boxsize_pix/2)+1,col-int(boxsize_pix/2):col+int(boxsize_pix/2)+1]
# Plot up location of box for display using show_contour
display_mask = np.zeros((xlen,ylen))
display_mask[row-int(boxsize_pix/2):row+int(boxsize_pix/2)+1,col-int(boxsize_pix/2):col+int(boxsize_pix/2)+1] = 1.0
CS = plt.contour(X, Y, display_mask,linewidths=1.0,alpha=0.1,colors='red')
# Measure average counts in this masked box
counts = np.ma.mean(np.ma.masked_array(image_box,mask=mask_box))
# Measure number of pixels not masked in this masked box
no_pixels_notmasked = np.sum(mask_box)
# Add average to sky_counts if finite
# Also increment box count
# Else increment n_notfinite
if np.isfinite(counts):
sky_counts.append(counts)
pix_counts.append(no_pixels_notmasked)
n_counter += 1
else:
n_notfinite += 1
# Save figure to of annuli locations
outname = './skyregionlocs.png'
f1.save(outname)
print(' ')
print('***OUTPUT: Box location plot saved here: ',outname)
return sky_counts, pix_counts, n_notfinite, h
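# Sketch of how the box outputs are typically reduced to sky statistics (an assumption
# here: the scatter of the per-box averages is used as the sky uncertainty):
#
#   sky_counts, pix_counts, n_bad, hdr = calculate_sky_box(fitsimage, image, total_mask, 8, 1000)
#   sky_mean  = np.mean(sky_counts)
#   sky_sigma = np.std(sky_counts)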
def read_annulusparams(annulusparams):
'''Read out annulus parameters of form xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2'''
params = annulusparams.split(',')
xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2 = params
return float(xc1),float(yc1),float(a1),float(b1),float(ang1),float(xc2),float(yc2),float(a2),float(b2),float(ang2)
def make_annulus_mask(xlen,ylen,xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2):
'''Read in annulus parameters and create grabber of annulus (1 inside and 0 outside)'''
ang1_rad = (ang1/360.)*2*np.pi
ang2_rad = (ang2/360.)*2*np.pi
# Ellipse 1
mask1 = np.zeros((xlen,ylen))
xv,yv = np.meshgrid(np.linspace(0,xlen-1,xlen),np.linspace(0,ylen-1,ylen))
A = ( (xv-xc1)*np.cos(ang1_rad) + (yv-yc1)*np.sin(ang1_rad) )**2 / a1**2
B = ( (xv-xc1)*np.sin(ang1_rad) - (yv-yc1)*np.cos(ang1_rad) )**2 / b1**2
xi,yi = np.where( A+B < 1.0 )
mask1[xi,yi] = 1
# Ellipse 2
mask2 = np.zeros((xlen,ylen))
A = ( (xv-xc2)*np.cos(ang2_rad) + (yv-yc2)*np.sin(ang2_rad) )**2 / a2**2
B = ( (xv-xc2)*np.sin(ang2_rad) - (yv-yc2)*np.cos(ang2_rad) )**2 / b2**2
xi,yi = np.where( A+B < 1.0 )
mask2[xi,yi] = 1
# Combine Ellipse 1 and 2 --> annulus
mask3 = np.ones((xlen,ylen)).astype(int)
tmp = mask1+mask2
xi,yi = np.where(tmp == 1.0)
mask3[xi,yi] = 0
return mask3.astype(bool)
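# Example (sketch): a circular annulus of inner radius 50 px and outer radius 100 px
# centred on (500, 500) in a 1000 x 1000 image. Pixels outside the annulus ring are
# True, matching the convention used by the callers below (True pixels get masked).
#
#   m = make_annulus_mask(1000, 1000,
#                         500, 500, 100, 100, 0.,   # outer ellipse
#                         500, 500, 50, 50, 0.)     # inner ellipse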
def calculate_sky_annuli(fitsimage,image,total_mask,annulusparams,n_iterations):
'''Save sky count averages in n_iteration annuli, also plot up where random n_iterations of annuli were placed on fits image.'''
# Calculate sky in input annulus
xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2 = read_annulusparams(annulusparams)
h = fits.getheader(fitsimage)
xlen = int(h['NAXIS2'])
ylen = int(h['NAXIS1'])
mask = make_annulus_mask(xlen,ylen,xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2)
initial_annuli_mask_data = mask.copy()
image_annuli = copy.copy(image)
image_annuli[mask] = float('nan')
image_annuli[total_mask] = float('nan')
initial_annuli_name = 'annuli_input.fits'
writeFITS(image_annuli,initial_annuli_name)
print(' ')
print('***OUTPUT: Sky calculation annulus saved here: ',initial_annuli_name)
print(' ')
print('Average in input sky annulus is: ',np.nanmean(image_annuli))
print('Median in input sky annulus is: ',np.nanmedian(image_annuli))
print('Std in input sky annulus is: ',np.nanstd(image_annuli))
print('Number of finite non masked pixels in input sky annulus: ',np.sum(np.isfinite(image_annuli)))
# Plonk some random annuli, calculate average of averages and std of averages
# Vary xc,yc within width of annuli randomly (move xc2,yc2 by same amount)
# AND vary a1 randomly while keeping a1-a2 constant, varations up to width of annuli
annuli_thickness = abs(a1-a2)/2.
# Start figure to plot up annuli locations
fig = plt.figure(figsize=(48, 36))
f1 = aplpy.FITSFigure(fitsimage,figure=fig)
#f1.set_tick_labels_font(size='xx-small')
f1.ticks.hide()
f1.tick_labels.hide_x()
f1.tick_labels.hide_y()
f1.axis_labels.hide()
f1.show_grayscale(invert=True, stretch='linear', vmin=4.25, vmax=4.32)
# for g-band ngc 2841: vmin=2.38, vmax=2.42
sky_counts = []
pix_counts = []
n_counter = 0
n_notfinite = 0
xtomesh = np.arange(0, ylen, 1)
ytomesh = np.arange(0, xlen, 1)
X, Y = np.meshgrid(xtomesh, ytomesh)
while n_counter < n_iterations:
# Choose X random values for xc,yc and a1
xc_shift = np.random.randint(low=-annuli_thickness,high=annuli_thickness)
yc_shift = np.random.randint(low=-annuli_thickness,high=annuli_thickness)
a1_shift = np.random.randint(low=-annuli_thickness,high=annuli_thickness)
new_xc1 = xc1+xc_shift
new_xc2 = xc2+xc_shift
new_yc1 = yc1+yc_shift
new_yc2 = yc2+yc_shift
new_a1 = a1+a1_shift
new_a2 = a2+a1_shift
new_b1 = (b1/a1)*(new_a1)
new_b2 = (b2/a2)*(new_a2)
# Make mask for new annuli
mask = make_annulus_mask(xlen,ylen,
new_xc1,new_yc1,new_a1,new_b1,ang1,
new_xc2,new_yc2,new_a2,new_b2,ang2)
image_annuli = copy.copy(image)
image_annuli[mask] = float('nan')
image_annuli[total_mask] = float('nan')
# Plot up location annulus for display using show_contour
CS = plt.contour(X, Y, mask,linewidths=1.0,alpha=0.1,colors='red')
# Calculate average and number of pixels in average to array
#counts = 3.*np.nanmedian(image_annuli) - 2.*np.nanmean(image_annuli)
counts = np.nanmean(image_annuli)
# Add average to sky_counts if finite
# Also increment n_counter
# Else increment n_notfinite
if np.isfinite(counts):
sky_counts.append(counts)
pix_counts.append(np.sum(np.isfinite(image_annuli)))
n_counter += 1
else:
n_notfinite += 1
# Plot initial sky ellipse
# Copy wcs to total_mask_name, and show initial ellipse contour
CS = plt.contour(X, Y, initial_annuli_mask_data,linewidths=6.0,colors='green')
# Save figure to of annuli locations
outname = './skyregionlocs.png'
f1.save(outname)
print(' ')
print('***OUTPUT: Annuli location plot saved here: ',outname)
if verbose:
print('Number of annuli placed randomly is: ',n_counter)
return sky_counts, pix_counts, n_notfinite, h
def copy_wcs(fits_withwcs,fits_withoutwcs):
h = fits.getheader(fits_withwcs)
f = fits.open(fits_withoutwcs)
newf = fits.PrimaryHDU()
newf.header = f[0].header
newf.data = f[0].data
newf.header['CTYPE1'] = h['CTYPE1']
newf.header['CRPIX1'] = h['CRPIX1']
newf.header['CRVAL1'] = h['CRVAL1']
newf.header['CTYPE2'] = h['CTYPE2']
newf.header['CRPIX2'] = h['CRPIX2']
newf.header['CRVAL2'] = h['CRVAL2']
newf.header['CD1_1'] = h['CD1_1']
newf.header['CD1_2'] = h['CD1_2']
newf.header['CD2_1'] = h['CD2_1']
newf.header['CD2_2'] = h['CD2_2']
#newf.header['RADECSYS'] = h['RADECSYS']
newf.header['EQUINOX'] = h['EQUINOX']
saveloc = fits_withoutwcs.split('.')[0]+'_wcs.fits'
newf.writeto(saveloc, overwrite=True)
return saveloc
def calculate_sky_annuli_alloverim(fitsimage,image,total_mask,annulusparams,n_iterations):
'''Save sky count averages in n_iteration annuli, also plot up where random n_iterations of annuli were placed on fits image.'''
# Calculate sky in input annulus
xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2 = read_annulusparams(annulusparams)
h = fits.getheader(fitsimage)
xlen = int(h['NAXIS2'])
ylen = int(h['NAXIS1'])
mask = make_annulus_mask(xlen,ylen,xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2)
initial_annuli_mask_data = mask.copy()
image_annuli = copy.copy(image)
image_annuli[mask] = float('nan')
image_annuli[total_mask] = float('nan')
initial_annuli_name = 'annuli_input.fits'
writeFITS(image_annuli,initial_annuli_name)
print(' ')
print('***OUTPUT: Sky calculation annulus saved here: ',initial_annuli_name)
print(' ')
print('Average in input sky annulus is: ',np.nanmean(image_annuli))
print('Median in input sky annulus is: ',np.nanmedian(image_annuli))
print('Std in input sky annulus is: ',np.nanstd(image_annuli))
print('Number of finite non masked pixels in input sky annulus: ',np.sum(np.isfinite(image_annuli)))
# Plonk some random annuli, calculate average of averages and std of averages
# Vary xc,yc within width of annuli randomly (move xc2,yc2 by same amount)
# AND vary a1 randomly while keeping a1-a2 constant, varations up to width of annuli
annuli_thickness = abs(a1-a2)/2.
# Start figure to plot up annuli locations
fig = plt.figure(figsize=(48, 36))
f1 = aplpy.FITSFigure(fitsimage,figure=fig)
#f1.set_tick_labels_font(size='xx-small')
f1.ticks.hide()
f1.tick_labels.hide_x()
f1.tick_labels.hide_y()
f1.axis_labels.hide()
f1.show_grayscale(invert=True, stretch='linear', vmin=4.25, vmax=4.32)
# g-band ngc 2841 vmin=2.38, vmax=2.42
sky_counts = []
pix_counts = []
n_counter = 0
n_notfinite = 0
xtomesh = np.arange(0, ylen, 1)
    ytomesh = np.arange(0, xlen, 1)
from load import ROOT as R
import numpy as np
import gna.constructors as C
from gna.bundle import TransformationBundle
from gna.configurator import NestedDict
class integral_2d1d_v05(TransformationBundle):
"""2d integral based on Integrator21GL v05
Changes since integral_2d1d_v04:
- Added custom configuration of bin size and order
Configuration options:
- instances - dictionary of (name, label) pairs for instances
{ 'name1': 'label1', ... }
alternativetly, a a dictionary with extra options may be passed instead of label:
{ 'name1': {'label': 'label1', 'noindex': True}, ... }
Extra options:
* 'noindex' - disable indices
- xedgescfg - a list of 3-tuples:
[
(left1, step1, order1),
(left2, step2, order2),
...
(last, None, None),
]
an interval [left1, left2) will be filled with points with step1 (similar to arange),
each interval will have order=order1
- yorder - number of points in bin on axis Y, integer.
- variables - variable names (array of 2 strings).
Predefined names:
- variables[0] - array with points to integrate over variable[0]
- variables[1] - array with points to integrate over variable[1]
- variables[0]+'_edges' - array with bin edges for variable[0]
- variables[0]+'_centers' - array with bin centers for variable[0]
- variables[0]+'_hist' - histogram with bin edges for variable[0]
- variables[i]+'_mesh' - 2d mesh for variable[i] (as in numpy.meshgrid)
(may be configured via 'names' option of a bundle)
"""
def __init__(self, *args, **kwargs):
TransformationBundle.__init__( self, *args, **kwargs )
self.check_cfg()
self.init()
@staticmethod
def _provides(cfg):
var0, var1 = cfg.variables
variables = (var0, var1, var0+'_centers', var0+'_edges', var0+'_hist', var0+'_mesh', var1+'_mesh')
names = tuple(cfg['instances'].keys())
return (), names+variables
def check_cfg(self):
if not 'name' in self.cfg:
pkey = self.cfg.parent_key()
if not pkey:
raise self.exception('"name" option is not provided')
self.cfg.name = pkey
# try:
# self.edges = np.ascontiguousarray(self.cfg.edges, dtype='d')
# except:
# raise self.exception('Invalid binning definition: {!r}'.format(self.cfg.edges))
# try:
# self.xorders = np.ascontiguousarray(self.cfg.xorders, dtype='P')
# except:
# raise self.exception('Invalid xorders definition: {!r}'.format(self.cfg.xorders))
if len(self.cfg.variables)!=2:
            raise self.exception('Two variables should be provided')
def init(self):
edges_list = []
orders_list = []
edgescfg = self.cfg.xedgescfg
for (start, step, orders), (stop, _, _) in zip(edgescfg[:-1], edgescfg[1:]):
            cedges = np.arange(start, stop, step, dtype='d')
'''
A data generator for 2D object detection.
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import inspect
from collections import defaultdict
import warnings
import sklearn.utils
from copy import deepcopy
from PIL import Image
import cv2
import csv
import os
import sys
from tqdm import tqdm, trange
try:
import h5py
except ImportError:
warnings.warn("'h5py' module is missing. The fast HDF5 dataset option will be unavailable.")
try:
import json
except ImportError:
warnings.warn("'json' module is missing. The JSON-parser will be unavailable.")
try:
from bs4 import BeautifulSoup
except ImportError:
warnings.warn("'BeautifulSoup' module is missing. The XML-parser will be unavailable.")
try:
import pickle
except ImportError:
warnings.warn("'pickle' module is missing. You won't be able to save parsed file lists and annotations as pickled files.")
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter
class DegenerateBatchError(Exception):
'''
An exception class to be raised if a generated batch ends up being degenerate,
e.g. if a generated batch is empty.
'''
pass
class DatasetError(Exception):
'''
An exception class to be raised if a anything is wrong with the dataset,
in particular if you try to generate batches when no dataset was loaded.
'''
pass
class DataGenerator:
'''
A generator to generate batches of samples and corresponding labels indefinitely.
Can shuffle the dataset consistently after each complete pass.
Currently provides three methods to parse annotation data: A general-purpose CSV parser,
an XML parser for the Pascal VOC datasets, and a JSON parser for the MS COCO datasets.
If the annotations of your dataset are in a format that is not supported by these parsers,
you could just add another parser method and still use this generator.
Can perform image transformations for data conversion and data augmentation,
for details please refer to the documentation of the `generate()` method.
'''
def __init__(self,
load_images_into_memory=False,
hdf5_dataset_path=None,
filenames=None,
filenames_type='text',
images_dir=None,
labels=None,
image_ids=None,
eval_neutral=None,
labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'),
verbose=True):
'''
Initializes the data generator. You can either load a dataset directly here in the constructor,
e.g. an HDF5 dataset, or you can use one of the parser methods to read in a dataset.
Arguments:
load_images_into_memory (bool, optional): If `True`, the entire dataset will be loaded into memory.
This enables noticeably faster data generation than loading batches of images into memory ad hoc.
Be sure that you have enough memory before you activate this option.
hdf5_dataset_path (str, optional): The full file path of an HDF5 file that contains a dataset in the
format that the `create_hdf5_dataset()` method produces. If you load such an HDF5 dataset, you
don't need to use any of the parser methods anymore, the HDF5 dataset already contains all relevant
data.
filenames (string or list, optional): `None` or either a Python list/tuple or a string representing
a filepath. If a list/tuple is passed, it must contain the file names (full paths) of the
images to be used. Note that the list/tuple must contain the paths to the images,
not the images themselves. If a filepath string is passed, it must point either to
(1) a pickled file containing a list/tuple as described above. In this case the `filenames_type`
argument must be set to `pickle`.
Or
(2) a text file. Each line of the text file contains the file name (basename of the file only,
not the full directory path) to one image and nothing else. In this case the `filenames_type`
argument must be set to `text` and you must pass the path to the directory that contains the
images in `images_dir`.
filenames_type (string, optional): In case a string is passed for `filenames`, this indicates what
type of file `filenames` is. It can be either 'pickle' for a pickled file or 'text' for a
plain text file.
images_dir (string, optional): In case a text file is passed for `filenames`, the full paths to
the images will be composed from `images_dir` and the names in the text file, i.e. this
should be the directory that contains the images to which the text file refers.
If `filenames_type` is not 'text', then this argument is irrelevant.
labels (string or list, optional): `None` or either a Python list/tuple or a string representing
the path to a pickled file containing a list/tuple. The list/tuple must contain Numpy arrays
that represent the labels of the dataset.
image_ids (string or list, optional): `None` or either a Python list/tuple or a string representing
the path to a pickled file containing a list/tuple. The list/tuple must contain the image
IDs of the images in the dataset.
eval_neutral (string or list, optional): `None` or either a Python list/tuple or a string representing
the path to a pickled file containing a list/tuple. The list/tuple must contain for each image
a list that indicates for each ground truth object in the image whether that object is supposed
to be treated as neutral during an evaluation.
labels_output_format (list, optional): A list of five strings representing the desired order of the five
items class ID, xmin, ymin, xmax, ymax in the generated ground truth data (if any). The expected
strings are 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'.
verbose (bool, optional): If `True`, prints out the progress for some constructor operations that may
take a bit longer.
'''
self.labels_output_format = labels_output_format
self.labels_format={'class_id': labels_output_format.index('class_id'),
'xmin': labels_output_format.index('xmin'),
'ymin': labels_output_format.index('ymin'),
'xmax': labels_output_format.index('xmax'),
'ymax': labels_output_format.index('ymax')} # This dictionary is for internal use.
self.dataset_size = 0 # As long as we haven't loaded anything yet, the dataset size is zero.
self.load_images_into_memory = load_images_into_memory
self.images = None # The only way that this list will not stay `None` is if `load_images_into_memory == True`.
# `self.filenames` is a list containing all file names of the image samples (full paths).
# Note that it does not contain the actual image files themselves. This list is one of the outputs of the parser methods.
# In case you are loading an HDF5 dataset, this list will be `None`.
if not filenames is None:
if isinstance(filenames, (list, tuple)):
self.filenames = filenames
elif isinstance(filenames, str):
with open(filenames, 'rb') as f:
if filenames_type == 'pickle':
self.filenames = pickle.load(f)
elif filenames_type == 'text':
self.filenames = [os.path.join(images_dir, line.strip()) for line in f]
else:
raise ValueError("`filenames_type` can be either 'text' or 'pickle'.")
else:
raise ValueError("`filenames` must be either a Python list/tuple or a string representing a filepath (to a pickled or text file). The value you passed is neither of the two.")
self.dataset_size = len(self.filenames)
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)
if load_images_into_memory:
self.images = []
if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)
else: it = self.filenames
for filename in it:
with Image.open(filename) as image:
self.images.append(np.array(image, dtype=np.uint8))
else:
self.filenames = None
# In case ground truth is available, `self.labels` is a list containing for each image a list (or NumPy array)
# of ground truth bounding boxes for that image.
if not labels is None:
if isinstance(labels, str):
with open(labels, 'rb') as f:
self.labels = pickle.load(f)
elif isinstance(labels, (list, tuple)):
self.labels = labels
else:
raise ValueError("`labels` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.")
else:
self.labels = None
if not image_ids is None:
if isinstance(image_ids, str):
with open(image_ids, 'rb') as f:
self.image_ids = pickle.load(f)
elif isinstance(image_ids, (list, tuple)):
self.image_ids = image_ids
else:
raise ValueError("`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.")
else:
self.image_ids = None
if not eval_neutral is None:
if isinstance(eval_neutral, str):
with open(eval_neutral, 'rb') as f:
self.eval_neutral = pickle.load(f)
elif isinstance(eval_neutral, (list, tuple)):
self.eval_neutral = eval_neutral
else:
                raise ValueError("`eval_neutral` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.")
else:
self.eval_neutral = None
if not hdf5_dataset_path is None:
self.hdf5_dataset_path = hdf5_dataset_path
self.load_hdf5_dataset(verbose=verbose)
else:
self.hdf5_dataset = None
def load_hdf5_dataset(self, verbose=True):
'''
Loads an HDF5 dataset that is in the format that the `create_hdf5_dataset()` method
produces.
Arguments:
verbose (bool, optional): If `True`, prints out the progress while loading
the dataset.
Returns:
None.
'''
self.hdf5_dataset = h5py.File(self.hdf5_dataset_path, 'r')
self.dataset_size = len(self.hdf5_dataset['images'])
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset or images in memory, we will shuffle this index list.
if self.load_images_into_memory:
self.images = []
if verbose: tr = trange(self.dataset_size, desc='Loading images into memory', file=sys.stdout)
else: tr = range(self.dataset_size)
for i in tr:
self.images.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))
if self.hdf5_dataset.attrs['has_labels']:
self.labels = []
labels = self.hdf5_dataset['labels']
label_shapes = self.hdf5_dataset['label_shapes']
if verbose: tr = trange(self.dataset_size, desc='Loading labels', file=sys.stdout)
else: tr = range(self.dataset_size)
for i in tr:
self.labels.append(labels[i].reshape(label_shapes[i]))
if self.hdf5_dataset.attrs['has_image_ids']:
self.image_ids = []
image_ids = self.hdf5_dataset['image_ids']
if verbose: tr = trange(self.dataset_size, desc='Loading image IDs', file=sys.stdout)
else: tr = range(self.dataset_size)
for i in tr:
self.image_ids.append(image_ids[i])
if self.hdf5_dataset.attrs['has_eval_neutral']:
self.eval_neutral = []
eval_neutral = self.hdf5_dataset['eval_neutral']
if verbose: tr = trange(self.dataset_size, desc='Loading evaluation-neutrality annotations', file=sys.stdout)
else: tr = range(self.dataset_size)
for i in tr:
self.eval_neutral.append(eval_neutral[i])
def parse_csv(self,
images_dir,
labels_filename,
input_format,
include_classes='all',
random_sample=False,
ret=False,
verbose=True):
'''
Arguments:
images_dir (str): The path to the directory that contains the images.
labels_filename (str): The filepath to a CSV file that contains one ground truth bounding box per line
and each line contains the following six items: image file name, class ID, xmin, xmax, ymin, ymax.
The six items do not have to be in a specific order, but they must be the first six columns of
each line. The order of these items in the CSV file must be specified in `input_format`.
The class ID is an integer greater than zero. Class ID 0 is reserved for the background class.
`xmin` and `xmax` are the left-most and right-most absolute horizontal coordinates of the box,
`ymin` and `ymax` are the top-most and bottom-most absolute vertical coordinates of the box.
The image name is expected to be just the name of the image file without the directory path
at which the image is located.
input_format (list): A list of six strings representing the order of the six items
image file name, class ID, xmin, xmax, ymin, ymax in the input CSV file. The expected strings
are 'image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'.
include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that
are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.
random_sample (float, optional): Either `False` or a float in `[0,1]`. If this is `False`, the
full dataset will be used by the generator. If this is a float in `[0,1]`, a randomly sampled
fraction of the dataset will be used, where `random_sample` is the fraction of the dataset
to be used. For example, if `random_sample = 0.2`, 20 precent of the dataset will be randomly selected,
the rest will be ommitted. The fraction refers to the number of images, not to the number
of boxes, i.e. each image that will be added to the dataset will always be added with all
of its boxes.
ret (bool, optional): Whether or not to return the outputs of the parser.
verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.
Returns:
None by default, optionally lists for whichever are available of images, image filenames, labels, and image IDs.
'''
# Set class members.
self.images_dir = images_dir
self.labels_filename = labels_filename
self.input_format = input_format
self.include_classes = include_classes
# Before we begin, make sure that we have a labels_filename and an input_format
if self.labels_filename is None or self.input_format is None:
raise ValueError("`labels_filename` and/or `input_format` have not been set yet. You need to pass them as arguments.")
# Erase data that might have been parsed before
self.filenames = []
self.image_ids = []
self.labels = []
# First, just read in the CSV file lines and sort them.
data = []
with open(self.labels_filename, newline='') as csvfile:
csvread = csv.reader(csvfile, delimiter=',')
next(csvread) # Skip the header row.
for row in csvread: # For every line (i.e for every bounding box) in the CSV file...
if self.include_classes == 'all' or int(row[self.input_format.index('class_id')].strip()) in self.include_classes: # If the class_id is among the classes that are to be included in the dataset...
box = [] # Store the box class and coordinates here
box.append(row[self.input_format.index('image_name')].strip()) # Select the image name column in the input format and append its content to `box`
for element in self.labels_output_format: # For each element in the output format (where the elements are the class ID and the four box coordinates)...
box.append(int(row[self.input_format.index(element)].strip())) # ...select the respective column in the input format and append it to `box`.
data.append(box)
data = sorted(data) # The data needs to be sorted, otherwise the next step won't give the correct result
# Now that we've made sure that the data is sorted by file names,
# we can compile the actual samples and labels lists
current_file = data[0][0] # The current image for which we're collecting the ground truth boxes
current_image_id = data[0][0].split('.')[0] # The image ID will be the portion of the image name before the first dot.
current_labels = [] # The list where we collect all ground truth boxes for a given image
add_to_dataset = False
for i, box in enumerate(data):
if box[0] == current_file: # If this box (i.e. this line of the CSV file) belongs to the current image file
current_labels.append(box[1:])
if i == len(data)-1: # If this is the last line of the CSV file
if random_sample: # In case we're not using the full dataset, but a random sample of it.
p = np.random.uniform(0,1)
if p >= (1-random_sample):
self.labels.append(np.stack(current_labels, axis=0))
self.filenames.append(os.path.join(self.images_dir, current_file))
self.image_ids.append(current_image_id)
else:
self.labels.append(np.stack(current_labels, axis=0))
self.filenames.append(os.path.join(self.images_dir, current_file))
self.image_ids.append(current_image_id)
else: # If this box belongs to a new image file
if random_sample: # In case we're not using the full dataset, but a random sample of it.
p = np.random.uniform(0,1)
if p >= (1-random_sample):
self.labels.append(np.stack(current_labels, axis=0))
self.filenames.append(os.path.join(self.images_dir, current_file))
self.image_ids.append(current_image_id)
else:
self.labels.append(np.stack(current_labels, axis=0))
self.filenames.append(os.path.join(self.images_dir, current_file))
self.image_ids.append(current_image_id)
current_labels = [] # Reset the labels list because this is a new file.
current_file = box[0]
current_image_id = box[0].split('.')[0]
current_labels.append(box[1:])
if i == len(data)-1: # If this is the last line of the CSV file
if random_sample: # In case we're not using the full dataset, but a random sample of it.
p = np.random.uniform(0,1)
if p >= (1-random_sample):
self.labels.append(np.stack(current_labels, axis=0))
self.filenames.append(os.path.join(self.images_dir, current_file))
self.image_ids.append(current_image_id)
else:
self.labels.append(np.stack(current_labels, axis=0))
self.filenames.append(os.path.join(self.images_dir, current_file))
self.image_ids.append(current_image_id)
self.dataset_size = len(self.filenames)
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)
if self.load_images_into_memory:
self.images = []
if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)
else: it = self.filenames
for filename in it:
with Image.open(filename) as image:
self.images.append(np.array(image, dtype=np.uint8))
if ret: # In case we want to return these
return self.images, self.filenames, self.labels, self.image_ids
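    # Usage sketch (hypothetical paths; the CSV must follow the column layout described
    # in the docstring above):
    #
    #   gen = DataGenerator()
    #   gen.parse_csv(images_dir='data/images',
    #                 labels_filename='data/labels.csv',
    #                 input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'])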
def parse_xml(self,
images_dirs,
image_set_filenames,
annotations_dirs=[],
classes=['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor'],
include_classes = 'all',
exclude_truncated=False,
exclude_difficult=False,
ret=False,
verbose=True):
'''
This is an XML parser for the Pascal VOC datasets. It might be applicable to other datasets with minor changes to
the code, but in its current form it expects the data format and XML tags of the Pascal VOC datasets.
Arguments:
images_dirs (list): A list of strings, where each string is the path of a directory that
contains images that are to be part of the dataset. This allows you to aggregate multiple datasets
into one (e.g. one directory that contains the images for Pascal VOC 2007, another that contains
the images for Pascal VOC 2012, etc.).
image_set_filenames (list): A list of strings, where each string is the path of the text file with the image
set to be loaded. Must be one file per image directory given. These text files define what images in the
respective image directories are to be part of the dataset and simply contain one image ID per line
and nothing else.
annotations_dirs (list, optional): A list of strings, where each string is the path of a directory that
contains the annotations (XML files) that belong to the images in the respective image directories given.
The directories must contain one XML file per image and the name of an XML file must be the image ID
of the image it belongs to. The content of the XML files must be in the Pascal VOC format.
classes (list, optional): A list containing the names of the object classes as found in the
`name` XML tags. Must include the class `background` as the first list item. The order of this list
defines the class IDs.
include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that
are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.
exclude_truncated (bool, optional): If `True`, excludes boxes that are labeled as 'truncated'.
exclude_difficult (bool, optional): If `True`, excludes boxes that are labeled as 'difficult'.
ret (bool, optional): Whether or not to return the outputs of the parser.
verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.
Returns:
None by default, optionally lists for whichever are available of images, image filenames, labels, image IDs,
and a list indicating which boxes are annotated with the label "difficult".
'''
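# Hedged usage sketch (assumes this method lives on the library's `DataGenerator`
# class and that a Pascal-VOC-style directory layout exists; paths are illustrative):
#
#     gen = DataGenerator(load_images_into_memory=False)
#     gen.parse_xml(images_dirs=['VOCdevkit/VOC2007/JPEGImages'],
#                   image_set_filenames=['VOCdevkit/VOC2007/ImageSets/Main/trainval.txt'],
#                   annotations_dirs=['VOCdevkit/VOC2007/Annotations'],
#                   include_classes='all')
#
# Afterwards `gen.filenames`, `gen.labels` and `gen.eval_neutral` each hold one entry per image.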
# Set class members.
self.images_dirs = images_dirs
self.annotations_dirs = annotations_dirs
self.image_set_filenames = image_set_filenames
self.classes = classes
self.include_classes = include_classes
# Erase data that might have been parsed before.
self.filenames = []
self.image_ids = []
self.labels = []
self.eval_neutral = []
if not annotations_dirs:
self.labels = None
self.eval_neutral = None
annotations_dirs = [None] * len(images_dirs)
for images_dir, image_set_filename, annotations_dir in zip(images_dirs, image_set_filenames, annotations_dirs):
# Read the image set file so that we know the IDs of all the images to be included in the dataset.
with open(image_set_filename) as f:
image_ids = [line.strip() for line in f] # Note: These are strings, not integers.
self.image_ids += image_ids
if verbose: it = tqdm(image_ids, desc="Processing image set '{}'".format(os.path.basename(image_set_filename)), file=sys.stdout)
else: it = image_ids
# Loop over all images in this dataset.
for image_id in it:
filename = '{}'.format(image_id) + '.jpg'
self.filenames.append(os.path.join(images_dir, filename))
if not annotations_dir is None:
# Parse the XML file for this image.
with open(os.path.join(annotations_dir, image_id + '.xml')) as f:
soup = BeautifulSoup(f, 'html.parser')
folder = soup.folder.text # In case we want to return the folder in addition to the image file name. Relevant for determining which dataset an image belongs to.
#filename = soup.filename.text
boxes = [] # We'll store all boxes for this image here.
eval_neutr = [] # We'll store whether a box is annotated as "difficult" here.
objects = soup.find_all('object') # Get a list of all objects in this image.
# Parse the data for each object.
for obj in objects:
class_name = obj.find('name', recursive=False).text
class_id = self.classes.index(class_name)
# Check whether this class is supposed to be included in the dataset.
if (not self.include_classes == 'all') and (not class_id in self.include_classes): continue
pose = obj.find('pose', recursive=False).text
truncated = int(obj.find('truncated', recursive=False).text)
if exclude_truncated and (truncated == 1): continue
difficult = int(obj.find('difficult', recursive=False).text)
if exclude_difficult and (difficult == 1): continue
# Get the bounding box coordinates.
bndbox = obj.find('bndbox', recursive=False)
xmin = int(bndbox.xmin.text)
ymin = int(bndbox.ymin.text)
xmax = int(bndbox.xmax.text)
ymax = int(bndbox.ymax.text)
item_dict = {'folder': folder,
'image_name': filename,
'image_id': image_id,
'class_name': class_name,
'class_id': class_id,
'pose': pose,
'truncated': truncated,
'difficult': difficult,
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax}
box = []
for item in self.labels_output_format:
box.append(item_dict[item])
boxes.append(box)
if difficult: eval_neutr.append(True)
else: eval_neutr.append(False)
self.labels.append(boxes)
self.eval_neutral.append(eval_neutr)
self.dataset_size = len(self.filenames)
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)
if self.load_images_into_memory:
self.images = []
if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)
else: it = self.filenames
for filename in it:
with Image.open(filename) as image:
self.images.append(np.array(image, dtype=np.uint8))
if ret:
return self.images, self.filenames, self.labels, self.image_ids, self.eval_neutral
def parse_json(self,
images_dirs,
annotations_filenames,
ground_truth_available=False,
include_classes='all',
ret=False,
verbose=True):
'''
This is a JSON parser for the MS COCO datasets. It might be applicable to other datasets with minor changes to
the code, but in its current form it expects the JSON format of the MS COCO datasets.
Arguments:
images_dirs (list, optional): A list of strings, where each string is the path of a directory that
contains images that are to be part of the dataset. This allows you to aggregate multiple datasets
into one (e.g. one directory that contains the images for MS COCO Train 2014, another one for MS COCO
Val 2014, another one for MS COCO Train 2017 etc.).
annotations_filenames (list): A list of strings, where each string is the path of the JSON file
that contains the annotations for the images in the respective image directories given, i.e. one
JSON file per image directory that contains the annotations for all images in that directory.
The content of the JSON files must be in MS COCO object detection format. Note that these annotations
files do not necessarily need to contain ground truth information. MS COCO also provides annotations
files without ground truth information for the test datasets, called `image_info_[...].json`.
ground_truth_available (bool, optional): Set `True` if the annotations files contain ground truth information.
include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that
are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.
ret (bool, optional): Whether or not to return the outputs of the parser.
verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.
Returns:
None by default, optionally lists for whichever are available of images, image filenames, labels and image IDs.
'''
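# Hedged usage sketch (class name, constructor and paths are illustrative assumptions):
#
#     gen = DataGenerator()
#     gen.parse_json(images_dirs=['MSCOCO/train2017'],
#                    annotations_filenames=['MSCOCO/annotations/instances_train2017.json'],
#                    ground_truth_available=True)
#
# With `ground_truth_available=False` (e.g. for the `image_info_[...].json` test-set files),
# only filenames and image IDs are collected and `self.labels` stays `None`.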
self.images_dirs = images_dirs
self.annotations_filenames = annotations_filenames
self.include_classes = include_classes
# Erase data that might have been parsed before.
self.filenames = []
self.image_ids = []
self.labels = []
if not ground_truth_available:
self.labels = None
# Build the dictionaries that map between class names and class IDs.
with open(annotations_filenames[0], 'r') as f:
annotations = json.load(f)
# Unfortunately the 80 MS COCO class IDs are not all consecutive. They go
# from 1 to 90 and some numbers are skipped. Since the IDs that we feed
# into a neural network must be consecutive, we'll save both the original
# (non-consecutive) IDs as well as transformed maps.
# We'll build maps in both directions between the original IDs and the transformed, consecutive IDs.
self.cats_to_names = {} # The map between class names (values) and their original IDs (keys)
self.classes_to_names = [] # A list of the class names with their indices representing the transformed IDs
self.classes_to_names.append('background') # Need to add the background class first so that the indexing is right.
self.cats_to_classes = {} # A dictionary that maps between the original (keys) and the transformed IDs (values)
self.classes_to_cats = {} # A dictionary that maps between the transformed (keys) and the original IDs (values)
for i, cat in enumerate(annotations['categories']):
self.cats_to_names[cat['id']] = cat['name']
self.classes_to_names.append(cat['name'])
self.cats_to_classes[cat['id']] = i + 1
self.classes_to_cats[i + 1] = cat['id']
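# Illustrative (hypothetical numbers): if the annotations list category 1 = 'person' and
# category 3 = 'car', the loop above yields cats_to_classes == {1: 1, 3: 2},
# classes_to_cats == {1: 1, 2: 3} and classes_to_names == ['background', 'person', 'car'],
# i.e. the training IDs are consecutive even though the original COCO IDs are not.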
# Iterate over all datasets.
for images_dir, annotations_filename in zip(self.images_dirs, self.annotations_filenames):
# Load the JSON file.
with open(annotations_filename, 'r') as f:
annotations = json.load(f)
if ground_truth_available:
# Create the annotations map, a dictionary whose keys are the image IDs
# and whose values are the annotations for the respective image ID.
image_ids_to_annotations = defaultdict(list)
for annotation in annotations['annotations']:
image_ids_to_annotations[annotation['image_id']].append(annotation)
if verbose: it = tqdm(annotations['images'], desc="Processing '{}'".format(os.path.basename(annotations_filename)), file=sys.stdout)
else: it = annotations['images']
# Loop over all images in this dataset.
for img in it:
self.filenames.append(os.path.join(images_dir, img['file_name']))
self.image_ids.append(img['id'])
if ground_truth_available:
# Get all annotations for this image.
annotations = image_ids_to_annotations[img['id']]
boxes = []
for annotation in annotations:
cat_id = annotation['category_id']
# Check if this class is supposed to be included in the dataset.
if (not self.include_classes == 'all') and (not cat_id in self.include_classes): continue
# Transform the original class ID to fit in the sequence of consecutive IDs.
class_id = self.cats_to_classes[cat_id]
xmin = annotation['bbox'][0]
ymin = annotation['bbox'][1]
width = annotation['bbox'][2]
height = annotation['bbox'][3]
# Compute `xmax` and `ymax`.
xmax = xmin + width
ymax = ymin + height
item_dict = {'image_name': img['file_name'],
'image_id': img['id'],
'class_id': class_id,
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax}
box = []
for item in self.labels_output_format:
box.append(item_dict[item])
boxes.append(box)
self.labels.append(boxes)
self.dataset_size = len(self.filenames)
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)
if self.load_images_into_memory:
self.images = []
if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)
else: it = self.filenames
for filename in it:
with Image.open(filename) as image:
self.images.append(np.array(image, dtype=np.uint8))
if ret:
return self.images, self.filenames, self.labels, self.image_ids
def create_hdf5_dataset(self,
file_path='dataset.h5',
resize=False,
variable_image_size=True,
verbose=True):
'''
Converts the currently loaded dataset into an HDF5 file. This HDF5 file contains all
images as uncompressed arrays in a contiguous block of memory, which allows for them
to be loaded faster. Such an uncompressed dataset, however, may take up considerably
more space on your hard drive than the sum of the source images in a compressed format
such as JPG or PNG.
It is recommended that you always convert the dataset into an HDF5 dataset if you
have enough hard drive space, since loading from an HDF5 dataset accelerates the data
generation noticeably.
Note that you must load a dataset (e.g. via one of the parser methods) before creating
an HDF5 dataset from it.
The created HDF5 dataset will remain open upon its creation so that it can be used right
away.
Arguments:
file_path (str, optional): The full file path under which to store the HDF5 dataset.
You can load this output file via the `DataGenerator` constructor in the future.
resize (tuple, optional): `False` or a 2-tuple `(height, width)` that represents the
target size for the images. All images in the dataset will be resized to this
target size before they will be written to the HDF5 file. If `False`, no resizing
will be performed.
variable_image_size (bool, optional): The only purpose of this argument is that its
value will be stored in the HDF5 dataset in order to be able to quickly find out
whether the images in the dataset all have the same size or not.
verbose (bool, optional): Whether or not to print out the progress of the dataset creation.
Returns:
None.
'''
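# Hedged usage sketch (file name is illustrative; a parser must have been run first):
#
#     gen.parse_xml(...)                                    # or parse_csv()/parse_json()
#     gen.create_hdf5_dataset(file_path='voc07_trainval.h5',
#                             resize=False,
#                             variable_image_size=True)
#
# The created file can later be handed to the constructor (e.g. via an `hdf5_dataset_path`
# argument, if the surrounding class offers one) so images are read from the uncompressed
# HDF5 arrays instead of the individual JPG/PNG files.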
self.hdf5_dataset_path = file_path
dataset_size = len(self.filenames)
# Create the HDF5 file.
hdf5_dataset = h5py.File(file_path, 'w')
# Create a few attributes that tell us what this dataset contains.
# The dataset will obviously always contain images, but maybe it will
# also contain labels, image IDs, etc.
hdf5_dataset.attrs.create(name='has_labels', data=False, shape=None, dtype=np.bool_)
hdf5_dataset.attrs.create(name='has_image_ids', data=False, shape=None, dtype=np.bool_)
hdf5_dataset.attrs.create(name='has_eval_neutral', data=False, shape=None, dtype=np.bool_)
# It's useful to be able to quickly check whether the images in a dataset all
# have the same size or not, so add a boolean attribute for that.
if variable_image_size and not resize:
hdf5_dataset.attrs.create(name='variable_image_size', data=True, shape=None, dtype=np.bool_)
else:
hdf5_dataset.attrs.create(name='variable_image_size', data=False, shape=None, dtype=np.bool_)
# Create the dataset in which the images will be stored as flattened arrays.
# This allows us, among other things, to store images of variable size.
hdf5_images = hdf5_dataset.create_dataset(name='images',
shape=(dataset_size,),
maxshape=(None),
dtype=h5py.special_dtype(vlen=np.uint8))
# Create the dataset that will hold the image heights, widths and channels that
# we need in order to reconstruct the images from the flattened arrays later.
hdf5_image_shapes = hdf5_dataset.create_dataset(name='image_shapes',
shape=(dataset_size, 3),
maxshape=(None, 3),
dtype=np.int32)
if not (self.labels is None):
# Create the dataset in which the labels will be stored as flattened arrays.
hdf5_labels = hdf5_dataset.create_dataset(name='labels',
shape=(dataset_size,),
maxshape=(None),
dtype=h5py.special_dtype(vlen=np.int32))
# Create the dataset that will hold the dimensions of the labels arrays for
# each image so that we can restore the labels from the flattened arrays later.
hdf5_label_shapes = hdf5_dataset.create_dataset(name='label_shapes',
shape=(dataset_size, 2),
maxshape=(None, 2),
dtype=np.int32)
hdf5_dataset.attrs.modify(name='has_labels', value=True)
if not (self.image_ids is None):
hdf5_image_ids = hdf5_dataset.create_dataset(name='image_ids',
shape=(dataset_size,),
maxshape=(None),
dtype=h5py.special_dtype(vlen=str))
hdf5_dataset.attrs.modify(name='has_image_ids', value=True)
if not (self.eval_neutral is None):
# Create the dataset in which the labels will be stored as flattened arrays.
hdf5_eval_neutral = hdf5_dataset.create_dataset(name='eval_neutral',
shape=(dataset_size,),
maxshape=(None),
dtype=h5py.special_dtype(vlen=np.bool_))
hdf5_dataset.attrs.modify(name='has_eval_neutral', value=True)
if verbose:
tr = trange(dataset_size, desc='Creating HDF5 dataset', file=sys.stdout)
else:
tr = range(dataset_size)
# Iterate over all images in the dataset.
for i in tr:
# Store the image.
with Image.open(self.filenames[i]) as image:
image = np.asarray(image, dtype=np.uint8)
# Make sure all images end up having three channels.
if image.ndim == 2:
image = np.stack([image] * 3, axis=-1)
elif image.ndim == 3:
if image.shape[2] == 1:
image = np.concatenate([image] * 3, axis=-1)
elif image.shape[2] == 4:
image = image[:,:,:3]
if resize:
image = cv2.resize(image, dsize=(resize[1], resize[0]))
# Flatten the image array and write it to the images dataset.
hdf5_images[i] = image.reshape(-1)
# Write the image's shape to the image shapes dataset.
hdf5_image_shapes[i] = image.shape
# Store the ground truth if we have any.
if not (self.labels is None):
labels = np.asarray(self.labels[i])
# Flatten the labels array and write it to the labels dataset.
hdf5_labels[i] = labels.reshape(-1)
# Write the labels' shape to the label shapes dataset.
hdf5_label_shapes[i] = labels.shape
# Store the image ID if we have one.
if not (self.image_ids is None):
hdf5_image_ids[i] = self.image_ids[i]
# Store the evaluation-neutrality annotations if we have any.
if not (self.eval_neutral is None):
hdf5_eval_neutral[i] = self.eval_neutral[i]
hdf5_dataset.close()
self.hdf5_dataset = h5py.File(file_path, 'r')
self.hdf5_dataset_path = file_path
self.dataset_size = len(self.hdf5_dataset['images'])
self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset, we will shuffle this index list.
def generate(self,
batch_size=32,
shuffle=True,
transformations=[],
label_encoder=None,
returns={'processed_images', 'encoded_labels'},
keep_images_without_gt=False,
degenerate_box_handling='remove'):
'''
Generates batches of samples and (optionally) corresponding labels indefinitely.
Can shuffle the samples consistently after each complete pass.
Optionally takes a list of arbitrary image transformations to apply to the
samples ad hoc.
Arguments:
batch_size (int, optional): The size of the batches to be generated.
shuffle (bool, optional): Whether or not to shuffle the dataset before each pass.
This option should always be `True` during training, but it can be useful to turn shuffling off
for debugging or if you're using the generator for prediction.
transformations (list, optional): A list of transformations that will be applied to the images and labels
in the given order. Each transformation is a callable that takes as input an image (as a Numpy array)
and optionally labels (also as a Numpy array) and returns an image and optionally labels in the same
format.
label_encoder (callable, optional): Only relevant if labels are given. A callable that takes as input the
labels of a batch (as a list of Numpy arrays) and returns some structure that represents those labels.
The general use case for this is to convert labels from their input format to a format that a given object
detection model needs as its training targets.
returns (set, optional): A set of strings that determines what outputs the generator yields. The generator's output
is always a tuple that contains the outputs specified in this set and only those. If an output is not available,
it will be `None`. The output tuple can contain the following outputs according to the specified keyword strings:
* 'processed_images': An array containing the processed images. Will always be in the outputs, so it doesn't
matter whether or not you include this keyword in the set.
* 'encoded_labels': The encoded labels tensor. Will always be in the outputs if a label encoder is given,
so it doesn't matter whether or not you include this keyword in the set if you pass a label encoder.
* 'matched_anchors': Only available if `label_encoder` is an `SSDInputEncoder` object. The same as 'encoded_labels',
but containing anchor box coordinates for all matched anchor boxes instead of ground truth coordinates.
This can be useful to visualize what anchor boxes are being matched to each ground truth box. Only available
in training mode.
* 'processed_labels': The processed, but not yet encoded labels. This is a list that contains for each
batch image a Numpy array with all ground truth boxes for that image. Only available if ground truth is available.
* 'filenames': A list containing the file names (full paths) of the images in the batch.
* 'image_ids': A list containing the integer IDs of the images in the batch. Only available if there
are image IDs available.
* 'evaluation-neutral': A nested list of lists of booleans. Each list contains `True` or `False` for every ground truth
bounding box of the respective image depending on whether that bounding box is supposed to be evaluation-neutral (`True`)
or not (`False`). May return `None` if there exists no such concept for a given dataset. An example for
evaluation-neutrality are the ground truth boxes annotated as "difficult" in the Pascal VOC datasets, which are
usually treated to be neutral in a model evaluation.
* 'inverse_transform': A nested list that contains a list of "inverter" functions for each item in the batch.
These inverter functions take (predicted) labels for an image as input and apply the inverse of the transformations
that were applied to the original image to them. This makes it possible to let the model make predictions on a
transformed image and then convert these predictions back to the original image. This is mostly relevant for
evaluation: If you want to evaluate your model on a dataset with varying image sizes, then you are forced to
transform the images somehow (e.g. by resizing or cropping) to make them all the same size. Your model will then
predict boxes for those transformed images, but for the evaluation you will need predictions with respect to the
original images, not with respect to the transformed images. This means you will have to transform the predicted
box coordinates back to the original image sizes. Note that for each image, the inverter functions for that
image need to be applied in the order in which they are given in the respective list for that image.
* 'original_images': A list containing the original images in the batch before any processing.
* 'original_labels': A list containing the original ground truth boxes for the images in this batch before any
processing. Only available if ground truth is available.
The order of the outputs in the tuple is the order of the list above. If `returns` contains a keyword for an
output that is unavailable, that output will be omitted from the yielded tuples and a warning will be raised.
keep_images_without_gt (bool, optional): If `False`, images for which there aren't any ground truth boxes before
any transformations have been applied will be removed from the batch. If `True`, such images will be kept
in the batch.
degenerate_box_handling (str, optional): How to handle degenerate boxes, which are boxes that have `xmax <= xmin` and/or
`ymax <= ymin`. Degenerate boxes can sometimes be in the dataset, or non-degenerate boxes can become degenerate
after they were processed by transformations. Note that the generator checks for degenerate boxes after all
transformations have been applied (if any), but before the labels were passed to the `label_encoder` (if one was given).
Can be one of 'warn' or 'remove'. If 'warn', the generator will merely print a warning to let you know that there
are degenerate boxes in a batch. If 'remove', the generator will remove degenerate boxes from the batch silently.
Yields:
The next batch as a tuple of items as defined by the `returns` argument.
'''
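# Hedged usage sketch (the augmentation object and encoder instance named here are
# assumptions about the surrounding library, not defined in this file):
#
#     train_generator = gen.generate(batch_size=32,
#                                    shuffle=True,
#                                    transformations=[ssd_data_augmentation],
#                                    label_encoder=ssd_input_encoder,
#                                    returns={'processed_images', 'encoded_labels'},
#                                    keep_images_without_gt=False)
#     batch_images, batch_labels = next(train_generator)    # one batch of images and encoded labels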
if self.dataset_size == 0:
raise DatasetError("Cannot generate batches because you did not load a dataset.")
#############################################################################################
# Warn if any of the set returns aren't possible.
#############################################################################################
if self.labels is None:
if any([ret in returns for ret in ['original_labels', 'processed_labels', 'encoded_labels', 'matched_anchors', 'evaluation-neutral']]):
warnings.warn("Since no labels were given, none of 'original_labels', 'processed_labels', 'evaluation-neutral', 'encoded_labels', and 'matched_anchors' " +
"are possible returns, but you set `returns = {}`. The impossible returns will be `None`.".format(returns))
elif label_encoder is None:
if any([ret in returns for ret in ['encoded_labels', 'matched_anchors']]):
warnings.warn("Since no label encoder was given, 'encoded_labels' and 'matched_anchors' aren't possible returns, " +
"but you set `returns = {}`. The impossible returns will be `None`.".format(returns))
elif not isinstance(label_encoder, SSDInputEncoder):
if 'matched_anchors' in returns:
warnings.warn("`label_encoder` is not an `SSDInputEncoder` object, therefore 'matched_anchors' is not a possible return, " +
"but you set `returns = {}`. The impossible returns will be `None`.".format(returns))
#############################################################################################
# Do a few preparatory things like maybe shuffling the dataset initially.
#############################################################################################
if shuffle:
objects_to_shuffle = [self.dataset_indices]
if not (self.filenames is None):
objects_to_shuffle.append(self.filenames)
if not (self.labels is None):
objects_to_shuffle.append(self.labels)
if not (self.image_ids is None):
objects_to_shuffle.append(self.image_ids)
if not (self.eval_neutral is None):
objects_to_shuffle.append(self.eval_neutral)
shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)
for i in range(len(objects_to_shuffle)):
objects_to_shuffle[i][:] = shuffled_objects[i]
if degenerate_box_handling == 'remove':
box_filter = BoxFilter(check_overlap=False,
check_min_area=False,
check_degenerate=True,
labels_format=self.labels_format)
# Override the labels formats of all the transformations to make sure they are set correctly.
if not (self.labels is None):
for transform in transformations:
transform.labels_format = self.labels_format
#############################################################################################
# Generate mini batches.
#############################################################################################
current = 0
while True:
batch_X, batch_y = [], []
if current >= self.dataset_size:
current = 0
#########################################################################################
# Maybe shuffle the dataset if a full pass over the dataset has finished.
#########################################################################################
if shuffle:
objects_to_shuffle = [self.dataset_indices]
if not (self.filenames is None):
objects_to_shuffle.append(self.filenames)
if not (self.labels is None):
objects_to_shuffle.append(self.labels)
if not (self.image_ids is None):
objects_to_shuffle.append(self.image_ids)
if not (self.eval_neutral is None):
objects_to_shuffle.append(self.eval_neutral)
shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)
for i in range(len(objects_to_shuffle)):
objects_to_shuffle[i][:] = shuffled_objects[i]
#########################################################################################
# Get the images, (maybe) image IDs, (maybe) labels, etc. for this batch.
#########################################################################################
# We prioritize our options in the following order:
# 1) If we have the images already loaded in memory, get them from there.
# 2) Else, if we have an HDF5 dataset, get the images from there.
# 3) Else, if we have neither of the above, we'll have to load the individual image
# files from disk.
batch_indices = self.dataset_indices[current:current+batch_size]
if not (self.images is None):
for i in batch_indices:
batch_X.append(self.images[i])
if not (self.filenames is None):
batch_filenames = self.filenames[current:current+batch_size]
else:
batch_filenames = None
elif not (self.hdf5_dataset is None):
for i in batch_indices:
batch_X.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))
if not (self.filenames is None):
batch_filenames = self.filenames[current:current+batch_size]
else:
batch_filenames = None
else:
batch_filenames = self.filenames[current:current+batch_size]
for filename in batch_filenames:
with Image.open(filename) as image:
batch_X.append(np.array(image, dtype=np.uint8))
# Get the labels for this batch (if there are any).
if not (self.labels is None):
batch_y = deepcopy(self.labels[current:current+batch_size])
else:
batch_y = None
if not (self.eval_neutral is None):
batch_eval_neutral = self.eval_neutral[current:current+batch_size]
else:
batch_eval_neutral = None
# Get the image IDs for this batch (if there are any).
if not (self.image_ids is None):
batch_image_ids = self.image_ids[current:current+batch_size]
else:
batch_image_ids = None
if 'original_images' in returns:
batch_original_images = deepcopy(batch_X) # The original, unaltered images
if 'original_labels' in returns:
batch_original_labels = deepcopy(batch_y) # The original, unaltered labels
current += batch_size
#########################################################################################
# Maybe perform image transformations.
#########################################################################################
batch_items_to_remove = [] # In case we need to remove any images from the batch, store their indices in this list.
batch_inverse_transforms = []
for i in range(len(batch_X)):
if not (self.labels is None):
# Convert the labels for this image to an array (in case they aren't already).
batch_y[i] = np.array(batch_y[i])
# If this image has no ground truth boxes, maybe we don't want to keep it in the batch.
if (batch_y[i].size == 0) and not keep_images_without_gt:
batch_items_to_remove.append(i)
batch_inverse_transforms.append([])
continue
# Apply any image transformations we may have received.
if transformations:
inverse_transforms = []
for transform in transformations:
if not (self.labels is None):
if ('inverse_transform' in returns) and ('return_inverter' in inspect.signature(transform).parameters):
batch_X[i], batch_y[i], inverse_transform = transform(batch_X[i], batch_y[i], return_inverter=True)
inverse_transforms.append(inverse_transform)
else:
batch_X[i], batch_y[i] = transform(batch_X[i], batch_y[i])
if batch_X[i] is None: # In case the transform failed to produce an output image, which is possible for some random transforms.
batch_items_to_remove.append(i)
batch_inverse_transforms.append([])
continue
else:
if ('inverse_transform' in returns) and ('return_inverter' in inspect.signature(transform).parameters):
batch_X[i], inverse_transform = transform(batch_X[i], return_inverter=True)
inverse_transforms.append(inverse_transform)
else:
batch_X[i] = transform(batch_X[i])
batch_inverse_transforms.append(inverse_transforms[::-1])
#########################################################################################
# Check for degenerate boxes in this batch item.
#########################################################################################
if not (self.labels is None):
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
if np.any(batch_y[i][:,xmax] - batch_y[i][:,xmin] <= 0)
# Python modules
# 3rd party modules
import numpy as np
from lmfit import Parameters
# Our modules
import vespa.analysis.chain_fit_identity as chain_fit_identity
import vespa.common.util.math_ as util_math
import vespa.common.util.generic_spectral as util_spectral
import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt
from vespa.common.constants import DEGREES_TO_RADIANS as DTOR
from vespa.analysis.constants import FitLineshapeModel, VoigtDefaultFixedT2, FitMacromoleculeMethod
from vespa.analysis.constants import FitOptimizeMethod as optmeth
from vespa.analysis.chain_base import Chain
LMFIT_METHODS = [optmeth.LMFIT_DEFAULT, optmeth.LMFIT_JACOBIAN]
class ChainFitVoigt(Chain):
"""
Building block object used to create a processing chain for MRS data.
Performs LCM (linear combination model) fit to the data. Fit model is made
up of spectrally simulated basis spectra for all metabolites.
"""
def __init__(self, dataset, block):
"""
Chain objects organize Algo (algorithm) calls by setting up access to
input data and parameters, and creating standard output values for View.
Base class sets convenience references to: self._block and self._dataset
self.data is always initialized as []
"""
super().__init__(dataset, block)
self.fit_function = self.lorgauss_internal
self.reset_results_arrays()
# book-keeping attributes
self.lmfit_fvar_names = []
@property
def nmet(self):
""" Number of metabolites to be fitted - varies depending on model """
if self._block is not None:
if self._block.set.prior_list is not None:
return len(self._block.set.prior_list)
return 0
def reset_results_arrays(self):
"""
Results array reset is in its own method because it may need to be
called at times other than just object initialization.
"""
nmet = self.nmet
nmmol = self._block.nmmol
nparam = self._block.nparam
spectral_dim0 = self._dataset.spectral_dims[0]
if len(self.data) != spectral_dim0:
self.data = np.zeros(spectral_dim0, complex)
self.yini = np.zeros((nmet+nmmol, spectral_dim0), complex)
self.yfit = np.zeros((nmet+nmmol, spectral_dim0), complex)
self.base = np.zeros(spectral_dim0, complex)
self.initial_values = np.zeros(nparam, float)
self.fit_results = np.zeros(nparam, float)
self.fit_baseline = np.zeros(spectral_dim0, complex)
self.weight_array = np.zeros(spectral_dim0, complex)
self.limits = np.zeros((2,nparam), float)
self.fitted_lw = 0.0
def run_global_init(self):
""""
Moved all of the global (one time) initialization code to this method
so we could package it in run() in an 'if' statement. This is in line
with making the 'fit all voxels' functionality as streamlined as
possible.
"""
block = self._block
set = self._block.set
prior = self._block.set.prior
self.spectral_dims = self._dataset.spectral_dims
self.nmmol = self._block.nmmol
self.nparam = self._block.nparam
self.init_b0 = 0.0
self.init_lw_hz = 3.0
self.init_ta = 0.8
self.init_tb = 0.03
self.init_ampl = None
self.init_area = None
self.limits = np.zeros((2,self.nparam+self.nmmol), float)
self.weight_array = np.zeros(self._dataset.spectral_dims[0], complex)
self.fit_baseline = 0.0 # needed for LORGAUSS call
self.fit_function = self.lorgauss_internal
self.fix_t2_center = VoigtDefaultFixedT2.CENTER
self.minmaxlw = [0,0]
# set up basis set for selected metabolites, collect all ppm locations
basis_mets = []
ppms = []
for name in set.prior_list:
basis_mets.append(prior.basis_set[name].fid.copy())
ppms += prior.basis_set[name].all_ppms
self.basis_mets = np.array(basis_mets)
self.peakpts = self._dataset.ppm2pts(np.array(ppms)) # for weight array calc
# set up basis set for macromolecules if needed
#self.macromol_model = set.macromol_model
self.basis_mmol = None
if set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
if set.macromol_single_basis_dataset:
tmp = set.macromol_single_basis_dataset.blocks['raw']
self.basis_mmol = tmp.data.copy()
# check results arrays for proper dimensionality
block.check_parameter_dimensions(self)
def run(self, voxels, entry='initial_only', statusbar=None, do_init=True):
"""
Run is typically called every time a processing setting is changed
in the parent (block) object. Run processes a single voxel at a time.
This object maintains previous run() results values until next run().
This allows the View to update without having to re-run the pipeline.
The 'entry' keyword adds flexibility to Block-Chain-View relationship.
"""
block = self._block
set = self._block.set
prior = self._block.set.prior
dataset = self._dataset
#----------------------------------------------------------------------
# Return with zero values if no metabolites are selected
if self.nmet < 1:
self.yini = self.yini * 0
voxel = voxels[0]
self.data = dataset.get_source_data('fit')
self.data = self.data[voxel[2],voxel[1],voxel[0],:]
plot_results = { 'fitted_lw' : 3.0,
'minmaxlw' : [1,5],
'init_b0' : 0.0,
'init_ph0' : -dataset.get_phase_0(voxel) * np.pi/180.0,
'init_ph1' : -dataset.get_phase_1(voxel),
'data' : self.data.copy(),
'weight_array' : self.data.copy() * 0,
'fit_baseline' : self.data.copy() * 0,
'yfit' : self.data.copy() * 0,
'yini' : self.data.copy() * 0,
'init_baseline': self.data.copy() * 0,
'mmol_area' : 1.0 }
return plot_results
#----------------------------------------------------------------------
# Do the one time global bits of code, if needed
if do_init:
self.run_global_init()
#----------------------------------------------------------------------
# Now process the current voxel
data_source = dataset.get_source_data('fit')
voxel = voxels[0] # because we got rid of for-loop
x,y,z = voxel # for convenience
self.iteration = 0 # global index used in functors as a trigger
self.voxel = voxel
self.statusbar = statusbar
# local copy of input data
self.data = data_source[z,y,x,:].copy()
# spectral chain needs update for this line to be valid
self.chain = dataset.get_source_chain('fit')
self.kodata = self.chain.kodata.copy()
# various default values
self.mmol_area = 1.0
# copy 'global' parameters, that DO change with voxel, from dataset
#
# NB. phase0/1 are inputs for 'manual' method, the init_ph0/1 are
# outputs from initval calcs. If 'manual' is selected, then the
# output value should be equal but negative to original. We use
# the init_ph0/1 to update the GUI (and mrs_dataset values) so
# the chain needs both input and output (I think).
self.phase0 = dataset.get_phase_0(voxel)
self.phase1 = dataset.get_phase_1(voxel)
self.init_ph0 = -dataset.get_phase_0(voxel) * np.pi / 180.0 # match units in util_initial_values
self.init_ph1 = -dataset.get_phase_1(voxel)
# copy block parameters, that DO change with voxel, from block
self.frequency_shift = dataset.get_frequency_shift(voxel)
self.fit_baseline = block.fit_baseline[:,x,y,z].copy()
self.init_baseline = self.fit_baseline.copy() * 0
# setup chain results arrays
self.initial_values = voigt_checkout(self.nmet, block.initial_values[:,x,y,z], dataset)
self.fit_results = voigt_checkout(self.nmet, block.fit_results[ :,x,y,z], dataset)
self.fit_stats = block.fit_stats[ :,x,y,z].copy()
self.cramer_rao = block.cramer_rao[:,x,y,z].copy()
self.confidence = block.confidence[:,x,y,z].copy()
# select the chain processing functor based on the entry point
if entry == 'initial_only':
funct_fit_voigt.do_processing_initial(self)
elif entry == 'full_fit' or entry == 'all':
funct_fit_voigt.do_processing_full_fit(self)
elif entry == 'plot_refresh':
funct_fit_voigt.do_processing_plot_refresh(self)
elif entry == 'output_refresh':
funct_fit_voigt.do_processing_output_refresh(self)
elif entry == 'voxel_change':
if np.sum(self.initial_values[0:self.nmet])==0.0:
flag_auto_initvals = True
else:
flag_auto_initvals = False
funct_fit_voigt.do_processing_voxel_change(self, flag_auto_initvals=flag_auto_initvals)
else:
print('oooops! - chain_fit_voigt "entry" point error ')
if statusbar:
statusbar.SetStatusText(' Fitting Done', 0)
# one last lw calc to refresh HTML window on opening VIFF file
self.fitted_lw, _ = util_spectral.voigt_width(self.fit_results[self.nmet*2], self.fit_results[self.nmet*2+1], dataset)
block.initial_values[:,x,y,z] = voigt_checkin(self.nmet, self.initial_values, dataset)
block.fit_results[ :,x,y,z] = voigt_checkin(self.nmet, self.fit_results, dataset)
block.fit_stats[ :,x,y,z] = self.fit_stats.copy()
block.fit_baseline[ :,x,y,z] = self.fit_baseline.copy()
block.cramer_rao[ :,x,y,z] = self.cramer_rao.copy()
block.confidence[ :,x,y,z] = self.confidence.copy()
# Initial value algorithms change b0, ph0/ph1. To be well behaved we ask
# the dataset object to save these to the 'spectral' block for us.
#
# NB. In CLI mode, call this chain with 'initial_only' first, then update
# the 'spectral' block and only then call this chain with 'full_fit'
dataset.set_frequency_shift(dataset.get_frequency_shift(voxel) + self.init_b0, voxel)
dataset.set_phase_0(-self.init_ph0 * 180.0 / np.pi, voxel)
dataset.set_phase_1(-self.init_ph1, voxel)
# Return values specific to calling Tab used to update its self.view (plot_panel_spectrum object).
plot_results = { 'fitted_lw' : self.fitted_lw,
'minmaxlw' : self.minmaxlw,
'init_b0' : self.init_b0,
'init_ph0' : self.init_ph0 * 180.0 / np.pi,
'init_ph1' : self.init_ph1,
'data' : self.data.copy(),
'weight_array' : self.weight_array.copy(),
'fit_baseline' : self.fit_baseline.copy(),
'yfit' : self.yfit.copy(),
'yini' : self.yini.copy(),
'init_baseline' : self.init_baseline.copy(),
'mmol_area' : self.mmol_area }
return plot_results
def create_param_labels(self):
""" Create list of unique parameter labels """
plabel = []
unique_abbr = [item.replace('-', '_') for item in self._dataset.prior_list_unique]
for item in unique_abbr: plabel.append('area_' + item)
for item in unique_abbr: plabel.append('freq_' + item)
plabel.append('ta')
plabel.append('tb')
plabel.append('ph0')
plabel.append('ph1')
if self._block.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
plabel.append('mmol_area')
plabel.append('mmol_freq')
return plabel
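# Example of the resulting order (hypothetical two-metabolite prior list ['naa', 'cr']):
#     ['area_naa', 'area_cr', 'freq_naa', 'freq_cr', 'ta', 'tb', 'ph0', 'ph1']
# with 'mmol_area' and 'mmol_freq' appended when the single-basis macromolecule model is on.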
def lorgauss_internal_lmfit_dfunc(self, params, *args, **kwargs):
"""
This is the Jacobian callback in the format that LMFIT's Minimizer class
expects for the 'least_squares' algorithm.
This returns the weighted partial derivative functions all_pders * ww
as a single numpy (n,m) float array, where where n = # of variable
parameters (versus dependent params) and m = # of spectral points. In
this case, the real and imaginary vectors have been concatenated into
a single array, so m = 2 * npts_spectral_zerofilled.
Note. The vespa model (for one example) might have 48 parameters, but
only 42 are variable parameters while the other 6 are dependent
expressions (e.g. freq_naag = freq_naa + 0.04). The LMFIT algorithm
only passes in the 42 'free' params, and I need to expand that into the
actual 48 for the self.lorgauss_internal() call to work properly. On
return, I need to remove the pder entries for the dependent parameters
(and return just a 42 x npts array).
params - these are just the free variable values, we need to expand this
into a full list/dict of free and evaluated expression variables
for the call to self.lorgauss_internal(). This can be a list of
current variable values, OR it can be an ordered dict of LMFIT
Paramters.
"""
ww = np.concatenate([self.weight_array, self.weight_array])
# expand list of free variable values into full list of free and evaluated expression values
all_params = self.all_params.copy() # copy of full param set
for name, val in zip(self.lmfit_fvar_names, params):
all_params[name].value = val # update free params to current pass values
all_params.update_constraints() # evaluate expression params values
yfit, all_pders = self.lorgauss_internal(all_params, pderflg=True)
# Re-sort all_pders array if inequality expressions present in Parameters list
#
# - pder returns in 'Vespa' order (area(s), freq(s), ta, tb, ph0, ph1, mmol_area, mmol_freq)
# - if inequality control vars have been added to end of Parameters list (typical in Vespa
# model) then we have to re-sort
# - usually things like 'freq_naag' have to be relocated to position where 'delta_freq_naa'
# was located in the 'params' variable that was input to this method
pders = []
indxs = []
all_names = list(all_params.keys())
for key in self.lmfit_fvar_names:
if 'delta_' in key:
indx = all_names.index(key.replace('delta_', ''))
pders.append(-1 * all_pders[indx, :]) # -1 is empirical vs LMFIT, bjs 3/2021
else:
indx = all_names.index(key)
pders.append(all_pders[indx, :])
indxs.append(indx)
pders = np.array(pders)
# expand complex to 1D and apply weighting scheme
dfunc = []
for pder in pders:
dfunc.append(np.concatenate([pder.real, pder.imag]) * ww * (-1)) # -1 is empirical vs LMFIT, bjs 3/2021
dfunc = np.array(dfunc)
return dfunc.T # empirical vs LMFIT requirement
def lorgauss_internal_lmfit(self, a, report_stats=False):
"""
This is the residual callback in the format that LMFIT's Minimizer class expects.
This returns the weighted difference (data - yfit) * ww as a single
numpy float array, where the real and imaginary vectors have been
concatenated into a single array.
a - fully expanded list of parameters, free and evaluated expressions
"""
data = self.data_scale.copy()
ww = self.weight_array
yfit, _ = self.lorgauss_internal(a, pderflg=False)
yfit = np.concatenate([yfit.real, yfit.imag])
data = np.concatenate([data.real, data.imag])
ww = np.concatenate([ww, ww])
if report_stats:
nfree = np.size(yfit)-len(list(a.keys()))
wchisqr = np.sum(ww*(data-yfit)**2)/nfree # got from CCFIT method
chisqr = np.sum( (data-yfit)**2)/nfree
return wchisqr, chisqr
else:
y = (data - yfit) * ww
return y
def lorgauss_internal(self, a, pderflg=True,
nobase=False,
indiv=False,
finalwflg=False):
"""
=========
Arguments
=========
**a:** [list][float] parameters for model function
**dataset:** [object][dataset (or subset)] object containing fitting
parameters
**pderflg:** [keyword][bool][default=True] flag, if True also compute and
return the partial derivatives of the model with respect to each parameter
**nobase:** [keyword][bool][default=False] flag, do not include
baseline contribs from (*dood).basarr
**indiv:** [keyword][bool][default=False] flag, return individual
metabolites, not summed total of all
**finalwflg:** [keyword][bool][default=False] flag, if True return only the
fitted lineshape linewidth (FWHM, in Hz) instead of the model function
===========
Description
===========
Returns the parameterized metabolite model function.
A contains : [[am],[fr],Ta,Tb,ph0,ph1] - LorGauss complex
Peak ampls and freqs are taken from the DB info in info,
so the values in [am] and [fr] are relative multipliers
and additives respectively. That is why there is only
one value for each compound in each array
If the relfreq flag is ON, then [fr] is a single value that
is added to each peak freq equivalently. Ie. the whole
spectrum can shift, but relative ppm separations between
all metabolites are maintained exactly. If the flag is OFF,
then metabs may shift independently from one another,
however, within groups of peaks belonging to the same
metabolite, relative ppm separations are maintained.
am - peak amplitude
fr - peak frequency offsets in PPM
Ta - T2 decay constant in sec
Tb - T2 star decay const in sec
ph0/1 - zero/first order phase in degrees
coef - are the spline coefs for the lineshape, knot locations are in info
======
Syntax
======
::
f = self.lorgauss_internal(a, pderflg = False,
nobase = False,
indiv = False,
finalwflg = False)
"""
ds = self._dataset
set = self._block.set
# parse input parameters
if isinstance(a, Parameters):
v = a.valuesdict()
a = np.array([item[1] for item in list(v.items())])
# Setup constants and flags
nmet = self.nmet
npts = ds.raw_dims[0]
nptszf = int(round(npts * ds.zero_fill_multiplier))
td = 1.0/ds.sw
piv = ds.ppm2pts(ds.phase_1_pivot, acq=True)
arr1 = np.zeros(int(npts),float) + 1.0
f = np.zeros((int(nmet),int(nptszf)),complex)
mf = np.zeros((int(nptszf),),complex)
t = (np.arange(nmet * npts) % npts) * td
t.shape = nmet, npts
# get prior max peak ppm vals for metabs which are flagged ON
peaks = np.array(set.prior_peak_ppm)
# setup Lineshape
if set.lineshape_model != FitLineshapeModel.GAUSS:
# voigt and lorentzian models
expo = t/a[nmet*2] + (t/a[nmet*2+1])**2
lshape = util_math.safe_exp(-expo)
else:
# Gaussian lineshape - allows user to set a fixed T2 value for each
# metabolite stored in a 'T2 lineshape array'. But, this model still
# has a Tb parameter, though tightly constrained. We set it to 0.250
# +/- 0.001 sec, a reasonable T2 value, to make the search space
# happy. The fitting function is adjusted for each metab by the delta
# from 0.250.
ma = (self.fix_t2_center - a[nmet*2]) + set.prior_fix_t2 # delta for Ta param that is set at 0.25 sec
ma = t/np.outer(ma, arr1)
mb = (t / a[nmet*2+1])**2
expo = ma+mb
lshape = util_math.safe_exp(-expo)
if finalwflg:
finalw = lshape[:,0]
finalw = util_spectral.full_width_half_max(np.fft.fft(util_spectral.chop(finalw))/len(finalw)) * ds.spectral_hpp
return finalw
# if FID, then for correct area, first point must be divided by 2
fre = a[nmet:nmet*2] - ds.ppm2hz(peaks)*2.0*np.pi # shift in Radians from basis center freq
fre = np.exp( 1j * (np.outer(fre, arr1)) * t ) # np.outer gives one row of phase evolution per metabolite
amp = np.outer(a[0:nmet], arr1)
ph0 = np.outer(np.exp(1j * (np.zeros(nmet) + a[nmet*2+2])), arr1)
tmp = self.basis_mets.copy() * amp * fre * ph0 * lshape
f[:,0:npts] = tmp
f[:,0] = f[:,0] / 2.0
# Calc Phase1
phase1 = np.exp(1j * (a[nmet*2+3]*DTOR*(np.arange(nptszf,dtype=float)-piv)/nptszf))
# Calc Mmol - we will calc mmol pders later if needed
if (set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET):
if self.basis_mmol is not None:
mdat = self.basis_mmol.copy() * ((((np.arange(npts) + 1) % 2) * 2) - 1) # chop the basis fn
mamp = a[self.nparam - 2]
mfre = np.exp(1j * a[self.nparam - 1] * np.arange(npts) * td) # freq roll shift
mph0 = np.exp(1j * a[nmet*2 + 2]) # global ph0
mdat *= mamp * mfre * mph0
mf[0:npts] = mdat
mf[0] = mf[0] / 2.0
mind = mf.copy() # save copy of indiv mmol basis functions
# Calculate Partial Derivatives
pder = None
if pderflg:
pder = np.zeros((int(len(a)),int(nptszf)), complex)
pall = np.sum(f,axis=0) # all lines added
pind = f
tt = np.zeros(int(nptszf),float)
tt[0:npts] = np.arange(npts,dtype=float) * td
for i in range(nmet): # Calc the Ampl and Freq pders
pder[i,:] = (np.fft.fft(pind[i,:] / a[i] )/nptszf) * phase1
pder[i+nmet,:] = (np.fft.fft(tt * 1j * pind[i,:])/nptszf) * phase1
pder[nmet*2+0,:] = (np.fft.fft( tt * pall/(a[nmet*2+0]**2))/nptszf) * phase1
pder[nmet*2+1,:] = (np.fft.fft(2.0*(tt**2) * pall/(a[nmet*2+1]**3))/nptszf) * phase1
if set.optimize_method in LMFIT_METHODS:
# flags below are set in funct_fit_voigt.py only if both metabs in plabel
plabel = self.create_param_labels()
if set.optimize_constrain_ppm_naa_naag:
pder[plabel.index('freq_naa')] += pder[plabel.index('freq_naag')]
if set.optimize_constrain_ppm_cr_pcr:
pder[plabel.index('freq_cr')] += pder[plabel.index('freq_pcr')]
if set.optimize_constrain_ppm_gpc_pcho:
pder[plabel.index('freq_gpc')] += pder[plabel.index('freq_pcho')]
if set.optimize_constrain_ppm_cr2_pcr2:
pder[plabel.index('freq_cr2')] += pder[plabel.index('freq_pcr2')]
if set.optimize_constrain_ppm_glu_gln:
pder[plabel.index('freq_glu')] += pder[plabel.index('freq_gln')]
if set.optimize_constrain_ppm_tau_glc:
pder[plabel.index('freq_tau')] += pder[plabel.index('freq_glc')]
if set.lineshape_model == FitLineshapeModel.GAUSS:
pder[nmet*2+0,:] *= -1e-6 # empirical from LMFIT tests
pder[nmet*2+2,:] = (np.fft.fft(1j*pall)/nptszf) * phase1
if self.basis_mmol is not None:
pder[nmet*2+2, :] += (np.fft.fft(1j*mf)/nptszf) * phase1
pder[nmet*2+3,:] = (np.fft.fft(pall)/nptszf) * (1j*DTOR*(np.arange(nptszf,dtype=float)-piv)/nptszf) * phase1
if self.basis_mmol is not None:
pder[nmet*2+3,:] += (np.fft.fft(mf)/nptszf) * (1j*DTOR*(np.arange(nptszf,dtype=float)-piv)/nptszf) * phase1
# Do the FFT
if indiv: # return individual lines
if nmet != 1:
for i in range(nmet):
f[i,:] = (np.fft.fft(f[i,:])/nptszf) * phase1
else:
f = (np.fft.fft(f[0,:])/nptszf) * phase1
else: # return summed spectrum
if (nmet) != 1:
f = np.sum(f,axis=0)
# -*- coding: utf-8 -*-
# test_imagecodecs.py
# Copyright (c) 2018-2019, <NAME>
# Copyright (c) 2018-2019, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unittests for the imagecodecs package.
:Author:
`<NAME> <https://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics. University of California, Irvine
:License: 3-clause BSD
:Version: 2019.12.3
"""
from __future__ import division, print_function
import sys
import os
import io
import re
import glob
import tempfile
import os.path as osp
import pytest
import numpy
from numpy.testing import assert_array_equal, assert_allclose
try:
import tifffile
except ImportError:
tifffile = None
try:
import czifile
except ImportError:
czifile = None
if (
'imagecodecs_lite' in os.getcwd() or
osp.exists(osp.join(osp.dirname(__file__), '..', 'imagecodecs_lite'))
):
try:
import imagecodecs_lite as imagecodecs
from imagecodecs_lite import _imagecodecs_lite # noqa
from imagecodecs_lite import imagecodecs as imagecodecs_py
except ImportError:
pytest.exit('the imagecodec-lite package is not installed')
lzma = zlib = bz2 = zstd = lz4 = lzf = blosc = bitshuffle = None
_jpeg12 = _jpegls = _zfp = None
else:
try:
import imagecodecs
import imagecodecs.imagecodecs as imagecodecs_py
from imagecodecs.imagecodecs import (lzma, zlib, bz2, zstd, lz4, lzf,
blosc, bitshuffle)
from imagecodecs import _imagecodecs # noqa
except ImportError:
pytest.exit('the imagecodec package is not installed')
try:
from imagecodecs import _jpeg12
except ImportError:
_jpeg12 = None
try:
from imagecodecs import _jpegls
except ImportError:
_jpegls = None
try:
from imagecodecs import _zfp
except ImportError:
_zfp = None
IS_PY2 = sys.version_info[0] == 2
IS_32BIT = sys.maxsize < 2**32
TEST_DIR = osp.dirname(__file__)
class TempFileName():
"""Temporary file name context manager."""
def __init__(self, name=None, suffix='', remove=True):
self.remove = bool(remove)
if not name:
self.name = tempfile.NamedTemporaryFile(prefix='test_',
suffix=suffix).name
else:
self.name = osp.join(tempfile.gettempdir(),
'test_%s%s' % (name, suffix))
def __enter__(self):
return self.name
def __exit__(self, exc_type, exc_value, traceback):
if self.remove:
try:
os.remove(self.name)
except Exception:
pass
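# Hedged usage sketch of the context manager above (mirrors how the tests below use it):
#
#     with TempFileName('example', suffix='.npy') as filename:
#         imagecodecs.imwrite(filename, data, level=99)
#         im = imagecodecs.imread(filename)
#     # the temporary file is removed on exit unless remove=False was passed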
def test_version():
"""Assert imagecodecs versions match docstrings."""
ver = ':Version: ' + imagecodecs.__version__
assert ver in __doc__
assert ver in imagecodecs.__doc__
assert imagecodecs.version().startswith('imagecodecs')
assert ver in imagecodecs_py.__doc__
if zlib:
assert imagecodecs.version(dict)['zlib'].startswith('1.')
@pytest.mark.skipif(not hasattr(imagecodecs, 'imread'),
reason='imread function missing')
@pytest.mark.filterwarnings('ignore:Possible precision loss')
def test_imread_imwrite():
"""Test imread and imwrite functions."""
imread = imagecodecs.imread
imwrite = imagecodecs.imwrite
data = image_data('rgba', 'uint8')
# codec from file extension
with TempFileName(suffix='.npy') as filename:
imwrite(filename, data, level=99)
im, codec = imread(filename, return_codec=True)
assert codec == imagecodecs.numpy_decode
assert_array_equal(data, im)
with TempFileName() as filename:
# codec from name
imwrite(filename, data, codec='numpy')
im = imread(filename, codec='npy')
assert_array_equal(data, im)
# codec from function
imwrite(filename, data, codec=imagecodecs.numpy_encode)
im = imread(filename, codec=imagecodecs.numpy_decode)
assert_array_equal(data, im)
# codec from name list
im = imread(filename, codec=['npz'])
assert_array_equal(data, im)
# autodetect
im = imread(filename)
assert_array_equal(data, im)
# fail
with pytest.raises(ValueError):
imwrite(filename, data)
with pytest.raises(ValueError):
im = imread(filename, codec='unknown')
def test_none():
"""Test NOP codec."""
encode = imagecodecs.none_encode
decode = imagecodecs.none_decode
data = b'None'
assert encode(data) is data
assert decode(data) is data
def test_bitorder():
"""Test BitOrder codec with bytes."""
decode = imagecodecs.bitorder_decode
data = b'\x01\x00\x9a\x02'
reverse = b'\x80\x00Y@'
# return new string
assert decode(data) == reverse
assert data == b'\x01\x00\x9a\x02'
# provide output
out = bytearray(len(data))
decode(data, out=out)
assert out == reverse
assert data == b'\x01\x00\x9a\x02'
# inplace
decode(data, out=data)
assert data == reverse
# bytes range
assert BYTES == decode(readfile('bytes.bitorder.bin'))
def test_bitorder_ndarray():
"""Test BitOrder codec with ndarray."""
decode = imagecodecs.bitorder_decode
data = numpy.array([1, 666], dtype='uint16')
reverse = numpy.array([128, 16473], dtype='uint16')
# return new array
assert_array_equal(decode(data), reverse)
# inplace
decode(data, out=data)
assert_array_equal(data, numpy.array([128, 16473], dtype='uint16'))
# array view
data = numpy.array([[1, 666, 1431655765, 62],
[2, 667, 2863311530, 32],
[3, 668, 1431655765, 30]], dtype='uint32')
reverse = numpy.array([[1, 666, 1431655765, 62],
[2, 16601, 1431655765, 32],
[3, 16441, 2863311530, 30]], dtype='uint32')
assert_array_equal(decode(data[1:, 1:3]), reverse[1:, 1:3])
# array view inplace
decode(data[1:, 1:3], out=data[1:, 1:3])
assert_array_equal(data, reverse)
def test_packints_decode():
"""Test PackInts decoder."""
decode = imagecodecs.packints_decode
decoded = decode(b'', 'B', 1)
assert len(decoded) == 0
decoded = decode(b'a', 'B', 1)
assert tuple(decoded) == (0, 1, 1, 0, 0, 0, 0, 1)
decoded = decode(b'ab', 'B', 2)
assert tuple(decoded) == (1, 2, 0, 1, 1, 2, 0, 2)
decoded = decode(b'abcd', 'B', 3)
assert tuple(decoded) == (3, 0, 2, 6, 1, 1, 4, 3, 3, 1)
decoded = decode(numpy.frombuffer(b'abcd', dtype='uint8'), 'B', 3)
assert tuple(decoded) == (3, 0, 2, 6, 1, 1, 4, 3, 3, 1)
PACKBITS_DATA = [
(b'', b''),
(b'X', b'\x00X'),
(b'123', b'\x02123'),
(b'112112', b'\xff1\x002\xff1\x002'),
(b'1122', b'\xff1\xff2'),
(b'1' * 126, b'\x831'),
(b'1' * 127, b'\x821'),
(b'1' * 128, b'\x811'),
(b'1' * 127 + b'foo', b'\x821\x00f\xffo'),
(b'12345678' * 16, # literal 128
b'\x7f1234567812345678123456781234567812345678123456781234567812345678'
b'1234567812345678123456781234567812345678123456781234567812345678'),
(b'12345678' * 17,
b'~1234567812345678123456781234567812345678123456781234567812345678'
b'123456781234567812345678123456781234567812345678123456781234567\x08'
b'812345678'),
(b'1' * 128 + b'12345678' * 17,
b'\x821\xff1~2345678123456781234567812345678123456781234567812345678'
b'1234567812345678123456781234567812345678123456781234567812345678'
b'12345678\x0712345678'),
(b'\xaa\xaa\xaa\x80\x00\x2a\xaa\xaa\xaa\xaa\x80\x00'
b'\x2a\x22\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa',
b'\xfe\xaa\x02\x80\x00\x2a\xfd\xaa\x03\x80\x00\x2a\x22\xf7\xaa')]
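# PackBits is a byte-oriented run-length scheme: a count byte n with
# 0 <= n <= 127 means "copy the next n+1 bytes literally", a count byte with
# 129 <= n <= 255 means "repeat the next byte 257-n times", and n == 128 is a
# no-op. A minimal reference decoder (illustrative sketch only, not used by
# the tests below):
def _packbits_reference_decode(data):
    out = bytearray()
    i = 0
    while i < len(data):
        n = data[i]
        if n < 128:       # literal run of n + 1 bytes
            out += data[i + 1:i + 2 + n]
            i += 2 + n
        elif n > 128:     # replicate the next byte 257 - n times
            out += data[i + 1:i + 2] * (257 - n)
            i += 2
        else:             # 128: no operation
            i += 1
    return bytes(out)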
@pytest.mark.parametrize('data', range(len(PACKBITS_DATA)))
@pytest.mark.parametrize('codec', ['encode', 'decode'])
def test_packbits(codec, data):
"""Test PackBits codec."""
encode = imagecodecs.packbits_encode
decode = imagecodecs.packbits_decode
uncompressed, compressed = PACKBITS_DATA[data]
if codec == 'decode':
assert decode(compressed) == uncompressed
elif codec == 'encode':
try:
assert encode(uncompressed) == compressed
except AssertionError:
# roundtrip
assert decode(encode(uncompressed)) == uncompressed
def test_packbits_nop():
"""Test PackBits decoding empty data."""
decode = imagecodecs.packbits_decode
assert decode(b'\x80') == b''
assert decode(b'\x80\x80') == b''
@pytest.mark.parametrize('output', [None, 'array'])
@pytest.mark.parametrize('codec', ['encode', 'decode'])
def test_packbits_array(codec, output):
"""Test PackBits codec with arrays."""
encode = imagecodecs.packbits_encode
decode = imagecodecs.packbits_decode
uncompressed, compressed = PACKBITS_DATA[-1]
shape = (2, 7, len(uncompressed))
data = numpy.empty(shape, dtype='uint8')
data[..., :] = numpy.frombuffer(uncompressed, dtype='uint8')
compressed = compressed * (shape[0] * shape[1])
if codec == 'encode':
if output == 'array':
out = numpy.empty(data.size, data.dtype)
assert_array_equal(encode(data, out=out),
numpy.frombuffer(compressed, dtype='uint8'))
else:
assert encode(data) == compressed
else:
if output == 'array':
out = numpy.empty(data.size, data.dtype)
assert_array_equal(decode(compressed, out=out), data.flat)
else:
assert decode(compressed) == data.tobytes()
@pytest.mark.parametrize('output', ['new', 'out', 'inplace'])
@pytest.mark.parametrize('codec', ['encode', 'decode'])
@pytest.mark.parametrize(
'kind', ['u1', 'u2', 'u4', 'u8', 'i1', 'i2', 'i4', 'i8', 'f4', 'f8', 'B',
pytest.param('b', marks=pytest.mark.skipif(
sys.version_info[0] == 2, reason='Python 2'))])
@pytest.mark.parametrize('func', ['delta', 'xor'])
def test_delta(output, kind, codec, func):
"""Test Delta codec."""
if func == 'delta':
encode = imagecodecs.delta_encode
decode = imagecodecs.delta_decode
encode_py = imagecodecs_py.delta_encode
# decode_py = imagecodecs_py.imagecodecs.delta_decode
elif func == 'xor':
encode = imagecodecs.xor_encode
decode = imagecodecs.xor_decode
encode_py = imagecodecs_py.xor_encode
# decode_py = imagecodecs_py.imagecodecs.xor_decode
bytetype = bytearray
if kind == 'b':
bytetype = bytes
kind = 'B'
axis = -2 # do not change
dtype = numpy.dtype(kind)
if kind[0] in 'iuB':
low = numpy.iinfo(dtype).min
high = numpy.iinfo(dtype).max
data = numpy.random.randint(low, high, size=33 * 31 * 3,
dtype=dtype).reshape(33, 31, 3)
else:
low, high = -1e5, 1e5
data = numpy.random.randint(low, high, size=33 * 31 * 3,
dtype='i4').reshape(33, 31, 3)
data = data.astype(dtype)
data[16, 14] = [0, 0, 0]
data[16, 15] = [low, high, low]
data[16, 16] = [high, low, high]
data[16, 17] = [low, high, low]
data[16, 18] = [high, low, high]
data[16, 19] = [0, 0, 0]
if kind == 'B':
# data = data.reshape(-1)
data = data.tobytes()
diff = encode_py(data, axis=0)
if output == 'new':
if codec == 'encode':
encoded = encode(data, out=bytetype)
assert encoded == diff
elif codec == 'decode':
decoded = decode(diff, out=bytetype)
assert decoded == data
elif output == 'out':
if codec == 'encode':
encoded = bytetype(len(data))
encode(data, out=encoded)
assert encoded == diff
elif codec == 'decode':
decoded = bytetype(len(data))
decode(diff, out=decoded)
assert decoded == data
elif output == 'inplace':
if codec == 'encode':
encoded = bytetype(data)
encode(encoded, out=encoded)
assert encoded == diff
elif codec == 'decode':
decoded = bytetype(diff)
decode(decoded, out=decoded)
assert decoded == data
else:
# if func == 'xor' and kind in ('f4', 'f8'):
# with pytest.raises(ValueError):
# encode(data, axis=axis)
# pytest.skip("XOR codec not implemented for float data")
diff = encode_py(data, axis=-2)
if output == 'new':
if codec == 'encode':
encoded = encode(data, axis=axis)
assert_array_equal(encoded, diff)
elif codec == 'decode':
decoded = decode(diff, axis=axis)
assert_array_equal(decoded, data)
elif output == 'out':
if codec == 'encode':
encoded = numpy.zeros_like(data)
encode(data, axis=axis, out=encoded)
assert_array_equal(encoded, diff)
elif codec == 'decode':
decoded = numpy.zeros_like(data)
decode(diff, axis=axis, out=decoded)
assert_array_equal(decoded, data)
elif output == 'inplace':
if codec == 'encode':
encoded = data.copy()
encode(encoded, axis=axis, out=encoded)
assert_array_equal(encoded, diff)
elif codec == 'decode':
decoded = diff.copy()
decode(decoded, axis=axis, out=decoded)
assert_array_equal(decoded, data)
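# Delta encoding keeps the first item and stores successive differences along
# one axis, e.g. [1, 3, 6, 10] becomes [1, 2, 3, 4]; XOR encoding stores
# item ^ previous_item instead. A quick numpy sketch of the 1-D idea
# (illustrative only):
#   a = numpy.array([1, 3, 6, 10], dtype='int32')
#   d = numpy.concatenate([a[:1], numpy.diff(a)])   # -> array([1, 2, 3, 4])
#   numpy.cumsum(d)                                 # -> array([ 1,  3,  6, 10])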
@pytest.mark.parametrize('output', ['new', 'out'])
@pytest.mark.parametrize('codec', ['encode', 'decode'])
@pytest.mark.parametrize('endian', ['le', 'be'])
@pytest.mark.parametrize('planar', ['rgb', 'rrggbb'])
def test_floatpred(planar, endian, output, codec):
"""Test FloatPred codec."""
encode = imagecodecs.floatpred_encode
decode = imagecodecs.floatpred_decode
data = numpy.fromfile(
datafiles('rgb.bin'), dtype='<f4').reshape(33, 31, 3)
if planar == 'rgb':
axis = -2
if endian == 'le':
encoded = numpy.fromfile(
datafiles('rgb.floatpred_le.bin'), dtype='<f4')
encoded = encoded.reshape(33, 31, 3)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
elif endian == 'be':
data = data.astype('>f4')
encoded = numpy.fromfile(
datafiles('rgb.floatpred_be.bin'), dtype='>f4')
encoded = encoded.reshape(33, 31, 3)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
elif planar == 'rrggbb':
axis = -1
data = numpy.ascontiguousarray(numpy.moveaxis(data, 2, 0))
if endian == 'le':
encoded = numpy.fromfile(
datafiles('rrggbb.floatpred_le.bin'), dtype='<f4')
encoded = encoded.reshape(3, 33, 31)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
elif endian == 'be':
data = data.astype('>f4')
encoded = numpy.fromfile(
datafiles('rrggbb.floatpred_be.bin'), dtype='>f4')
encoded = encoded.reshape(3, 33, 31)
if output == 'new':
if codec == 'decode':
assert_array_equal(decode(encoded, axis=axis), data)
elif codec == 'encode':
assert_array_equal(encode(data, axis=axis), encoded)
elif output == 'out':
out = numpy.empty_like(data)
if codec == 'decode':
decode(encoded, axis=axis, out=out)
assert_array_equal(out, data)
elif codec == 'encode':
out = numpy.empty_like(data)
encode(data, axis=axis, out=out)
assert_array_equal(out, encoded)
def test_lzw_msb():
"""Test LZW decoder with MSB."""
# TODO: add test_lzw_lsb
decode = imagecodecs.lzw_decode
for data, decoded in [
(b'\x80\x1c\xcc\'\x91\x01\xa0\xc2m6\x99NB\x03\xc9\xbe\x0b'
b'\x07\x84\xc2\xcd\xa68|"\x14 3\xc3\xa0\xd1c\x94\x02\x02\x80',
b'say hammer yo hammer mc hammer go hammer'),
(b'\x80\x18M\xc6A\x01\xd0\xd0e\x10\x1c\x8c\xa73\xa0\x80\xc7\x02'
b'\x10\x19\xcd\xe2\x08\x14\x10\xe0l0\x9e`\x10\x10\x80',
b'and the rest can go and play'),
(b'\x80\x18\xcc&\xe19\xd0@t7\x9dLf\x889\xa0\xd2s',
b"can't touch this"),
(b'\x80@@', b'')]:
assert decode(data) == decoded
@pytest.mark.parametrize('output', ['new', 'size', 'ndarray', 'bytearray'])
def test_lzw_decode(output):
"""Test LZW decoder of input with horizontal differencing."""
decode = imagecodecs.lzw_decode
delta_decode = imagecodecs.delta_decode
data = readfile('bytes.lzw_horizontal.bin')
decoded_size = len(BYTES)
if output == 'new':
decoded = decode(data)
decoded = numpy.frombuffer(decoded, 'uint8').reshape(16, 16)
delta_decode(decoded, out=decoded, axis=-1)
assert_array_equal(BYTESIMG, decoded)
elif output == 'size':
decoded = decode(data, out=decoded_size)
decoded = numpy.frombuffer(decoded, 'uint8').reshape(16, 16)
delta_decode(decoded, out=decoded, axis=-1)
assert_array_equal(BYTESIMG, decoded)
# with pytest.raises(RuntimeError):
decode(data, buffersize=32, out=decoded_size)
elif output == 'ndarray':
decoded = numpy.empty_like(BYTESIMG)
decode(data, out=decoded.reshape(-1))
delta_decode(decoded, out=decoded, axis=-1)
assert_array_equal(BYTESIMG, decoded)
elif output == 'bytearray':
decoded = bytearray(decoded_size)
decode(data, out=decoded)
decoded = numpy.frombuffer(decoded, 'uint8').reshape(16, 16)
delta_decode(decoded, out=decoded, axis=-1)
assert_array_equal(BYTESIMG, decoded)
def test_lzw_decode_image_noeoi():
"""Test LZW decoder of input without EOI 512x512u2."""
decode = imagecodecs.lzw_decode
fname = datafiles('image_noeoi.lzw.bin')
with open(fname, 'rb') as fh:
encoded = fh.read()
fname = datafiles('image_noeoi.bin')
with open(fname, 'rb') as fh:
decoded_known = fh.read()
# new output
decoded = decode(encoded)
assert decoded == decoded_known
# provide output
decoded = bytearray(len(decoded))
decode(encoded, out=decoded)
assert decoded == decoded_known
# truncated output
decoded = bytearray(100)
decode(encoded, out=decoded)
assert len(decoded) == 100
@pytest.mark.skipif(not hasattr(imagecodecs, 'blosc_decode'),
reason='compression codecs missing')
@pytest.mark.parametrize('output', ['new', 'out', 'size', 'excess', 'trunc'])
@pytest.mark.parametrize('length', [0, 2, 31 * 33 * 3])
@pytest.mark.parametrize('codec', ['encode', 'decode'])
@pytest.mark.parametrize('module', [
'zlib', 'bz2',
pytest.param('blosc',
marks=pytest.mark.skipif(blosc is None,
reason='import blosc')),
pytest.param('lzma',
marks=pytest.mark.skipif(lzma is None,
reason='import lzma')),
pytest.param('zstd',
marks=pytest.mark.skipif(zstd is None,
reason='import zstd')),
pytest.param('lzf',
marks=pytest.mark.skipif(lzf is None,
reason='import lzf')),
pytest.param('lz4',
marks=pytest.mark.skipif(lz4 is None,
reason='import lz4')),
pytest.param('lz4h',
marks=pytest.mark.skipif(lz4 is None,
reason='import lz4')),
pytest.param('bitshuffle',
marks=pytest.mark.skipif(bitshuffle is None,
reason='import bitshuffle'))
])
def test_compressors(module, codec, output, length):
"""Test various non-image codecs."""
if length:
data = numpy.random.randint(255, size=length, dtype='uint8').tobytes()
else:
data = b''
if module == 'blosc':
encode = imagecodecs.blosc_encode
decode = imagecodecs.blosc_decode
level = 9
encoded = blosc.compress(data, clevel=level)
elif module == 'zlib':
encode = imagecodecs.zlib_encode
decode = imagecodecs.zlib_decode
level = 5
encoded = zlib.compress(data, level)
elif module == 'lzma':
encode = imagecodecs.lzma_encode
decode = imagecodecs.lzma_decode
level = 6
encoded = lzma.compress(data)
elif module == 'zstd':
encode = imagecodecs.zstd_encode
decode = imagecodecs.zstd_decode
level = 5
if length == 0:
# bug in zstd.compress?
encoded = encode(data, level)
else:
encoded = zstd.compress(data, level)
elif module == 'lzf':
encode = imagecodecs.lzf_encode
decode = imagecodecs.lzf_decode
level = 1
encoded = lzf.compress(data, ((len(data) * 33) >> 5) + 1)
if encoded is None:
pytest.skip("lzf can't compress empty input")
elif module == 'lz4':
encode = imagecodecs.lz4_encode
decode = imagecodecs.lz4_decode
level = 1
encoded = lz4.block.compress(data, store_size=False)
elif module == 'lz4h':
def encode(*args, **kwargs):
return imagecodecs.lz4_encode(*args, header=True, **kwargs)
def decode(*args, **kwargs):
return imagecodecs.lz4_decode(*args, header=True, **kwargs)
level = 1
encoded = lz4.block.compress(data, store_size=True)
elif module == 'bz2':
encode = imagecodecs.bz2_encode
decode = imagecodecs.bz2_decode
level = 9
encoded = bz2.compress(data, compresslevel=level)
elif module == 'bitshuffle':
encode = imagecodecs.bitshuffle_encode
decode = imagecodecs.bitshuffle_decode
level = 0
encoded = bitshuffle.bitshuffle(
numpy.frombuffer(data, 'uint8')).tobytes()
else:
raise ValueError(module)
if codec == 'encode':
size = len(encoded)
if output == 'new':
assert encoded == encode(data, level)
elif output == 'size':
ret = encode(data, level, out=size)
assert encoded == ret
elif output == 'out':
if module == 'zstd':
out = bytearray(max(size, 64))
# elif module == 'blosc':
# out = bytearray(max(size, 17)) # bug in blosc ?
elif module == 'lzf':
out = bytearray(size + 1) # bug in liblzf ?
else:
out = bytearray(size)
ret = encode(data, level, out=out)
assert encoded == out[:size]
assert encoded == ret
elif output == 'excess':
out = bytearray(size + 1021)
ret = encode(data, level, out=out)
if module == 'blosc':
# pytest.skip("blosc output depends on output size")
assert data == decode(ret)
else:
assert ret == out[:size]
assert encoded == ret
elif output == 'trunc':
size = max(0, size - 1)
out = bytearray(size)
if size == 0 and module == 'bitshuffle':
encode(data, level, out=out) == b''
else:
with pytest.raises(RuntimeError):
encode(data, level, out=out)
else:
raise ValueError(output)
elif codec == 'decode':
size = len(data)
if output == 'new':
assert data == decode(encoded)
elif output == 'size':
ret = decode(encoded, out=size)
assert data == ret
elif output == 'out':
out = bytearray(size)
ret = decode(encoded, out=out)
assert data == out
assert data == ret
elif output == 'excess':
out = bytearray(size + 1021)
ret = decode(encoded, out=out)
assert data == out[:size]
assert data == ret
elif output == 'trunc':
size = max(0, size - 1)
out = bytearray(size)
if length > 0 and module in ('zlib', 'zstd', 'lzf', 'lz4', 'lz4h',
'blosc', 'bitshuffle'):
with pytest.raises(RuntimeError):
decode(encoded, out=out)
else:
decode(encoded, out=out)
assert data[:size] == out
else:
raise ValueError(output)
else:
raise ValueError(codec)
@pytest.mark.skipif(not hasattr(imagecodecs, 'bitshuffle_decode'),
reason='bitshuffle codec missing')
@pytest.mark.parametrize('dtype', ['bytes', 'ndarray'])
@pytest.mark.parametrize('itemsize', [1, 2, 4, 8])
@pytest.mark.parametrize('blocksize', [0, 8, 64])
def test_bitshuffle_roundtrip(dtype, itemsize, blocksize):
"""Test Bitshuffle codec."""
encode = imagecodecs.bitshuffle_encode
decode = imagecodecs.bitshuffle_decode
if dtype == 'bytes':
data = numpy.random.randint(255, size=1024, dtype='uint8').tobytes()
else:
data = numpy.random.randint(255, size=1024, dtype='u%i' % itemsize)
data.shape = 2, 4, 128
encoded = encode(data, itemsize=itemsize, blocksize=blocksize)
decoded = decode(encoded, itemsize=itemsize, blocksize=blocksize)
if dtype == 'bytes':
assert data == decoded
else:
assert_array_equal(data, decoded)
@pytest.mark.skipif(not hasattr(imagecodecs, 'blosc_decode'),
reason='blosc codec missing')
@pytest.mark.parametrize('numthreads', [1, 6])
@pytest.mark.parametrize('level', [None, 1])
@pytest.mark.parametrize('shuffle', ['noshuffle', 'shuffle', 'bitshuffle'])
@pytest.mark.parametrize('compressor', ['blosclz', 'lz4', 'lz4hc', 'snappy',
'zlib', 'zstd'])
def test_blosc_roundtrip(compressor, shuffle, level, numthreads):
"""Test Blosc codec."""
encode = imagecodecs.blosc_encode
decode = imagecodecs.blosc_decode
data = numpy.random.randint(255, size=2021, dtype='uint8').tobytes()
encoded = encode(data, level=level, compressor=compressor,
shuffle=shuffle, numthreads=numthreads)
decoded = decode(encoded, numthreads=numthreads)
assert data == decoded
AEC_TEST_DIR = osp.join(TEST_DIR, 'libaec/121B2TestData')
AEC_TEST_OPTIONS = list(
osp.split(f)[-1][5:-3] for f in glob.glob(osp.join(
AEC_TEST_DIR, 'AllOptions', '*.rz')))
AEC_TEST_EXTENDED = list(
osp.split(f)[-1][:-3] for f in glob.glob(osp.join(
AEC_TEST_DIR, 'ExtendedParameters', '*.rz')))
@pytest.mark.skipif(not hasattr(imagecodecs, 'aec_decode'),
reason='aec codec missing')
@pytest.mark.parametrize('dtype', ['bytes', 'numpy'])
@pytest.mark.parametrize('name', AEC_TEST_EXTENDED)
def test_aec_extended(name, dtype):
"""Test AEC codec with libaec ExtendedParameters."""
encode = imagecodecs.aec_encode
decode = imagecodecs.aec_decode
size = 512 * 512 * 4
bitspersample = 32
flags = imagecodecs.AEC_DATA_PREPROCESS | imagecodecs.AEC_PAD_RSI
matches = re.search(r'j(\d+)\.r(\d+)', name).groups()
blocksize = int(matches[0])
rsi = int(matches[1])
filename = osp.join(AEC_TEST_DIR, 'ExtendedParameters', '%s.rz' % name)
with open(filename, 'rb') as fh:
rz = fh.read()
filename = osp.join(AEC_TEST_DIR, 'ExtendedParameters',
'%s.dat' % name.split('.')[0])
if dtype == 'bytes':
with open(filename, 'rb') as fh:
dat = fh.read()
out = size
else:
dat = numpy.fromfile(filename, 'uint32').reshape(512, 512)
out = numpy.empty_like(dat)
# decode
decoded = decode(rz, bitspersample=bitspersample, flags=flags,
blocksize=blocksize, rsi=rsi, out=out)
if dtype == 'bytes':
assert decoded == dat
else:
assert_array_equal(decoded, dat)
# roundtrip
if dtype == 'bytes':
encoded = encode(dat, bitspersample=bitspersample, flags=flags,
blocksize=blocksize, rsi=rsi)
# fails with AEC_DATA_ERROR if libaec wasn't built with libaec.diff
decoded = decode(encoded, bitspersample=bitspersample, flags=flags,
blocksize=blocksize, rsi=rsi, out=size)
assert decoded == dat
else:
encoded = encode(dat, flags=flags, blocksize=blocksize, rsi=rsi)
# fails with AEC_DATA_ERROR if libaec wasn't built with libaec.diff
decoded = decode(encoded, flags=flags, blocksize=blocksize, rsi=rsi,
out=out)
assert_array_equal(decoded, dat)
@pytest.mark.skipif(not hasattr(imagecodecs, 'aec_decode'),
reason='aec codec missing')
@pytest.mark.parametrize('name', AEC_TEST_OPTIONS)
def test_aec_options(name):
"""Test AEC codec with libaec 121B2TestData."""
encode = imagecodecs.aec_encode
decode = imagecodecs.aec_decode
rsi = 128
blocksize = 16
flags = imagecodecs.AEC_DATA_PREPROCESS
if 'restricted' in name:
flags |= imagecodecs.AEC_RESTRICTED
matches = re.search(r'p(\d+)n(\d+)', name).groups()
size = int(matches[0])
bitspersample = int(matches[1])
if bitspersample > 8:
size *= 2
if bitspersample > 16:
size *= 2
filename = osp.join(AEC_TEST_DIR, 'AllOptions', 'test_%s.rz' % name)
with open(filename, 'rb') as fh:
rz = fh.read()
filename = filename.replace('.rz', '.dat'
).replace('-basic', ''
).replace('-restricted', '')
with open(filename, 'rb') as fh:
dat = fh.read()
out = size
# decode
decoded = decode(rz, bitspersample=bitspersample, flags=flags,
blocksize=blocksize, rsi=rsi, out=out)
assert decoded == dat
# roundtrip
encoded = encode(dat, bitspersample=bitspersample, flags=flags,
blocksize=blocksize, rsi=rsi)
decoded = decode(encoded, bitspersample=bitspersample, flags=flags,
blocksize=blocksize, rsi=rsi, out=out)
assert decoded == dat
@pytest.mark.skipif(not hasattr(imagecodecs, 'jpeg_encode'),
reason='jpeg codecs missing')
@pytest.mark.filterwarnings('ignore:Possible precision loss')
@pytest.mark.parametrize('optimize', [False, True])
@pytest.mark.parametrize('smoothing', [0, 25])
@pytest.mark.parametrize('subsampling', ['444', '422', '420', '411', '440'])
@pytest.mark.parametrize('itype', ['rgb', 'rgba', 'gray'])
@pytest.mark.parametrize('codec', ['jpeg8', 'jpeg12'])
def test_jpeg_encode(codec, itype, subsampling, smoothing, optimize):
"""Test various JPEG encode options."""
# general and default options are tested in test_image_roundtrips
if codec == 'jpeg8':
dtype = 'uint8'
decode = imagecodecs.jpeg8_decode
encode = imagecodecs.jpeg8_encode
atol = 24
elif codec == 'jpeg12':
if _jpeg12 is None:
pytest.skip('_jpeg12 module missing')
if not optimize:
pytest.skip('jpeg12 fails without optimize')
dtype = 'uint16'
decode = imagecodecs.jpeg12_decode
encode = imagecodecs.jpeg12_encode
atol = 24 * 16
else:
raise ValueError(codec)
dtype = numpy.dtype(dtype)
data = image_data(itype, dtype)
data = data[:32, :16].copy() # make divisible by subsamples
encoded = encode(data, level=95, subsampling=subsampling,
smoothing=smoothing, optimize=optimize)
decoded = decode(encoded)
if itype == 'gray':
decoded = decoded.reshape(data.shape)
assert_allclose(data, decoded, atol=atol)
@pytest.mark.skipif(not hasattr(imagecodecs, 'jpeg8_decode'),
reason='jpeg8 codec missing')
@pytest.mark.parametrize('output', ['new', 'out', 'bytearray'])
def test_jpeg8_decode(output):
"""Test JPEG 8-bit decoder with separate tables."""
decode = imagecodecs.jpeg8_decode
data = readfile('bytes.jpeg8.bin')
tables = readfile('bytes.jpeg8_tables.bin')
if output == 'new':
decoded = decode(data, tables)
elif output == 'out':
decoded = numpy.empty_like(BYTESIMG)
decode(data, tables, out=decoded)
elif output == 'bytearray':
decoded = bytearray(BYTESIMG.size * BYTESIMG.itemsize)
decoded = decode(data, tables, out=decoded)
assert_array_equal(BYTESIMG, decoded)
@pytest.mark.skipif(_jpeg12 is None, reason='_jpeg12 module missing')
@pytest.mark.parametrize('output', ['new', 'out', 'bytearray'])
def test_jpeg12_decode(output):
"""Test JPEG 12-bit decoder with separate tables."""
decode = imagecodecs.jpeg12_decode
data = readfile('words.jpeg12.bin')
tables = readfile('words.jpeg12_tables.bin')
if output == 'new':
decoded = decode(data, tables)
elif output == 'out':
decoded = numpy.empty_like(WORDSIMG)
decode(data, tables, out=decoded)
elif output == 'bytearray':
decoded = bytearray(WORDSIMG.size * WORDSIMG.itemsize)
decoded = decode(data, tables, out=decoded)
assert numpy.max(numpy.abs(WORDSIMG.astype('int32') -
decoded.astype('int32'))) < 2
@pytest.mark.skipif(not hasattr(imagecodecs, 'jpegsof3_decode'),
reason='jpegsof3 codec missing')
@pytest.mark.parametrize('output', ['new', 'out', 'bytearray'])
@pytest.mark.parametrize('fname', ['gray8.sof3.jpg', 'gray16.sof3.jpg'])
def test_jpegsof3(fname, output):
"""Test JPEG SOF3 decoder with 8 and 16-bit images."""
decode = imagecodecs.jpegsof3_decode
shape = 535, 800
if fname == 'gray8.sof3.jpg':
dtype = 'uint8'
value = 75
elif fname == 'gray16.sof3.jpg':
dtype = 'uint16'
value = 19275
data = readfile(fname)
if output == 'new':
decoded = decode(data)
elif output == 'out':
decoded = numpy.empty(shape, dtype)
decode(data, out=decoded)
elif output == 'bytearray':
decoded = bytearray(535 * 800 * numpy.dtype(dtype).itemsize)
decoded = decode(data, out=decoded)
assert decoded.shape == shape
assert decoded.dtype == dtype
assert decoded[500, 600] == value
@pytest.mark.skipif(not hasattr(imagecodecs, 'jxr_decode'),
reason='jxr codec missing')
@pytest.mark.parametrize('output', ['new', 'out', 'bytearray'])
def test_jxr_decode(output):
"""Test JXR decoder with RGBA32 image."""
decode = imagecodecs.jxr_decode
image = readfile('rgba32.jxr.bin')
image = numpy.frombuffer(image, dtype='uint8').reshape(100, 100, -1)
data = readfile('rgba32.jxr')
if output == 'new':
decoded = decode(data)
elif output == 'out':
decoded = numpy.empty_like(image)
decode(data, out=decoded)
elif output == 'bytearray':
decoded = bytearray(image.size * image.itemsize)
decoded = decode(data, out=decoded)
assert_array_equal(image, decoded)
@pytest.mark.skipif(not hasattr(imagecodecs, 'j2k_decode'),
reason='j2k codec missing')
@pytest.mark.parametrize('output', ['new', 'out', 'bytearray'])
def test_j2k_int8_4bit(output):
"""Test J2K decoder with int8, 4-bit image."""
decode = imagecodecs.j2k_decode
data = readfile('int8_4bit.j2k')
dtype = 'int8'
shape = 256, 256
if output == 'new':
decoded = decode(data, verbose=2)
elif output == 'out':
decoded = numpy.empty(shape, dtype)
decode(data, out=decoded)
elif output == 'bytearray':
decoded = bytearray(shape[0] * shape[1])
decoded = decode(data, out=decoded)
assert decoded.dtype == dtype
assert decoded.shape == shape
assert decoded[0, 0] == -6
assert decoded[-1, -1] == 2
@pytest.mark.skipif(not hasattr(imagecodecs, 'j2k_decode'),
reason='j2k codec missing')
def test_j2k_ycbc():
"""Test J2K decoder with subsampling."""
decode = imagecodecs.j2k_decode
data = readfile('ycbc.j2k')
decoded = decode(data, verbose=2)
assert decoded.dtype == 'uint8'
assert decoded.shape == (256, 256, 3)
assert tuple(decoded[0, 0]) == (243, 243, 240)
assert tuple(decoded[-1, -1]) == (0, 0, 0)
@pytest.mark.skipif(_jpegls is None, reason='_jpegls module missing')
@pytest.mark.parametrize('output', ['new', 'out', 'bytearray'])
def test_jpegls_decode(output):
"""Test JPEGLS decoder with RGBA32 image."""
decode = imagecodecs.jpegls_decode
data = readfile('rgba.u1.jls')
dtype = 'uint8'
shape = 32, 31, 4
if output == 'new':
decoded = decode(data)
elif output == 'out':
decoded = numpy.empty(shape, dtype)
decode(data, out=decoded)
elif output == 'bytearray':
decoded = bytearray(shape[0] * shape[1] * shape[2])
decoded = decode(data, out=decoded)
assert decoded.dtype == dtype
assert decoded.shape == shape
assert decoded[25, 25, 1] == 97
assert decoded[-1, -1, -1] == 63
@pytest.mark.skipif(not hasattr(imagecodecs, 'webp_decode'),
reason='webp codec missing')
@pytest.mark.parametrize('output', ['new', 'out', 'bytearray'])
def test_webp_decode(output):
"""Test WebpP decoder with RGBA32 image."""
decode = imagecodecs.webp_decode
data = readfile('rgba.u1.webp')
dtype = 'uint8'
shape = 32, 31, 4
if output == 'new':
decoded = decode(data)
elif output == 'out':
decoded = numpy.empty(shape, dtype)
decode(data, out=decoded)
elif output == 'bytearray':
decoded = bytearray(shape[0] * shape[1] * shape[2])
decoded = decode(data, out=decoded)
assert decoded.dtype == dtype
assert decoded.shape == shape
assert decoded[25, 25, 1] == 94 # lossy
assert decoded[-1, -1, -1] == 63
@pytest.mark.skipif(_zfp is None, reason='_zfp module missing')
@pytest.mark.filterwarnings('ignore:Possible precision loss')
@pytest.mark.parametrize('execution', [None, 'omp'])
@pytest.mark.parametrize('mode', [(None, None), ('p', None)]) # ('r', 24)
@pytest.mark.parametrize('deout', ['new', 'out', 'bytearray']) # 'view',
@pytest.mark.parametrize('enout', ['new', 'out', 'bytearray'])
@pytest.mark.parametrize('itype', ['rgba', 'view', 'gray', 'line'])
@pytest.mark.parametrize('dtype', ['float32', 'float64', 'int32', 'int64'])
def test_zfp(dtype, itype, enout, deout, mode, execution):
"""Test ZFP codecs."""
if execution == 'omp' and os.environ.get('SKIP_OMP', False):
pytest.skip('omp test skipped because of environment variable')
decode = imagecodecs.zfp_decode
encode = imagecodecs.zfp_encode
mode, level = mode
dtype = numpy.dtype(dtype)
itemsize = dtype.itemsize
data = image_data(itype, dtype)
shape = data.shape
kwargs = dict(mode=mode, level=level, execution=execution)
encoded = encode(data, **kwargs)
if enout == 'new':
pass
elif enout == 'out':
encoded = numpy.empty(len(encoded), 'uint8')
encode(data, out=encoded, **kwargs)
elif enout == 'bytearray':
encoded = bytearray(len(encoded))
encode(data, out=encoded, **kwargs)
if deout == 'new':
decoded = decode(encoded)
elif deout == 'out':
decoded = numpy.empty(shape, dtype)
decode(encoded, out=decoded)
elif deout == 'view':
temp = numpy.empty((shape[0] + 5, shape[1] + 5, shape[2]), dtype)
decoded = temp[2:2 + shape[0], 3:3 + shape[1], :]
decode(encoded, out=decoded)
elif deout == 'bytearray':
decoded = bytearray(shape[0] * shape[1] * shape[2] * itemsize)
decoded = decode(encoded, out=decoded)
decoded = numpy.asarray(decoded, dtype=dtype).reshape(shape)
if dtype.char == 'f':
atol = 1e-6
else:
atol = 20
assert_allclose(data, decoded, atol=atol, rtol=0)
@pytest.mark.skipif(not hasattr(imagecodecs, 'jxr_decode'),
reason='jxr codec missing')
@pytest.mark.filterwarnings('ignore:Possible precision loss')
@pytest.mark.parametrize('level', [None, 90, 0.4])
@pytest.mark.parametrize('deout', ['new', 'out', 'bytearray']) # 'view',
@pytest.mark.parametrize('enout', ['new', 'out', 'bytearray'])
@pytest.mark.parametrize('itype', [
'gray uint8', 'gray uint16', 'gray float16', 'gray float32',
'rgb uint8', 'rgb uint16', 'rgb float16', 'rgb float32',
'rgba uint8', 'rgba uint16', 'rgba float16', 'rgba float32',
'channels uint8', 'channelsa uint8', 'channels uint16', 'channelsa uint16',
'cmyk uint8', 'cmyka uint8'])
def test_jxr(itype, enout, deout, level):
"""Test JpegXR codecs."""
decode = imagecodecs.jxr_decode
encode = imagecodecs.jxr_encode
itype, dtype = itype.split()
dtype = numpy.dtype(dtype)
itemsize = dtype.itemsize
data = image_data(itype, dtype)
shape = data.shape
kwargs = dict(level=level)
if itype.startswith('cmyk'):
kwargs['photometric'] = 'cmyk'
if itype.endswith('a'):
kwargs['hasalpha'] = True
print(data.shape, data.dtype, data.strides)
encoded = encode(data, **kwargs)
if enout == 'new':
pass
elif enout == 'out':
encoded = numpy.empty(len(encoded), 'uint8')
encode(data, out=encoded, **kwargs)
elif enout == 'bytearray':
encoded = bytearray(len(encoded))
encode(data, out=encoded, **kwargs)
if deout == 'new':
decoded = decode(encoded)
elif deout == 'out':
decoded = numpy.empty(shape, dtype)
decode(encoded, out=numpy.squeeze(decoded))
elif deout == 'view':
temp = numpy.empty((shape[0] + 5, shape[1] + 5, shape[2]), dtype)
decoded = temp[2:2 + shape[0], 3:3 + shape[1], :]
decode(encoded, out=numpy.squeeze(decoded))
elif deout == 'bytearray':
decoded = bytearray(shape[0] * shape[1] * shape[2] * itemsize)
decoded = decode(encoded, out=decoded)
decoded = numpy.asarray(decoded, dtype=dtype).reshape(shape)
if itype == 'gray':
decoded = decoded.reshape(shape)
if level is None:
atol = 0.00001 if dtype.kind == 'f' else 1
elif level == 90:
atol = 0.005 if dtype.kind == 'f' else 8 if dtype == 'uint8' else 12
else:
atol = 0.1 if dtype.kind == 'f' else 64 if dtype == 'uint8' else 700
assert_allclose(data, decoded, atol=atol, rtol=0)
@pytest.mark.skipif(not hasattr(imagecodecs, 'jpeg_decode'),
reason='image codecs missing')
@pytest.mark.filterwarnings('ignore:Possible precision loss')
@pytest.mark.parametrize('level', [None, 5, -1])
@pytest.mark.parametrize('deout', ['new', 'out', 'view', 'bytearray'])
@pytest.mark.parametrize('enout', ['new', 'out', 'bytearray'])
@pytest.mark.parametrize('itype', ['rgb', 'rgba', 'view', 'gray', 'graya'])
@pytest.mark.parametrize('dtype', ['uint8', 'uint16'])
@pytest.mark.parametrize('codec', ['webp', 'png', 'jpeg8', 'jpeg12', 'jpegls',
'j2k'])
def test_image_roundtrips(codec, dtype, itype, enout, deout, level):
"""Test various image codecs."""
if codec == 'jpeg8':
if itype in ('view', 'graya') or deout == 'view' or dtype == 'uint16':
pytest.skip("jpeg8 doesn't support these cases")
decode = imagecodecs.jpeg8_decode
encode = imagecodecs.jpeg8_encode
atol = 24
if level:
level += 95
elif codec == 'jpeg12':
if _jpeg12 is None:
pytest.skip('_jpeg12 module missing')
if itype in ('view', 'graya') or deout == 'view' or dtype == 'uint8':
pytest.skip("jpeg12 doesn't support these cases")
decode = imagecodecs.jpeg12_decode
encode = imagecodecs.jpeg12_encode
atol = 24 * 16
if level:
level += 95
elif codec == 'jpegls':
if _jpegls is None:
pytest.skip('_jpegls module missing')
if itype in ('view', 'graya') or deout == 'view':
pytest.skip("jpegls doesn't support these cases")
decode = imagecodecs.jpegls_decode
encode = imagecodecs.jpegls_encode
elif codec == 'webp':
decode = imagecodecs.webp_decode
encode = imagecodecs.webp_encode
if dtype != 'uint8' or itype.startswith('gray'):
pytest.skip("webp doesn't support these cases")
elif codec == 'png':
decode = imagecodecs.png_decode
encode = imagecodecs.png_encode
elif codec == 'j2k':
if itype == 'view' or deout == 'view':
pytest.skip("j2k doesn't support these cases")
decode = imagecodecs.j2k_decode
encode = imagecodecs.j2k_encode
if level:
level += 95
else:
raise ValueError(codec)
dtype = numpy.dtype(dtype)
itemsize = dtype.itemsize
data = image_data(itype, dtype)
shape = data.shape
if enout == 'new':
encoded = encode(data, level=level)
elif enout == 'out':
encoded = numpy.empty(2 * shape[0] * shape[1] * shape[2] * itemsize,
'uint8')
encode(data, level=level, out=encoded)
elif enout == 'bytearray':
encoded = bytearray(2 * shape[0] * shape[1] * shape[2] * itemsize)
encode(data, level=level, out=encoded)
if deout == 'new':
decoded = decode(encoded)
elif deout == 'out':
decoded = numpy.empty(shape, dtype)
decode(encoded, out=numpy.squeeze(decoded))
elif deout == 'view':
temp = numpy.empty((shape[0] + 5, shape[1] + 5, shape[2]), dtype)
decoded = temp[2:2 + shape[0], 3:3 + shape[1], :]
decode(encoded, out=numpy.squeeze(decoded))
elif deout == 'bytearray':
decoded = bytearray(shape[0] * shape[1] * shape[2] * itemsize)
decoded = decode(encoded, out=decoded)
decoded = numpy.asarray(decoded, dtype=dtype).reshape(shape)
if itype == 'gray':
decoded = decoded.reshape(shape)
if codec == 'webp' and (level != -1 or itype == 'rgba'):
# RGBA roundtrip doesn't work for A=0
assert_allclose(data, decoded, atol=255)
elif codec in ('jpeg8', 'jpeg12'):
assert_allclose(data, decoded, atol=atol)
from abc import ABC
import numpy as np
import cv2
class ReferenceCard():
def __init__(
self,
width: float = 57,
height: float = 87,
box_x_border: float = 2,
box_x_width: float = 9.5,
box_y_border: float = 3,
box_y_height: float = 23,
zoom: int = 4
):
self.width = int(width * zoom)
self.height = int(height * zoom)
self.box_x_border = int(box_x_border * zoom)
self.box_x_width = int(box_x_width * zoom)
self.box_y_border = int(box_y_border * zoom)
self.box_y_height = int(box_y_height * zoom)
def box_tl(self):
return np.array(
[
[self.box_x_border, self.box_y_border],
[self.box_x_width, self.box_y_border],
[self.box_x_width, self.box_y_height],
[self.box_x_border, self.box_y_height]
], dtype=np.float32)
def box_br(self):
return np.array(
[
[self.width - self.box_x_border,
self.height - self.box_y_border],
[self.width - self.box_x_width,
self.height - self.box_y_border],
[self.width - self.box_x_width,
self.height - self.box_y_height],
[self.width - self.box_x_border,
self.height - self.box_y_height]
],
dtype=np.float32)
def boxes(self):
return np.array([self.box_tl(), self.box_br()])
def hull(self, img: np.array, box: list = None):
"""
Find and return the convex hull delimiting the value and suit
symbols inside the zone 'box' of image 'img'.
'box' (shape (4, 2)) is an array of 4 points delimiting a
rectangular zone; it takes one of the 2 values returned by
box_tl() or box_br().
"""
if box is None:
box = self.box_tl()
kernel = np.ones((3, 3), np.uint8)
import matplotlib
matplotlib.use("Agg")
import os
import argparse
import tensorflow as tf
import numpy as np
from tfbldr.datasets import fetch_fruitspeech
from tfbldr.datasets.audio import soundsc
from tfbldr.datasets.audio import overlap
from tfbldr.plot import specgram
from tfbldr.plot import specplot
from collections import namedtuple, defaultdict
import sys
from scipy.io import wavfile
from tfbldr.plot import get_viridis
viridis_cm = get_viridis()
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('direct_model', nargs=1, default=None)
parser.add_argument('--seed', dest='seed', type=int, default=1999)
args = parser.parse_args()
direct_model = args.direct_model[0]
random_state = np.random.RandomState(args.seed)
config = tf.ConfigProto(
device_count={'GPU': 0}
)
fruit = fetch_fruitspeech()
minmin = np.inf
maxmax = -np.inf
for s in fruit["data"]:
si = s - s.mean()
minmin = min(minmin, si.min())
maxmax = max(maxmax, si.max())
train_data = []
valid_data = []
type_counts = defaultdict(lambda: 0)
final_audio = []
for n, s in enumerate(fruit["data"]):
type_counts[fruit["target"][n]] += 1
s = s - s.mean()
n_s = (s - minmin) / float(maxmax - minmin)
n_s = 2 * n_s - 1
#n_s = mu_law_transform(n_s, 256)
if type_counts[fruit["target"][n]] == 15:
valid_data.append(n_s)
else:
train_data.append(n_s)
def _cuts(list_of_audio, cut, step):
# make many overlapping cuts
# 8k, this means offset is ~4ms @ step of 32
real_final = []
real_idx = []
for n, s in enumerate(list_of_audio):
# cut off the end
s = s[:len(s) - len(s) % step]
starts = np.arange(0, len(s) - cut + step, step)
for st in starts:
real_final.append(s[st:st + cut][None, :, None])
real_idx.append(n)
return real_final, real_idx
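# _cuts slides a window of 'cut' samples over each utterance with hop 'step',
# so consecutive windows overlap by cut - step samples. For example (values
# below are illustrative, not taken from the fruitspeech data):
#   _cuts([np.arange(6)], cut=4, step=1) yields windows starting at 0, 1, 2,
#   each shaped (1, 4, 1), together with the index of the source utterance.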
cut = 256
step = 1
train_audio, train_audio_idx = _cuts(train_data, cut, step)
valid_audio, valid_audio_idx = _cuts(valid_data, cut, step)
embedding_dim = 512
vqvae_batch_size = 50
rnn_batch_size = 50
n_hid = 512
n_clusters = 64
# reserve 0 for "start code"
n_inputs = embedding_dim + 1
hardcoded_z_len = 16
reps = 10
sample_random_state = np.random.RandomState(1165)
with tf.Session(config=config) as sess:
saver = tf.train.import_meta_graph(direct_model + '.meta')
saver.restore(sess, direct_model)
fields = ["vqvae_inputs",
"bn_flag",
"x_tilde",
"z_e_x",
"z_q_x",
"z_i_x",
"z_emb",
"vqvae_rec_loss",
"rnn_inputs",
"rnn_inputs_tm1",
"init_hidden",
"init_cell",
"init_q_hidden",
"init_q_cell",
"hiddens",
"cells",
"q_hiddens",
"q_cells",
"q_nvq_hiddens",
"i_hiddens",
"pred",
"pred_sm",
"rnn_rec_loss"]
vs = namedtuple('Params', fields)(
*[tf.get_collection(name)[0] for name in fields]
)
init_h = np.zeros((rnn_batch_size, n_hid)).astype("float32")
init_c = np.zeros((rnn_batch_size, n_hid)).astype("float32")
init_q_h = np.zeros((rnn_batch_size, n_hid)).astype("float32")
init_q_c = np.zeros((rnn_batch_size, n_hid)).astype("float32")
rnn_inputs = np.zeros((1, rnn_batch_size, 1))
all_out = []
all_h = []
all_c = []
all_q_h = []
all_q_c = []
all_i_h = []
for i in range(reps * hardcoded_z_len):
print("Sampling step {} of {}".format(i + 1, reps * hardcoded_z_len))
feed = {vs.rnn_inputs_tm1: rnn_inputs,
vs.init_hidden: init_h,
vs.init_cell: init_c,
vs.init_q_hidden: init_q_h,
vs.init_q_cell: init_q_c}
outs = [vs.pred_sm, vs.hiddens, vs.cells, vs.q_hiddens, vs.q_cells, vs.i_hiddens]
r = sess.run(outs, feed_dict=feed)
pred_sm = r[0]
pred_samp_i = np.argmax(pred_sm - np.log(-np.log(sample_random_state.uniform(low=1E-5, high=1-1E-5, size=pred_sm.shape))), axis=-1)
pred_i = pred_samp_i.astype("float32")
#pred_i = pred_sm.argmax(axis=-1).astype("float32")
rnn_inputs = pred_i[..., None]
hiddens = r[1]
cells = r[2]
q_hiddens = r[3]
q_cells = r[4]
i_hiddens = r[-1]
all_out.append(rnn_inputs)
all_h.append(hiddens)
all_c.append(cells)
all_q_h.append(q_hiddens)
all_q_c.append(q_cells)
all_i_h.append(i_hiddens[..., None])
init_h = hiddens[-1]
init_c = cells[-1]
init_q_h = q_hiddens[-1]
init_q_c = q_cells[-1]
s_out = np.concatenate(all_out, axis=0)
i_h = np.concatenate(all_i_h, axis=0)
from IPython import embed; embed(); raise ValueError()
res = []
i_res = []
for n_round in range(rounds):
this_res = [x]
this_i_res = [-1 * init_q_h[:, 0][None].astype("float32")]
# can reset these, trained only up to inner_seq_len by seems ok if trained on overlap frames
#init_h = np.zeros((batch_size, n_hid)).astype("float32")
#init_c = np.zeros((batch_size, n_hid)).astype("float32")
#init_q_h = np.zeros((batch_size, n_hid)).astype("float32")
#init_q_c = np.zeros((batch_size, n_hid)).astype("float32")
for i in range(inner_seq_len - 1):
feed = {vs.inputs_tm1: x,
vs.init_hidden: init_h,
vs.init_cell: init_c,
vs.init_q_hidden: init_q_h,
vs.init_q_cell: init_q_c}
outs = [vs.pred_sm, vs.hiddens, vs.cells, vs.q_hiddens, vs.q_cells, vs.i_hiddens]
r = sess.run(outs, feed_dict=feed)
p = r[0]
# sample?
#x = np.array([sample_random_state.choice(list(range(p.shape[-1])), p=p[0, i]) for i in range(batch_size)]).astype("float32")[None, :, None]
x = p.argmax(axis=-1)[:, :, None].astype("float32")
hids = r[1]
cs = r[2]
q_hids = r[3]
q_cs = r[4]
i_hids = r[5]
init_h = hids[0]
init_c = cs[0]
init_q_h = q_hids[0]
init_q_c = q_cs[0]
this_res.append(x)
this_i_res.append(i_hids)
res.append(this_res)
i_res.append(this_i_res)
x = x + 1. + inner_seq_len * batch_size
final_quantized_indices = np.array(res)
final_hidden_indices = np.array(i_res)
# n_rounds, 10, 1, 10, 1 -> n_rounds, 10, 10 in the right order
quantized = final_quantized_indices[:, :, 0, :, 0].astype("int32").transpose(0, 2, 1)
indices = final_hidden_indices[:, :, 0].transpose(0, 2, 1)
quantized = quantized.reshape(-1, quantized.shape[-1])
indices = indices.reshape(-1, indices.shape[-1])
r_lu = {v: k for k, v in lu.items()}
# need to look these back up into something...
q_shp = quantized.shape
codes = [r_lu[q] for q in quantized.ravel().astype("int32")]
codes = np.array(codes)
from typing import Dict, List, Union
import numpy as np
from grl.algos.p2sro.payoff_table import PayoffTable
from grl.utils.strategy_spec import StrategySpec
class PolicySpecDistribution(object):
def __init__(self, payoff_table: PayoffTable, player: int,
policy_selection_probs_indexed_by_policy_num: List[float]):
self._probs_to_policy_specs = {
selection_prob: payoff_table.get_spec_for_player_and_pure_strat_index(
player=player, pure_strat_index=policy_num)
for policy_num, selection_prob in enumerate(policy_selection_probs_indexed_by_policy_num)
}
self.player = player
def sample_policy_spec(self) -> StrategySpec:
return np.random.choice(a=list(self._probs_to_policy_specs.values()),
p=list(self._probs_to_policy_specs.keys()))
def probabilities_for_each_strategy(self) -> np.ndarray:
return np.asarray(list(self._probs_to_policy_specs.keys()), dtype=np.float64)
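# Sketch of how a PolicySpecDistribution is meant to be used (assumes an
# existing PayoffTable instance named 'payoff_table'; illustrative only):
#   dist = PolicySpecDistribution(payoff_table=payoff_table, player=0,
#                                 policy_selection_probs_indexed_by_policy_num=[0.25, 0.75])
#   spec = dist.sample_policy_spec()                 # StrategySpec drawn with prob 0.25/0.75
#   probs = dist.probabilities_for_each_strategy()   # np.ndarray([0.25, 0.75])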
def get_latest_metanash_strategies(payoff_table: PayoffTable,
as_player: int,
as_policy_num: int,
fictitious_play_iters: int,
mix_with_uniform_dist_coeff: float = 0.0,
print_matrix: bool = True) -> Union[None, Dict[int, PolicySpecDistribution]]:
# Currently this function only handles 2-player games
if as_policy_num is None:
as_policy_num = payoff_table.shape()[as_player] - 1
if not 0 <= as_player < payoff_table.n_players():
raise ValueError(f"as_player {as_player} should be in the range [0, {payoff_table.n_players()}).")
if (payoff_table.shape() == (0,) and as_policy_num != 0) or \
(payoff_table.shape() != (0,) and payoff_table.shape()[as_player] < as_policy_num):
raise ValueError(f"In the payoff table, policy_num {as_policy_num} is out of range for player {as_player}. "
f"Payoff table shape is {payoff_table.shape()}.")
if payoff_table.n_players() != 2:
raise NotImplemented("Solving normal form Nash equilibrium strats for >2 player games not implemented.")
if as_policy_num == 0:
return None
other_players = list(range(0, payoff_table.n_players()))
other_players.remove(as_player)
opponent_strategy_distributions = {}
for other_player in other_players:
player_payoff_matrix = payoff_table.get_payoff_matrix_for_player(player=other_player)
assert len(player_payoff_matrix.shape) == 2 # assume a 2D payoff matrix
# only consider policies below 'as_policy_num' in the p2sro hierarchy
player_payoff_matrix = player_payoff_matrix[:as_policy_num, :as_policy_num]
player_payoff_matrix_current_player_is_rows = player_payoff_matrix.transpose((other_player, as_player))
if print_matrix:
print(f"payoff matrix as {other_player} (row) against {as_player} (columns):")
print(player_payoff_matrix_current_player_is_rows)
row_averages, col_averages, exps = fictitious_play(iters=fictitious_play_iters,
payoffs=player_payoff_matrix_current_player_is_rows)
selection_probs = np.copy(row_averages[-1])
import enum
import numpy as np
import numpy.typing as npt
import pyoorb
import requests as req
from astropy.time import Time
from os import getenv
from typing import (
Iterable,
Optional,
Tuple,
List
)
from .spherical_geom import propagate_linearly
pyoorb_initialized = False
DEGREE = 1.0
ARCMIN = DEGREE / 60
ARCSEC = ARCMIN / 60
def _ensure_pyoorb_initialized(*args, **kwargs):
"""Make sure that pyoorb is initialized."""
global pyoorb_initialized
if not pyoorb_initialized:
pyoorb.pyoorb.oorb_init(*args, **kwargs)
pyoorb_initialized = True
class OrbitElementType(enum.Enum):
CARTESIAN = 1
COMETARY = 2
KEPLERIAN = 3
class EpochTimescale(enum.Enum):
UTC = 1
UT1 = 2
TT = 3
TAI = 4
class PropagationIntegrator(enum.Enum):
N_BODY = 1
TWO_BODY = 2
class Orbit:
def __init__(self, orbit_id: int, state_vector: npt.NDArray[np.float64]):
"""
Create a new Orbit.
state_vector is a pretty opaque blob. It should be the structure that
pyoorb expects - a 12-element vector of doubles.
"""
self.orbit_id = orbit_id
self._state_vector = state_vector
self._orbit_type = OrbitElementType(int(state_vector[0][7]))
self._epoch_timescale = EpochTimescale(int(state_vector[0][9]))
self._epoch = state_vector[0][8]
@classmethod
def cometary(
cls,
orbit_id: int,
perihelion_au: float,
eccentricity: float,
inclination_deg: float,
ascending_node_longitude_deg: float,
periapsis_argument_deg: float,
perihelion_epoch_mjd: float,
osculating_element_epoch_mjd: float,
epoch_timescale: EpochTimescale,
abs_magnitude: float,
photometric_slope_parameter: float,
):
# Orbits class takes in degrees, but state vectors are given in radians
state_vector = np.array(
[
[
orbit_id,
perihelion_au,
eccentricity,
np.deg2rad(inclination_deg),
np.deg2rad(ascending_node_longitude_deg),
np.deg2rad(periapsis_argument_deg),
perihelion_epoch_mjd,
OrbitElementType.COMETARY.value,
osculating_element_epoch_mjd,
epoch_timescale.value,
abs_magnitude,
photometric_slope_parameter,
]
],
dtype=np.double,
order="F",
)
return cls(orbit_id, state_vector)
@classmethod
def keplerian(
cls,
orbit_id: int,
semimajor_axis_au: float,
eccentricity: float,
inclination_deg: float,
ascending_node_longitude_deg: float,
periapsis_argument_deg: float,
mean_anomaly_deg: float,
osculating_element_epoch_mjd: float,
epoch_timescale: EpochTimescale,
abs_magnitude: float,
photometric_slope_parameter: float,
):
# Orbits class takes in degrees, but state vectors are given in radians
state_vector = np.array(
[
[
orbit_id,
semimajor_axis_au,
eccentricity,
np.deg2rad(inclination_deg),
np.deg2rad(ascending_node_longitude_deg),
np.deg2rad(periapsis_argument_deg),
np.deg2rad(mean_anomaly_deg),
OrbitElementType.KEPLERIAN.value,
osculating_element_epoch_mjd,
epoch_timescale.value,
abs_magnitude,
photometric_slope_parameter,
]
],
dtype=np.double,
order="F",
)
return cls(orbit_id, state_vector)
@classmethod
def cartesian(
cls,
orbit_id: int,
x: float,
y: float,
z: float,
vx: float,
vy: float,
vz: float,
osculating_element_epoch_mjd: float,
epoch_timescale: EpochTimescale,
abs_magnitude: float,
photometric_slope_parameter: float,
):
state_vector = np.array(
[
[
orbit_id,
x,
y,
z,
vx,
vy,
vz,
OrbitElementType.CARTESIAN.value,
osculating_element_epoch_mjd,
epoch_timescale.value,
abs_magnitude,
photometric_slope_parameter,
]
],
dtype=np.double,
order="F",
)
return cls(orbit_id, state_vector)
def propagate(
self,
epochs: Iterable[float],
method: PropagationIntegrator = PropagationIntegrator.N_BODY
) -> List["Orbit"]:
_ensure_pyoorb_initialized(error_verbosity = 1)
if method == PropagationIntegrator.N_BODY:
dynmodel = "N"
elif method == PropagationIntegrator.TWO_BODY:
dynmodel = "2"
else:
raise ValueError("unexpected propagation method %r" % method)
orbits = []
for epoch in epochs:
epoch_array = np.array(
[epoch, self._epoch_timescale.value], dtype=np.double, order="F"
)
result, err = pyoorb.pyoorb.oorb_propagation(
in_orbits=self._state_vector,
in_epoch=epoch_array,
in_dynmodel=dynmodel,
)
assert err == 0
# Pyoorb wants radians as inputs for orbits but outputs propagated orbits as degrees
# See here: https://github.com/oorb/oorb/blob/master/python/pyoorb.f90#L347
# Note that time of perihelion passage also is converted to a degree.
if (self._orbit_type == OrbitElementType.KEPLERIAN) or (self._orbit_type == OrbitElementType.COMETARY):
result[:, [3,4,5,6]] = np.radians(result[:, [3,4,5,6]])
orbits.append(Orbit(int(result[0][0]), result))
return orbits
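# Example usage sketch for the class above (orbital element values are made
# up for illustration; requires a working pyoorb installation with its
# ephemeris files):
#   orbit = Orbit.keplerian(1, 2.77, 0.08, 10.6, 80.3, 73.6, 60.0,
#                           59000.0, EpochTimescale.TT, 3.5, 0.15)
#   propagated = orbit.propagate([59100.0, 59200.0],
#                                PropagationIntegrator.N_BODY)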
def compute_ephemeris(
self,
obscode: str,
epochs: Iterable[float],
method: PropagationIntegrator = PropagationIntegrator.N_BODY,
) -> List["Ephemeris"]:
"""
Compute ephemeris for the orbit, propagated to an epoch, and observed from
a location represented by obscode.
obscode should be a Minor Planet Center observatory code.
"""
_ensure_pyoorb_initialized(error_verbosity=1)
epochs_array = np.array(
[[epoch, self._epoch_timescale.value] for epoch in epochs],
dtype=np.double,
order="F",
)
# print(len(epochs))
# print(epochs_array.shape)
if method == PropagationIntegrator.N_BODY:
dynmodel = "N"
elif method == PropagationIntegrator.TWO_BODY:
dynmodel = "2"
else:
raise ValueError("unexpected propagation method %r" % method)
eph, err = pyoorb.pyoorb.oorb_ephemeris_basic(
in_orbits=self._state_vector,
in_obscode=obscode,
in_date_ephems=epochs_array,
in_dynmodel=dynmodel,
)
# print(epochs_array.shape)
# print(eph.shape)
assert err == 0
return [Ephemeris(eph[0, i, :]) for i in range(epochs_array.shape[0])]
def precover_remote(
self,
tolerance: float = 30 * ARCSEC,
max_matches: Optional[int] = None,
start_mjd: Optional[float] = None,
end_mjd: Optional[float] = None,
window_size: Optional[float] = None,
):
"""
Find observations which match orbit in the database. Observations are
searched in descending order by mjd.
Expects three environment variables:
PRECOVERY_API_SINGLEORBIT_URL
PRECOVERY_API_SINGLEORBIT_USERNAME
PRECOVERY_API_SINGLEORBIT_PASSWORD
max_matches: End once this many matches have been found. If None, find
all matches.
start_mjd: Only consider observations from after this epoch
(inclusive). If None, find all.
end_mjd: Only consider observations from before this epoch (inclusive).
If None, find all.
window_size: UNIMPLEMENTED
"""
# basically:
"""
find all windows between start and end of given size
for each window:
propagate to window center
for each unique epoch,obscode in window:
propagate to epoch
find frames which match healpix of propagation
for each matching frame
find matching observations
for each matching observation
yield match
"""
# Check for environment set vars
precovery_singleorbit_url = getenv("PRECOVERY_API_SINGLEORBIT_URL")
api_username = getenv("PRECOVERY_API_SINGLEORBIT_USERNAME")
# Note Password suffix results in encryption and storage in systems keys for conda env
# (https://anaconda-project.readthedocs.io/en/latest/user-guide/tasks/work-with-variables.html#adding-an-encrypted-variable)
api_password = getenv("PRECOVERY_API_SINGLEORBIT_PASSWORD")
if not (precovery_singleorbit_url and api_username and api_password):
raise ValueError(
"""one of required environment variables unset, expecting PRECOVERY_API_SINGLEORBIT_URL,
PRECOVERY_API_SINGLEORBIT_USERNAME, PRECOVERY_API_SINGLEORBIT_PASSWORD"""
)
if self._orbit_type == OrbitElementType.KEPLERIAN:
orbit_type = "kep"
state_vector = {
"a": self._state_vector[0][1],
"e": self._state_vector[0][2],
"i": self._state_vector[0][3],
"an": self._state_vector[0][4],
"ap": self._state_vector[0][5],
"ma": self._state_vector[0][6],
}
elif self._orbit_type == OrbitElementType.COMETARY:
orbit_type = "com"
state_vector = {
"q": self._state_vector[0][1],
"e": self._state_vector[0][2],
"i": self._state_vector[0][3],
"an": self._state_vector[0][4],
"ap": self._state_vector[0][5],
"tp": self._state_vector[0][6],
}
elif self._orbit_type == OrbitElementType.CARTESIAN:
orbit_type = "car"
state_vector = {
"x": self._state_vector[0][1],
"y": self._state_vector[0][2],
"z": self._state_vector[0][3],
"vx": self._state_vector[0][4],
"vy": self._state_vector[0][5],
"vz": self._state_vector[0][6],
}
else:
raise ValueError("orbit type improperly defined %r" % self._orbit_type)
# Compile request dictionary
if self._epoch_timescale == EpochTimescale.UTC:
scale = "utc"
elif self._epoch_timescale == EpochTimescale.UT1:
scale = "ut1"
elif self._epoch_timescale == EpochTimescale.TT:
scale = "tt"
elif self._epoch_timescale == EpochTimescale.TAI:
scale = "tai"
mjd_tdb = Time(self._epoch, scale=scale, format="mjd").tdb.mjd
post_json = {
"tolerance": tolerance,
"orbit_type": orbit_type,
"mjd_tdb": mjd_tdb,
}
post_json = post_json | state_vector
if max_matches:
post_json["max_matches"] = max_matches
if start_mjd:
post_json["start_mjd"] = start_mjd
if end_mjd:
post_json["end_mjd"] = end_mjd
if window_size:
post_json["window_size"] = window_size
precovery_req = req.post(
precovery_singleorbit_url, json=post_json, auth=(api_username, api_password)
)
return precovery_req.json()
class Ephemeris:
def __init__(self, raw_data: npt.NDArray[np.float64]):
self._raw_data = raw_data
self.mjd = raw_data[0]
self.ra = raw_data[1]
self.dec = raw_data[2]
# oorb returns vracos(dec), so lets remove the cos(dec) term
self.ra_velocity = raw_data[3] / np.cos(np.radians(self.dec)) # deg per day
self.dec_velocity = raw_data[4] # deg per day
def __str__(self):
return f"<Ephemeris ra={self.ra:.4f} dec={self.dec:.4f} mjd={self.mjd:.6f}>"
def approximately_propagate(
self, obscode: str, orbit: Orbit, timedeltas: Iterable[float]
) -> Tuple[float, float]:
"""
Roughly propagate the ephemeris to several new epochs, each 'timedelta' days away along.
If timedelta is small and self.ra_velocity and self.dec_velocity are
small (indicating relatively slow motion across the sky), this uses a
linear motion approximation.
Otherwise, it uses a 2-body integration of the orbit.
Accuracy will decrease as timedelta increases.
"""
timedeltas = np.array(timedeltas)
do_linear_timedelta = timedeltas[np.where(timedeltas <= 1.0)]
approx_ras = np.zeros(timedeltas.shape[0])
approx_decs = np.zeros(timedeltas.shape[0])
# TODO: set timedeltas <= 1
if self.ra_velocity < 1 and self.dec_velocity < 1:
linear = np.where(np.abs(timedeltas) <= -1.0)
approx_ras_rad, approx_decs_rad = propagate_linearly(
np.deg2rad(self.ra),
np.deg2rad(self.ra_velocity),
np.deg2rad(self.dec),
np.deg2rad(self.dec_velocity),
timedeltas[linear],
)
approx_ras[linear] = np.rad2deg(approx_ras_rad)
approx_decs[linear] = np.rad2deg(approx_decs_rad)
two_body = np.where(np.abs(timedeltas) > 1.0)
import numpy as np;
import os, ssl;
from scipy import interpolate
import astropy;
from astropy.coordinates import EarthLocation, Angle, SkyCoord, ICRS, ITRS, AltAz;
from astropy import units;
from astropy.time import Time;
from .headers import HM, HMQ, HMP, HMW, HC, rep_nan;
from . import log;
# Default value for the IERS server
# astropy.utils.iers.conf.iers_auto_url = 'ftp://ftp.iers.org/products/eop/rapid/standard/finals2000A.data';
# astropy.utils.iers.conf.iers_auto_url = 'http://maia.usno.navy.mil/ser7/finals2000A.all';
# ensure astropy.coordinates can query the online database of locations:
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
def coef_flat(gain):
gains = np.array([1, 10, 20, 50, 60]);
coefs = np.array([-1.8883000e-06, -1.4006500e-06, -1.3885600e-06, -1.3524500e-06, -1.3416900e-06]);
return interpolate.interp1d(gains, coefs)([gain])[0];
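# coef_flat linearly interpolates the detector flat coefficient between the
# tabulated gains, e.g. (illustrative only):
#   coef_flat(1)  -> -1.8883e-06 (tabulated value for gain 1)
#   coef_flat(15) -> halfway between the gain-10 and gain-20 coefficients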
# Definition of setups
global target_names;
target_names = ['OBJECT']
global detwin;
detwin = ['CROPROWS','CROPCOLS'];
global detmode;
detmode = ['NREADS','NLOOPS','NBIN','GAIN','FRMPRST'];
global insmode;
insmode = ['FILTER1','FILTER2','CONF_NA','MIRCX_SPECTRO_XY','MIRCX_SPECTRO_FOC'];
global fringewin;
fringewin = [HMW+'FRINGE STARTX', HMW+'FRINGE NX', HMW+'FRINGE STARTY', HMW+'FRINGE NY'];
global visparam;
visparam = [HMP+'NCOHER'];
global beamorder;
beamorder = ['BEAMORD0','BEAMORD1','BEAMORD2','BEAMORD3','BEAMORD4','BEAMORD5'];
global pop;
pop = [HC+"S1_POP", HC+"S2_POP", HC+"E1_POP", HC+"E2_POP", HC+"W1_POP", HC+"W2_POP"];
# Directory for static calibration
global static;
static = os.path.dirname (os.path.abspath(__file__))+'/static/';
def nspec (hdr):
'''
Return the expected number of spectral
channels depending on the instrumental setup
'''
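# The row span is rounded down to an even count and reduced by one so the number of spectral channels is always odd (inferred from the formula below).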
n = int((hdr['FR_ROW2'] - hdr['FR_ROW1'])/2)*2 - 1;
log.info ('nrow = %i'%n);
return n;
def fringe_widthx (hdr):
'''
Return the expected size of the fringe in
spatial direction depending on setup
'''
return 200;
def photo_widthx (hdr):
'''
Return the expected size of the xchan in
spatial direction depending on setup
'''
return 5;
def lbd0 (hdr):
'''
Return a guess of lbd0,deltaLbd depending on
instrument setup where lbd0 is the central wavelength
of the spectrum and dlbd is the bandpass of one channel.
lbdref is the reference wavelength corresponding to
the sampling returned by beam_freq.
'''
# For MYSTIC, we try to use the
# user specified parameter
if hdr['BEAMCOMB'] == 'MYSTIC' :
lbdref = 1.93e-6;
dlbd = - hdr['BANDWID'] * 1e-6 / (hdr['FR_ROW2'] - hdr['FR_ROW1']);
lbd0 = hdr['WAVELEN'] * 1e-6;
# MIRC configurations
elif hdr['CONF_NA'] == 'H_PRISM':
lbdref,lbd0,dlbd = 1.60736e-06, 1.60736e-06, 21.e-9;
elif (hdr['CONF_NA'] == 'H_GRISM200'):
lbdref,lbd0,dlbd = 1.60736e-06, 1.60736e-06, -8.2e-9;
elif (hdr['CONF_NA'] == 'H_GRISM150'):
lbdref,lbd0,dlbd = 1.60736e-06, 1.60736e-06, -8.2e-9;
elif (hdr['CONF_NA'] == 'H_GRISM'):
lbdref,lbd0,dlbd = 1.60736e-06, 1.60736e-06, -8.2e-9;
elif (hdr['CONF_NA'] == 'H_GRISM200 S1=0,S2=1,E1=2,E2=3,W1=4,W2=5'):
lbdref,lbd0,dlbd = 1.60736e-06, 1.60736e-06, -8.2e-9;
# temporary configurations. Not sure
# the sign is correct
elif hdr['CONF_NA'] == 'H_PRISM20' :
lbdref,lbd0 = 1.60736e-06, 1.60736e-06;
dlbd = lbd0 / 27.4;
elif hdr['CONF_NA'] == 'H_PRISM40' :
lbdref,lbd0 = 1.60736e-06, 1.60736e-06;
dlbd = lbd0 / 49.2;
# MIRCX configurations, J-band on top
# of image except for the GRISM_190.
elif hdr['CONF_NA'] == 'H_PRISM22' :
lbdref,lbd0 = 1.60736e-06, 1.60736e-06;
dlbd = -lbd0 / 22.;
elif hdr['CONF_NA'] == 'H_PRISM50' :
lbdref,lbd0 = 1.60736e-06, 1.60736e-06;
dlbd = -lbd0 / 50.;
elif hdr['CONF_NA'] == 'H_PRISM102' :
lbdref,lbd0 = 1.60736e-06, 1.60736e-06;
dlbd = -lbd0 / 102.;
elif hdr['CONF_NA'] == 'H_GRISM190' :
lbdref,lbd0 = 1.60736e-06, 1.60736e-06;
dlbd = lbd0 / 190.0;
# Unknown configuration
else:
log.error ('Unknown CONF_NA');
raise ValueError('CONF_NA unsuported (yet?)');
# Verbose
log.info ('Configuration '+hdr['CONF_NA']+' lbd = %fum dlbd = %fum'%(lbd0*1e6,dlbd*1e6));
return lbdref,lbd0,dlbd;
def xchan_ratio(hdr):
'''
Return a crude estimate of the xchan flux ratio, depending on the combiner generation
'''
if 'P_ION' in hdr :
return 0.3;
else:
return 0.1;
def fiber_pos(hdr):
'''
Return the fiber position in the v-groove
in units of micro-lenses
'''
# Fiber position in new MIRC-X
if 'P_ION' in hdr :
pos = np.array([4,6,13,18,24,28]);
# Fiber position in old MIRC
else :
pos = np.array([9,3,1,21,14,18]);
return pos
def beam_freq (hdr):
'''
Return the beam spatial frequencies in fringes/pixel
at lbdref, derived from the fiber positions in the
v-groove. The scale factor is set by the combiner geometry
'''
# Scaling in pix/fringe at highest spatial frequency
# and for wavelength defined as lbd0
# MYSTIC sampling and fiber position
if hdr['BEAMCOMB'] == 'MYSTIC' :
scale = 2.75;
tmp = | np.array([4,6,13,18,24,28]) | numpy.array |
# -*- coding: utf-8 -*-
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from os import path, mkdir
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import timeit
import cv2
from tqdm import tqdm
from skimage import measure
from multiprocessing import Pool
import lightgbm as lgb
from sklearn.model_selection import KFold
from sklearn.neighbors import KDTree
from skimage.morphology import watershed
from skimage.morphology import square, dilation
import pandas as pd
import math
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
masks_folder = '/wdata/labels'
train_pred_folder = '/wdata/merged_oof'
lgbm_models_folder = '/wdata/lgbm_models'
DATA_THREADS = 32 #16cores * 2 threads
LGBM_THREADS = 16
num_split_iters = 5
folds_count = 5
df = pd.read_csv('train_folds.csv')
pixels_threshold = 115
sep_count = 3
sep_thresholds = [0.65, 0.75, 0.85]
def get_inputs(filename, pred_folder, add_features=[], return_labels=False, truth_folder=None):
inputs = []
pred = cv2.imread(path.join(pred_folder, filename), cv2.IMREAD_UNCHANGED)
pred_msk = pred / 255.
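# Combine the three prediction channels (assumed: 0 = footprint probability, 1 = separation border, 2 = contact) into a single score that is suppressed near borders.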
pred_msk = pred_msk[..., 0] * (1 - pred_msk[..., 1]) * (1 - 0.1 * pred_msk[..., 2])
pred_msk = 1 * (pred_msk > 0.55)
pred_msk = pred_msk.astype(np.uint8)
y_pred = measure.label(pred_msk, neighbors=8, background=0)
props = measure.regionprops(y_pred)
for i in range(len(props)):
if props[i].area < 10:
y_pred[y_pred == i+1] = 0
y_pred = measure.label(y_pred, neighbors=8, background=0)
nucl_msk = (255 - pred[..., 0])
nucl_msk = nucl_msk.astype('uint8')
y_pred = watershed(nucl_msk, y_pred, mask=((pred[..., 0] > pixels_threshold)), watershed_line=True)
if y_pred.max() > 0:
mean_pred = pred[..., 0][y_pred > 0].mean()
else:
mean_pred = 0
ext_pred = pred
props = measure.regionprops(y_pred)
for i in range(len(props)):
if props[i].area < 10:
y_pred[y_pred == i+1] = 0
pred_labels = measure.label(y_pred, neighbors=8, background=0).astype('uint16')
pred_props = measure.regionprops(pred_labels)
init_count = len(pred_props)
coords = [pr.centroid for pr in pred_props]
if len(coords) > 0:
t = KDTree(coords)
neighbors100 = t.query_radius(coords, r=50)
neighbors200 = t.query_radius(coords, r=100)
neighbors300 = t.query_radius(coords, r=150)
neighbors400 = t.query_radius(coords, r=200)
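# Centroid neighbours within 50/100/150/200 px (the variable names keep 100-400 labels) feed the density features computed below.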
med_area = np.median(np.asarray([pr.area for pr in pred_props]))
lvl2_labels = [np.zeros_like(pred_labels, dtype='uint16') for i in range(sep_count)]
separated_regions = [[] for i in range(sep_count)]
main_regions = [[] for i in range(sep_count)]
for i in range(len(pred_props)):
is_on_border = 1 * ((pred_props[i].bbox[0] <= 1) | (pred_props[i].bbox[1] <= 1) | (pred_props[i].bbox[2] >= pred.shape[0] - 1) | (pred_props[i].bbox[3] >= pred.shape[1] - 1))
msk_reg = pred_labels[pred_props[i].bbox[0]:pred_props[i].bbox[2], pred_props[i].bbox[1]:pred_props[i].bbox[3]] == i+1
pred_reg = pred[pred_props[i].bbox[0]:pred_props[i].bbox[2], pred_props[i].bbox[1]:pred_props[i].bbox[3]]
ext_pred_reg = ext_pred[pred_props[i].bbox[0]:pred_props[i].bbox[2], pred_props[i].bbox[1]:pred_props[i].bbox[3]]
ext_pred_reg = ext_pred_reg * 0.5 + pred_reg * 0.5
ext_pred_reg = ext_pred_reg.astype('uint8')
contours = cv2.findContours((msk_reg * 255).astype(dtype=np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours[1]) > 0:
cnt = contours[1][0]
min_area_rect = cv2.minAreaRect(cnt)
inp = []
inp.extend(add_features)
inp.append(pred_props[i].area)
inp.append(0)
if len(contours[1]) > 0:
inp.append(cv2.isContourConvex(cnt) * 1.0)
inp.append(min(min_area_rect[1]))
inp.append(max(min_area_rect[1]))
if max(min_area_rect[1]) > 0:
inp.append(min(min_area_rect[1]) / max(min_area_rect[1]))
else:
inp.append(0)
inp.append(min_area_rect[2])
else:
inp.append(0)
inp.append(0)
inp.append(0)
inp.append(0)
inp.append(0)
inp.append(pred_props[i].convex_area)
inp.append(pred_props[i].solidity)
inp.append(pred_props[i].eccentricity)
inp.append(pred_props[i].extent)
inp.append(pred_props[i].perimeter)
inp.append(pred_props[i].major_axis_length)
inp.append(pred_props[i].minor_axis_length)
if (pred_props[i].minor_axis_length > 0):
inp.append(pred_props[i].minor_axis_length / pred_props[i].major_axis_length)
else:
inp.append(0)
pred_values = ext_pred_reg[..., 0][msk_reg]
inp.append(pred_values.mean())
inp.append(pred_values.std())
inp.append(pred_props[i].euler_number)
inp.append(pred_props[i].equivalent_diameter)
inp.append(pred_props[i].perimeter ** 2 / (4 * pred_props[i].area * math.pi))
inp.append(mean_pred)
inp.append(is_on_border)
inp.append(init_count)
inp.append(med_area)
inp.append(pred_props[i].area / med_area)
inp.append(neighbors100[i].shape[0])
median_area = med_area
if neighbors100[i].shape[0] > 0:
neighbors_areas = np.asarray([pred_props[j].area for j in neighbors100[i]])
median_area = np.median(neighbors_areas)
inp.append(median_area)
inp.append(pred_props[i].area / median_area)
inp.append(neighbors200[i].shape[0])
median_area = med_area
if neighbors200[i].shape[0] > 0:
neighbors_areas = np.asarray([pred_props[j].area for j in neighbors200[i]])
median_area = np.median(neighbors_areas)
inp.append(median_area)
inp.append(pred_props[i].area / median_area)
inp.append(neighbors300[i].shape[0])
median_area = med_area
if neighbors300[i].shape[0] > 0:
neighbors_areas = np.asarray([pred_props[j].area for j in neighbors300[i]])
median_area = np.median(neighbors_areas)
inp.append(median_area)
inp.append(pred_props[i].area / median_area)
inp.append(neighbors400[i].shape[0])
median_area = med_area
if neighbors400[i].shape[0] > 0:
neighbors_areas = np.asarray([pred_props[j].area for j in neighbors400[i]])
median_area = np.median(neighbors_areas)
inp.append(median_area)
inp.append(pred_props[i].area / median_area)
bst_j = 0
pred_reg[~msk_reg] = 0
pred_reg0 = pred_reg / 255.
pred_reg0 = pred_reg0[..., 0] * (1 - pred_reg0[..., 1])
max_regs = 1
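# Try the separation thresholds in turn (and, at the last level, erosion) to split the region into several candidates; the first level yielding more than one component is recorded in bst_j.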
for j in range(1, sep_count+1):
sep_regs = []
if bst_j > 0:
separated_regions[j-1].append(sep_regs)
continue
if j > sep_count-1:
pred_reg[~msk_reg] = 0
pred_reg0 = pred_reg / 255.
pred_reg0 = pred_reg0[..., 0] * (1 - pred_reg0[..., 1]) * (1 - 0.2 * pred_reg0[..., 2])
pred_reg2 = 255 * (pred_reg0 > sep_thresholds[j-1])
pred_reg2 = pred_reg2.astype(np.uint8)
lbls = measure.label(pred_reg2, neighbors=4, background=False)
num_regs = lbls.max()
if (j > sep_count-1) and (num_regs < 2):
kernel = np.ones((3, 3), np.uint8)
its = 2
pred_reg2 = cv2.erode(pred_reg2, kernel, iterations = its)
lbls = measure.label(pred_reg2, neighbors=4, background=False)
num_regs = lbls.max()
if num_regs > 1:
bst_j = j
max_regs = num_regs
if num_regs > 1 or (j < sep_count and num_regs > 0):
lbls = lbls.astype(np.int32)
labels_ws = watershed((255 - ext_pred_reg[..., 0]), lbls, mask=msk_reg)
start_num = len(main_regions[j-1])
labels_ws += start_num
labels_ws[labels_ws == start_num] = 0
for k in range(num_regs):
sep_regs.append(k+start_num)
main_regions[j-1].append(i)
labels_ws = labels_ws.astype('uint16')
lvl2_labels[j-1][pred_props[i].bbox[0]:pred_props[i].bbox[2], pred_props[i].bbox[1]:pred_props[i].bbox[3]] += labels_ws
separated_regions[j-1].append(sep_regs)
inp.append(bst_j)
inp.append(max_regs)
inp.append(1)
inp.append(0)
inputs.append(np.asarray(inp))
inputs = np.asarray(inputs)
all_sep_props = []
all_sep_inputs = []
for j in range(sep_count):
inputs_lvl2 = []
pred_props2 = measure.regionprops(lvl2_labels[j])
for i in range(len(pred_props2)):
is_on_border = 1 * ((pred_props2[i].bbox[0] <= 1) | (pred_props2[i].bbox[1] <= 1) | (pred_props2[i].bbox[2] >= pred.shape[0] - 1) | (pred_props2[i].bbox[3] >= pred.shape[1] - 1))
msk_reg = lvl2_labels[j][pred_props2[i].bbox[0]:pred_props2[i].bbox[2], pred_props2[i].bbox[1]:pred_props2[i].bbox[3]] == i+1
pred_reg = pred[pred_props2[i].bbox[0]:pred_props2[i].bbox[2], pred_props2[i].bbox[1]:pred_props2[i].bbox[3]]
ext_pred_reg = ext_pred[pred_props2[i].bbox[0]:pred_props2[i].bbox[2], pred_props2[i].bbox[1]:pred_props2[i].bbox[3]]
ext_pred_reg = ext_pred_reg * 0.5 + pred_reg * 0.5
ext_pred_reg = ext_pred_reg.astype('uint8')
contours = cv2.findContours((msk_reg * 255).astype(dtype=np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours[1]) > 0:
cnt = contours[1][0]
min_area_rect = cv2.minAreaRect(cnt)
inp = []
inp.extend(add_features)
inp.append(pred_props2[i].area)
main_area = inputs[main_regions[j][i]][0]
inp.append(pred_props2[i].area / main_area)
if len(contours[1]) > 0:
inp.append(cv2.isContourConvex(cnt) * 1.0)
inp.append(min(min_area_rect[1]))
inp.append(max(min_area_rect[1]))
if max(min_area_rect[1]) > 0:
inp.append(min(min_area_rect[1]) / max(min_area_rect[1]))
else:
inp.append(0)
inp.append(min_area_rect[2])
else:
inp.append(0)
inp.append(0)
inp.append(0)
inp.append(0)
inp.append(0)
inp.append(pred_props2[i].convex_area)
inp.append(pred_props2[i].solidity)
inp.append(pred_props2[i].eccentricity)
inp.append(pred_props2[i].extent)
inp.append(pred_props2[i].perimeter)
inp.append(pred_props2[i].major_axis_length)
inp.append(pred_props2[i].minor_axis_length)
if(pred_props2[i].minor_axis_length > 0):
inp.append(pred_props2[i].minor_axis_length / pred_props2[i].major_axis_length)
else:
inp.append(0)
pred_values = ext_pred_reg[..., 0][msk_reg]
inp.append(pred_values.mean())
inp.append(pred_values.std())
inp.append(pred_props2[i].euler_number)
inp.append(pred_props2[i].equivalent_diameter)
inp.append(pred_props2[i].perimeter ** 2 / (4 * pred_props2[i].area * math.pi))
inp.append(mean_pred)
inp.append(is_on_border)
inp.append(init_count)
inp.append(med_area)
inp.append(pred_props2[i].area / med_area)
inp.append(inputs[main_regions[j][i]][-16])
median_area = inputs[main_regions[j][i]][-15]
inp.append(median_area)
inp.append(pred_props2[i].area / median_area)
inp.append(inputs[main_regions[j][i]][-13])
median_area = inputs[main_regions[j][i]][-12]
inp.append(median_area)
inp.append(pred_props2[i].area / median_area)
inp.append(inputs[main_regions[j][i]][-10])
median_area = inputs[main_regions[j][i]][-9]
inp.append(median_area)
inp.append(pred_props2[i].area / median_area)
inp.append(inputs[main_regions[j][i]][-7])
median_area = inputs[main_regions[j][i]][-6]
inp.append(median_area)
inp.append(pred_props2[i].area / median_area)
bst_j = inputs[main_regions[j][i]][-4]
max_regs = inputs[main_regions[j][i]][-3]
inp.append(bst_j)
inp.append(max_regs)
inp.append(len(separated_regions[j][main_regions[j][i]]))
inp.append(j+1)
inputs_lvl2.append(np.asarray(inp))
all_sep_props.append(pred_props2)
inputs_lvl2 = np.asarray(inputs_lvl2)
all_sep_inputs.append(inputs_lvl2)
if truth_folder is None:
return inputs, (pred_labels if return_labels else None), all_sep_inputs, (lvl2_labels if return_labels else None), separated_regions
else:
outputs = []
truth_labels = cv2.imread(path.join(truth_folder, filename.replace('.png', '.tif')), cv2.IMREAD_UNCHANGED)
truth_labels = measure.label(truth_labels, neighbors=8, background=0)
truth_props = measure.regionprops(truth_labels)
m = np.zeros((len(pred_props), len(truth_props)))
for x in range(pred_labels.shape[1]):
for y in range(pred_labels.shape[0]):
if pred_labels[y, x] > 0 and truth_labels[y, x] > 0:
m[pred_labels[y, x]-1, truth_labels[y, x]-1] += 1
truth_used = set([])
for i in range(len(pred_props)):
max_iou = 0
for j in range(len(truth_props)):
if m[i, j] > 0:
iou = m[i, j] / (pred_props[i].area + truth_props[j].area - m[i, j])
if iou > max_iou:
max_iou = iou
if iou > 0.5:
truth_used.add(j)
if max_iou <= 0.5:
max_iou = 0
outputs.append(max_iou)
outputs = np.asarray(outputs)
fn = len(truth_props) - len(truth_used)
all_sep_outputs = []
for k in range(sep_count):
pred_props2 = all_sep_props[k]
outputs_lvl2 = []
m2 = np.zeros((len(pred_props2), len(truth_props)))
for x in range(lvl2_labels[k].shape[1]):
for y in range(lvl2_labels[k].shape[0]):
if lvl2_labels[k][y, x] > 0 and truth_labels[y, x] > 0:
m2[lvl2_labels[k][y, x]-1, truth_labels[y, x]-1] += 1
truth_used2 = set([])
for i in range(len(pred_props2)):
max_iou = 0
for j in range(len(truth_props)):
if m2[i, j] > 0:
iou = m2[i, j] / (pred_props2[i].area + truth_props[j].area - m2[i, j])
if iou > max_iou:
max_iou = iou
if iou > 0.5:
# tp = 1
truth_used2.add(j)
if max_iou <= 0.5:
max_iou = 0
outputs_lvl2.append(max_iou)
outputs_lvl2 = np.asarray(outputs_lvl2)
all_sep_outputs.append(outputs_lvl2)
return inputs, (pred_labels if return_labels else None), all_sep_inputs, (lvl2_labels if return_labels else None), separated_regions, outputs, all_sep_outputs, fn
if __name__ == '__main__':
t0 = timeit.default_timer()
all_ids = df[df['fold'] < 8]['id'].values # < 8
tile_ids = df[df['fold'] < 8]['tile_id'].values
tiles_uniq = np.unique(tile_ids)
if not path.isdir(lgbm_models_folder):
mkdir(lgbm_models_folder)
all_files = []
inputs = []
outputs = []
inputs2 = []
outputs2 = []
separated_regions = []
fns = []
paramss = []
all_nadir_idxs = []
for _i in tqdm(range(len(all_ids))):
f = all_ids[_i] + '.png'
if path.isfile(path.join(train_pred_folder, f)):
tmp = f.split('_')
nadir = int(tmp[1].split('nadir')[1])
nad_idx = 0
if nadir > 40:
nad_idx = 2
elif nadir > 25:
nad_idx = 1
all_nadir_idxs.append(nad_idx)
all_files.append(path.join(train_pred_folder, f))
paramss.append((f, train_pred_folder, [nadir], False, masks_folder))
with Pool(processes=DATA_THREADS) as pool:
results = pool.starmap(get_inputs, paramss, chunksize=len(paramss)//DATA_THREADS)
for i in range(len(results)):
inp, _, inp2, _, sep_regs, otp, otp2, fn = results[i]
inputs.append(inp)
outputs.append(otp)
inputs2.append(inp2)
outputs2.append(otp2)
separated_regions.append(sep_regs)
fns.append(fn)
gbm_models = []
for it in range(num_split_iters):
kf = KFold(n_splits=folds_count, random_state=it+1, shuffle=True)
it2 = -1
for train_idxs0, test_idxs0 in kf.split(tiles_uniq):
train_tiles = tiles_uniq[train_idxs0]
test_tiles = tiles_uniq[test_idxs0]
train_idxs = np.where(np.isin(tile_ids, train_tiles))[0]
test_idxs = np.where(np.isin(tile_ids, test_tiles))[0]
it2 += 1
random.seed(it*1000+it2)
| np.random.seed(it*1000+it2) | numpy.random.seed |
from __future__ import print_function
import numpy as np
import unittest
import discretize
TOL = 1e-8
class TestSimpleQuadTree(unittest.TestCase):
def test_counts(self):
nc = 8
h1 = np.random.rand(nc)*nc*0.5 + nc*0.5
h2 = np.random.rand(nc)*nc*0.5 + nc*0.5
h = [hi/np.sum(hi) for hi in [h1, h2]] # normalize
M = discretize.TreeMesh(h)
points = np.array([[0.1, 0.1, 0.3]])
level = np.array([3])
M.insert_cells(points, level)
M.number()
self.assertEqual(M.nhFx, 4)
self.assertEqual(M.nFx, 12)
self.assertTrue(np.allclose(M.vol.sum(), 1.0))
#self.assertTrue(np.allclose(np.r_[M._areaFxFull, M._areaFyFull], M._deflationMatrix('F') * M.area)
def test_getitem(self):
M = discretize.TreeMesh([4, 4])
M.refine(1)
self.assertEqual(M.nC, 4)
self.assertEqual(len(M), M.nC)
self.assertTrue(np.allclose(M[0].center, [0.25, 0.25]))
# actual = [[0, 0], [0.5, 0], [0, 0.5], [0.5, 0.5]]
# for i, n in enumerate(M[0].nodes):
# self.assertTrue(np.allclose(, actual[i])
def test_getitem3D(self):
M = discretize.TreeMesh([4, 4, 4])
M.refine(1)
self.assertEqual(M.nC, 8)
self.assertEqual(len(M), M.nC)
self.assertTrue(np.allclose(M[0].center, [0.25, 0.25, 0.25]))
# actual = [[0, 0, 0], [0.5, 0, 0], [0, 0.5, 0], [0.5, 0.5, 0],
# [0, 0, 0.5], [0.5, 0, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5]]
# for i, n in enumerate(M[0].nodes):
# self.assertTrue(np.allclose(M._gridN[n, :], actual[i])
def test_refine(self):
M = discretize.TreeMesh([4, 4, 4])
M.refine(1)
self.assertEqual(M.nC, 8)
def test_h_gridded_2D(self):
hx, hy = np.ones(4), np.r_[1., 2., 3., 4.]
M = discretize.TreeMesh([hx, hy])
def refinefcn(cell):
xyz = cell.center
d = (xyz**2).sum()**0.5
if d < 3:
return 2
return 1
M.refine(refinefcn)
H = M.h_gridded
test_hx = np.all(H[:, 0] == np.r_[1., 1., 1., 1., 2., 2., 2.])
test_hy = np.all(H[:, 1] == np.r_[1., 1., 2., 2., 3., 7., 7.])
self.assertTrue(test_hx and test_hy)
# def test_h_gridded_updates(self):
# mesh = discretize.TreeMesh([8, 8])
# mesh.refine(1)
#
# H = mesh.h_gridded
# self.assertTrue(np.all(H[:, 0] == 0.5*np.ones(4)))
# self.assertTrue(np.all(H[:, 1] == 0.5*np.ones(4)))
#
# # refine the mesh and make sure h_gridded is updated
# mesh.refine(2)
# H = mesh.h_gridded
# self.assertTrue(np.all(H[:, 0] == 0.25*np.ones(16)))
# self.assertTrue(np.all(H[:, 1] == 0.25*np.ones(16)))
def test_faceDiv(self):
hx, hy = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8]
T = discretize.TreeMesh([hx, hy], levels=2)
T.refine(lambda xc: 2)
# T.plotGrid(show_it=True)
M = discretize.TensorMesh([hx, hy])
self.assertEqual(M.nC, T.nC)
self.assertEqual(M.nF, T.nF)
self.assertEqual(M.nFx, T.nFx)
self.assertEqual(M.nFy, T.nFy)
self.assertEqual(M.nE, T.nE)
self.assertEqual(M.nEx, T.nEx)
self.assertEqual(M.nEy, T.nEy)
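# permuteF/permuteE/permuteCC reorder TreeMesh quantities into TensorMesh ordering so the two meshes can be compared entry by entry.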
self.assertTrue(np.allclose(M.area, T.permuteF*T.area))
self.assertTrue(np.allclose(M.edge, T.permuteE*T.edge))
self.assertTrue(np.allclose(M.vol, T.permuteCC*T.vol))
# plt.subplot(211).spy(M.faceDiv)
# plt.subplot(212).spy(T.permuteCC*T.faceDiv*T.permuteF.T)
# plt.show()
self.assertEqual((M.faceDiv - T.permuteCC*T.faceDiv*T.permuteF.T).nnz, 0)
def test_serialization(self):
hx, hy = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8]
mesh1 = discretize.TreeMesh([hx, hy], levels=2, x0=np.r_[-1, -1])
mesh1.refine(2)
mesh2 = discretize.TreeMesh.deserialize(mesh1.serialize())
self.assertTrue(np.all(mesh1.x0 == mesh2.x0))
self.assertTrue(np.all(mesh1._n == mesh2._n))
self.assertTrue(np.all(mesh1.gridCC == mesh2.gridCC))
mesh1.x0 = np.r_[-2., 2]
mesh2 = discretize.TreeMesh.deserialize(mesh1.serialize())
self.assertTrue(np.all(mesh1.x0 == mesh2.x0))
class TestOcTree(unittest.TestCase):
def test_counts(self):
nc = 8
h1 = np.random.rand(nc)*nc*0.5 + nc*0.5
h2 = np.random.rand(nc)*nc*0.5 + nc*0.5
h3 = np.random.rand(nc)*nc*0.5 + nc*0.5
h = [hi/np.sum(hi) for hi in [h1, h2, h3]] # normalize
M = discretize.TreeMesh(h, levels=3)
points = np.array([[0.2, 0.1, 0.7],
[0.8, 0.4, 0.2]])
levels = np.array([1, 2])
M.insert_cells(points, levels)
M.number()
# M.plotGrid(show_it=True)
self.assertEqual(M.nhFx, 4)
self.assertTrue(M.nFx, 19)
self.assertTrue(M.nC, 15)
self.assertTrue(np.allclose(M.vol.sum(), 1.0))
# self.assertTrue(np.allclose(M._areaFxFull, (M._deflationMatrix('F') * M.area)[:M.ntFx]))
# self.assertTrue(np.allclose(M._areaFyFull, (M._deflationMatrix('F') * M.area)[M.ntFx:(M.ntFx+M.ntFy)])
# self.assertTrue(np.allclose(M._areaFzFull, (M._deflationMatrix('F') * M.area)[(M.ntFx+M.ntFy):])
# self.assertTrue(np.allclose(M._edgeExFull, (M._deflationMatrix('E') * M.edge)[:M.ntEx])
# self.assertTrue(np.allclose(M._edgeEyFull, (M._deflationMatrix('E') * M.edge)[M.ntEx:(M.ntEx+M.ntEy)])
# self.assertTrue(np.allclose(M._edgeEzFull, (M._deflationMatrix('E') * M.edge)[(M.ntEx+M.ntEy):]))
def test_faceDiv(self):
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
M.refine(lambda xc: 2)
# M.plotGrid(show_it=True)
Mr = discretize.TensorMesh([hx, hy, hz])
self.assertEqual(M.nC, Mr.nC)
self.assertEqual(M.nF, Mr.nF)
self.assertEqual(M.nFx, Mr.nFx)
self.assertEqual(M.nFy, Mr.nFy)
self.assertEqual(M.nE, Mr.nE)
self.assertEqual(M.nEx, Mr.nEx)
self.assertEqual(M.nEy , Mr.nEy)
self.assertTrue(np.allclose(Mr.area, M.permuteF*M.area))
self.assertTrue(np.allclose(Mr.edge, M.permuteE*M.edge))
self.assertTrue(np.allclose(Mr.vol, M.permuteCC*M.vol))
A = Mr.faceDiv - M.permuteCC*M.faceDiv*M.permuteF.T
self.assertTrue(np.allclose(A.data, 0))
def test_edgeCurl(self):
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
M.refine(lambda xc:2)
Mr = discretize.TensorMesh([hx, hy, hz])
A = Mr.edgeCurl - M.permuteF*M.edgeCurl*M.permuteE.T
self.assertTrue(len(A.data)==0 or np.allclose(A.data, 0))
def test_faceInnerProduct(self):
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
# hx, hy, hz = [[(1, 4)], [(1, 4)], [(1, 4)]]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
M.refine(lambda xc:2)
# M.plotGrid(show_it=True)
Mr = discretize.TensorMesh([hx, hy, hz])
# print(M.nC, M.nF, M.getFaceInnerProduct().shape, M.permuteF.shape)
A_face = Mr.getFaceInnerProduct() - M.permuteF * M.getFaceInnerProduct() * M.permuteF.T
A_edge = Mr.getEdgeInnerProduct() - M.permuteE * M.getEdgeInnerProduct() * M.permuteE.T
self.assertTrue(len(A_face.data)==0 or np.allclose(A_face.data, 0))
self.assertTrue(len(A_edge.data)==0 or np.allclose(A_edge.data, 0))
def test_VectorIdentities(self):
hx, hy, hz = [[(1, 4)], [(1, 4)], [(1, 4)]]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
Mr = discretize.TensorMesh([hx, hy, hz])
M.refine(2) #Why wasn't this here before?
self.assertTrue(np.allclose((M.faceDiv * M.edgeCurl).data, 0))
hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
M = discretize.TreeMesh([hx, hy, hz], levels=2)
Mr = discretize.TensorMesh([hx, hy, hz])
M.refine(2)
A1 = M.faceDiv * M.edgeCurl
A2 = Mr.faceDiv * Mr.edgeCurl
self.assertTrue(len(A1.data)==0 or np.allclose(A1.data, 0))
self.assertTrue(len(A2.data)==0 or np.allclose(A2.data, 0))
def test_h_gridded_3D(self):
hx, hy, hz = np.ones(4), np.r_[1., 2., 3., 4.], 2*np.ones(4)
M = discretize.TreeMesh([hx, hy, hz])
def refinefcn(cell):
xyz = cell.center
d = (xyz**2).sum()**0.5
if d < 3:
return 2
return 1
M.refine(refinefcn)
H = M.h_gridded
test_hx = np.all(H[:, 0] == np.r_[1., 1., 1., 1., 1., 1., 1., 1., 2., 2., 2., 2., 2., 2., 2.])
test_hy = np.all(H[:, 1] == np.r_[1., 1., 2., 2., 1., 1., 2., 2., 3., 7., 7., 3., 3., 7., 7.])
test_hz = np.all(H[:, 2] == np.r_[2., 2., 2., 2., 2., 2., 2., 2., 4., 4., 4., 4., 4., 4., 4.])
self.assertTrue(test_hx and test_hy and test_hz)
class Test2DInterpolation(unittest.TestCase):
def setUp(self):
def topo(x):
return np.sin(x*(2.*np.pi))*0.3 + 0.5
def function(cell):
r = cell.center - np.array([0.5]*len(cell.center))
dist1 = np.sqrt(r.dot(r)) - 0.08
dist2 = np.abs(cell.center[-1] - topo(cell.center[0]))
dist = min([dist1, dist2])
# if dist < 0.05:
# return 5
if dist < 0.05:
return 6
if dist < 0.2:
return 5
if dist < 0.3:
return 4
if dist < 1.0:
return 3
else:
return 0
M = discretize.TreeMesh([64, 64], levels=6)
M.refine(function)
self.M = M
def test_fx(self):
r = np.random.rand(self.M.nFx)
P = self.M.getInterpolationMat(self.M.gridFx, 'Fx')
self.assertLess(np.abs(P[:, :self.M.nFx]*r - r).max(), TOL)
def test_fy(self):
r = np.random.rand(self.M.nFy)
P = self.M.getInterpolationMat(self.M.gridFy, 'Fy')
self.assertLess( | np.abs(P[:, self.M.nFx:]*r - r) | numpy.abs |
# flake8: noqa
from contextlib import ExitStack
import click
from omegaconf import OmegaConf
from pfb.workers.main import cli
import pyscilog
pyscilog.init('pfb')
log = pyscilog.get_logger('SPIFIT')
@cli.command()
@click.option('-image', '--image', required=True,
help="Path to model or restored image cube.")
@click.option('-resid', "--residual", required=False,
help="Path to residual image cube.")
@click.option('-o', '--output-filename', required=True,
help="Path to output directory + prefix.")
@click.option('-pp', '--psf-pars', nargs=3, type=float,
help="Beam parameters matching FWHM of restoring beam "
"specified as emaj emin pa."
"By default these are taken from the fits header "
"of the residual image.")
@click.option('--circ-psf/--no-circ-psf', default=False)
@click.option('-th', '--threshold', default=10, type=float, show_default=True,
help="Multiple of the rms in the residual to threshold on."
"Only components above threshold*rms will be fit.")
@click.option('-maxdr', '--maxdr', default=100, type=float, show_default=True,
help="Maximum dynamic range used to determine the "
"threshold above which components need to be fit. "
"Only used if residual is not passed in.")
@click.option('-bw', '--band-weights', type=float,
help="Per bands weights to use during the fit")
@click.option('-pb-min', '--pb-min', type=float, default=0.15,
help="Set image to zero where pb falls below this value")
@click.option('-products', '--products', default='aeikIcmrb', type=str,
help="Outputs to write. Letter correspond to: \n"
"a - alpha map \n"
"e - alpha error map \n"
"i - I0 map \n"
"k - I0 error map \n"
"I - reconstructed cube form alpha and I0 \n"
"c - restoring beam used for convolution \n"
"m - convolved model \n"
"r - convolved residual \n"
"b - average power beam \n"
"Default is to write all of them")
@click.option('-pf', "--padding-frac", default=0.5, type=float,
show_default=True, help="Padding factor for FFT's.")
@click.option('-dc', "--dont-convolve", is_flag=True,
help="Do not convolve by the clean beam before fitting")
@click.option('-rf', '--ref-freq', type=float,
help='Reference frequency where the I0 map is sought. '
"Will overwrite in fits headers of output.")
@click.option('-otype', '--out-dtype', default='f4', type=str,
help="Data type of output. Default is single precision")
@click.option('-acr', '--add-convolved-residuals', is_flag=True,
help='Flag to add in the convolved residuals before '
'fitting components')
@click.option('-bm', '--beam-model', default=None,
help="Fits power beam model. It is assumed that the beam "
"match the fits headers of --image. You can use the binterp "
"worker to create compatible beam models")
@click.option('-ha', '--host-address',
help='Address where the distributed client lives. '
'Will use a local cluster if no address is provided')
@click.option('-nw', '--nworkers', type=int, default=1,
help='Number of workers for the client.')
@click.option('-ntpw', '--nthreads-per-worker', type=int,
help='Number of dask threads per worker.')
@click.option('-nvt', '--nvthreads', type=int,
help="Total number of threads to use for vertical scaling (eg. gridder, fft's etc.)")
@click.option('-mem', '--mem-limit', type=int,
help="Memory limit in GB. Default uses all available memory")
@click.option('-nthreads', '--nthreads', type=int,
help="Total available threads. Default uses all available threads")
def spifit(**kw):
"""
Spectral index fitter
"""
args = OmegaConf.create(kw)
pyscilog.log_to_file(args.output_filename + '.log')
from glob import glob
from omegaconf import ListConfig
# image is either a string or a list of strings that we want to glob on
if isinstance(args.image, str):
image = sorted(glob(args.image))
elif isinstance(args.image, list) or isinstance(args.image, ListConfig):
image = []
for i in range(len(args.image)):
image.append(sorted(glob(args.image[i])))
# make sure it's not empty
try:
assert len(image) > 0
args.image = image
except:
raise ValueError(f"No image at {args.image}")
# same goes for the residual except that it may also be None
if isinstance(args.residual, str):
residual = sorted(glob(args.residual))
elif isinstance(args.residual, list) or isinstance(args.residual, ListConfig):
residual = []
for i in range(len(args.residual)):
residual.append(sorted(glob(args.residual[i])))
if args.residual is not None:
try:
assert len(residual) > 0
args.residual = residual
except:
raise ValueError(f"No residual at {args.residual}")
# we also need the same number of residuals as images
try:
assert len(args.image) == len(args.residual)
except:
raise ValueError(f"Number of images and residuals need to "
"match")
else:
print("No residual passed in!", file=log)
# and finally the beam model
if isinstance(args.beam_model, str):
beam_model = sorted(glob(args.beam_model))
elif isinstance(args.beam_model, list) or isinstance(args.beam_model, ListConfig):
beam_model = []
for i in range(len(args.beam_model)):
beam_model.append(sorted(glob(args.beam_model[i])))
if args.beam_model is not None:
try:
assert len(beam_model) > 0
args.beam_model = beam_model
except:
raise ValueError(f"No beam model at {args.beam_model}")
try:
assert len(args.image) == len(args.beam_model)
except:
raise ValueError(f"Number of images and beam models need to "
"match")
else:
print("Not doing any form of primary beam correction", file=log)
# LB - TODO: can we sort them along freq at this point already?
OmegaConf.set_struct(args, True)
with ExitStack() as stack:
from pfb import set_client
args = set_client(args, stack, log)
# TODO - prettier config printing
print('Input Options:', file=log)
for key in args.keys():
print(' %25s = %s' % (key, args[key]), file=log)
return _spifit(**args)
def _spifit(**kw):
args = OmegaConf.create(kw)
OmegaConf.set_struct(args, True)
import dask.array as da
import numpy as np
from astropy.io import fits
from africanus.model.spi.dask import fit_spi_components
from pfb.utils.fits import load_fits, save_fits, data_from_header, set_wcs
from pfb.utils.misc import convolve2gaussres
# get max gausspars
gaussparf = None
if args.psf_pars is None:
if args.residual is None:
ppsource = args.image
else:
ppsource = args.residual
for image in ppsource:
try:
pphdr = fits.getheader(image)
except Exception as e:
raise e
if 'BMAJ0' in pphdr.keys():
emaj = pphdr['BMAJ0']
emin = pphdr['BMIN0']
pa = pphdr['BPA0']
gausspars = [emaj, emin, pa]
freq_idx0 = 0
elif 'BMAJ1' in pphdr.keys():
emaj = pphdr['BMAJ1']
emin = pphdr['BMIN1']
pa = pphdr['BPA1']
gausspars = [emaj, emin, pa]
freq_idx0 = 1
elif 'BMAJ' in pphdr.keys():
emaj = pphdr['BMAJ']
emin = pphdr['BMIN']
pa = pphdr['BPA']
gausspars = [emaj, emin, pa]
freq_idx0 = 0
else:
raise ValueError("No beam parameters found in residual."
"You will have to provide them manually.")
if gaussparf is None:
gaussparf = gausspars
else:
# we need to take the max in both directions
gaussparf[0] = np.maximum(gaussparf[0], gausspars[0])
gaussparf[1] = np.maximum(gaussparf[1], gausspars[1])
else:
freq_idx0 = 0 # assumption
gaussparf = list(args.psf_pars)
if args.circ_psf:
e = np.maximum(gaussparf[0], gaussparf[1])
gaussparf[0] = e
gaussparf[1] = e
gaussparf[2] = 0.0
gaussparf = tuple(gaussparf)
print("Using emaj = %3.2e, emin = %3.2e, PA = %3.2e \n" % gaussparf, file=log)
# get required data products
image_dict = {}
for i in range(len(args.image)):
image_dict[i] = {}
# load model image
model = load_fits(args.image[i], dtype=args.out_dtype).squeeze()
mhdr = fits.getheader(args.image[i])
if model.ndim < 3:
model = model[None, :, :]
l_coord, ref_l = data_from_header(mhdr, axis=1)
l_coord -= ref_l
m_coord, ref_m = data_from_header(mhdr, axis=2)
m_coord -= ref_m
if mhdr["CTYPE4"].lower() == 'freq':
freq_axis = 4
stokes_axis = 3
elif mhdr["CTYPE3"].lower() == 'freq':
freq_axis = 3
stokes_axis = 4
else:
raise ValueError("Freq axis must be 3rd or 4th")
freqs, ref_freq = data_from_header(mhdr, axis=freq_axis)
image_dict[i]['freqs'] = freqs
nband = freqs.size
npix_l = l_coord.size
npix_m = m_coord.size
xx, yy = | np.meshgrid(l_coord, m_coord, indexing='ij') | numpy.meshgrid |
import sys
sys.path.append('./')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import multiprocessing as multi
import optuna
import changefinder
import bocpd
import dmdl.sdmdl as sdmdl
import dmdl.hsdmdl2 as hsdmdl2
import tsmdl.aw2s_mdl as aw2s_mdl
import utils.sdmdl_nml as sdmdl_nml
import utils.hsdmdl2_nml as hsdmdl2_nml
from multiprocessing import Pool
from functools import partial
from copy import deepcopy
from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score
def _calc_metrics(idx_data, dataset, changepoints, tolerance_delay, threshold, retrospective): # calculate the metrics
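# Work on a private copy of the detector so its internal state is not shared between worker processes.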
_retrospective = deepcopy(retrospective)
scores = _retrospective.calc_scores(dataset[idx_data])
F1_score, precision, recall = calc_F1_score(
scores, changepoints, tolerance_delay, threshold)
return F1_score, precision, recall
# obtain the optimal threshold
def calc_opt_threshold(train, changepoints, tolerance_delay, retrospective):
_retrospective = deepcopy(retrospective)
scores = _retrospective.calc_scores(train)
_, _, _, opt_threshold = calc_F1_score(
scores, changepoints, tolerance_delay)
return opt_threshold
def _objective_CF(trial, train, changepoints, tolerance_delay): # ChangeFinder
# hyperparameters
r = trial.suggest_uniform('r', 0.01, 0.99)
order = trial.suggest_int('order', 1, 20)
smooth = trial.suggest_int('smooth', 3, 20)
retrospective = changefinder.Retrospective(r=r, order=order, smooth=smooth)
scores = retrospective.calc_scores(train)
F1_score, _, _, _ = calc_F1_score(scores, changepoints, tolerance_delay)
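# Optuna minimises the objective, so the negative F1 score is returned.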
return -F1_score
def conduct_CF(n_trials, n_samples, dataset, changepoints, tolerance_delay): # ChangeFinder
# hyperparameter tuning
objective_CF = partial(_objective_CF, train=dataset[0],
changepoints=changepoints, tolerance_delay=tolerance_delay)
study = optuna.create_study()
study.optimize(objective_CF, n_trials=n_trials, n_jobs=-1)
opt_r = study.best_params['r']
opt_order = study.best_params['order']
opt_smooth = study.best_params['smooth']
# optimal threshold
retrospective = changefinder.Retrospective(
r=opt_r, order=opt_order, smooth=opt_smooth)
opt_threshold = calc_opt_threshold(train=dataset[0], changepoints=changepoints,
tolerance_delay=tolerance_delay, retrospective=retrospective)
# calculate metrics
calc_metrics = partial(_calc_metrics, dataset=dataset, changepoints=changepoints,
tolerance_delay=tolerance_delay, threshold=opt_threshold, retrospective=retrospective)
p = Pool(multi.cpu_count() - 1)
args = list(range(1, n_samples))
res = np.array(p.map(calc_metrics, args))
p.close()
# result
print("F1 score: ", np.mean(res[:, 0]), "±", np.std(res[:, 0]))
print("precision: ", np.mean(res[:, 1]), "±", np.std(res[:, 1]))
print("recall: ", np.mean(res[:, 2]), "±", np.std(res[:, 2]))
row = pd.DataFrame({"method": ["ChangeFinder"], "F1_score_mean": np.mean(res[:, 0]), "F1_score_std": | np.std(res[:, 0]) | numpy.std |
import pytest
from mrmustard import *
import numpy as np
import tensorflow as tf
from thewalrus.random import random_covariance
from thewalrus.quantum import real_to_complex_displacements
from mrmustard.physics import gaussian as gp, fock as fp
from mrmustard.math import Math
math = Math()
class TestGaussianStates:
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_is_symmetric(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity is symmetric"""
cov1 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means1 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
cov2 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means2 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
f12 = gp.fidelity(means1, cov1, means2, cov2, hbar)
f21 = gp.fidelity(means2, cov2, means1, cov1, hbar)
assert np.allclose(f12, f21)
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_is_leq_one(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity is between 0 and 1"""
cov1 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means1 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
cov2 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means2 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
f12 = gp.fidelity(means1, cov1, means2, cov2, hbar)
assert 0 <= np.real_if_close(f12) < 1.0
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_with_self(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity of two identical quantum states is 1"""
cov = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means = np.random.rand(2 * num_modes)
assert np.allclose(gp.fidelity(means, cov, means, cov, hbar=hbar), 1, atol=1e-4)
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
def test_fidelity_coherent_state(self, num_modes, hbar):
"""Test the fidelity of two multimode coherent states"""
beta1 = np.random.rand(num_modes) + 1j * np.random.rand(num_modes)
beta2 = np.random.rand(num_modes) + 1j * np.random.rand(num_modes)
means1 = real_to_complex_displacements(np.concatenate([beta1, beta1.conj()]), hbar=hbar)
means2 = real_to_complex_displacements(np.concatenate([beta2, beta2.conj()]), hbar=hbar)
cov1 = hbar * np.identity(2 * num_modes) / 2
cov2 = hbar * np.identity(2 * num_modes) / 2
fid = gp.fidelity(means1, cov1, means2, cov2, hbar=hbar)
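# Analytic fidelity of two coherent states: |<beta1|beta2>|^2 = exp(-||beta1 - beta2||^2).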
expected = np.exp(-np.linalg.norm(beta1 - beta2) ** 2)
assert np.allclose(expected, fid)
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("r1", np.random.rand(3))
@pytest.mark.parametrize("r2", np.random.rand(3))
def test_fidelity_squeezed_vacuum(self, r1, r2, hbar):
"""Tests fidelity between two squeezed states"""
cov1 = np.diag([np.exp(2 * r1), np.exp(-2 * r1)]) * hbar / 2
cov2 = np.diag([np.exp(2 * r2), np.exp(-2 * r2)]) * hbar / 2
mu = np.zeros([2])
assert np.allclose(1 / np.cosh(r1 - r2), gp.fidelity(mu, cov1, mu, cov2, hbar=hbar))
@pytest.mark.parametrize("n1", [0.5, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("n2", [0.5, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
def test_fidelity_thermal(self, n1, n2, hbar):
"""Test fidelity between two thermal states"""
expected = 1 / (1 + n1 + n2 + 2 * n1 * n2 - 2 * np.sqrt(n1 * n2 * (n1 + 1) * (n2 + 1)))
cov1 = hbar * (n1 + 0.5) * np.identity(2)
cov2 = hbar * (n2 + 0.5) * np.identity(2)
mu1 = np.zeros([2])
mu2 = np.zeros([2])
assert np.allclose(expected, gp.fidelity(mu1, cov1, mu2, cov2, hbar=hbar))
@pytest.mark.parametrize("hbar", [0.5, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("r", [-2.0, 0.0, 2.0])
@pytest.mark.parametrize("alpha", np.random.rand(10) + 1j * np.random.rand(10))
def test_fidelity_vac_to_displaced_squeezed(self, r, alpha, hbar):
"""Calculates the fidelity between a coherent squeezed state and vacuum"""
cov1 = np.diag([np.exp(2 * r), np.exp(-2 * r)]) * hbar / 2
means1 = real_to_complex_displacements(np.array([alpha, np.conj(alpha)]), hbar=hbar)
means2 = np.zeros([2])
cov2 = np.identity(2) * hbar / 2
expected = (
np.exp(-np.abs(alpha) ** 2)
* np.abs(np.exp( | np.tanh(r) | numpy.tanh |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 11:47:44 2019
@author: Aayush
This file contains the dataloader and the augmentations and preprocessing done
Required Preprocessing for all images (test, train and validation set):
1) Gamma correction by a factor of 0.8
2) Local contrast-limited adaptive histogram equalization (CLAHE) with clipLimit=1.5, tileGridSize=(8,8)
3) Normalization
Train Image Augmentation Procedure Followed
1) Random horizontal flip with 50% probability.
2) Starburst pattern augmentation with 20% probability.
3) Random length lines augmentation around a random center with 20% probability.
4) Gaussian blur with kernel size (7,7) and random sigma with 20% probability.
5) Translation of image and labels in any direction with random factor less than 20.
"""
import numpy as np
import torch
from torch.utils.data import Dataset
import os
from PIL import Image
from torchvision import transforms
import cv2
import random
import os.path as osp
from utils import one_hot2dist
import copy
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])
#%%
class RandomHorizontalFlip(object):
def __call__(self, img,label):
if random.random() < 0.5:
return img.transpose(Image.FLIP_LEFT_RIGHT),\
label.transpose(Image.FLIP_LEFT_RIGHT)
return img,label
class Starburst_augment(object):
## We have generated the starburst pattern from a train image 000000240768.png
## Please follow the file Starburst_generation_from_train_image_000000240768.pdf attached in the folder
## This procedure is used to handle people whose glasses produce multiple reflections
## A random translation is applied to the starburst pattern mask
def __call__(self, img):
x=np.random.randint(1, 40)
y=np.random.randint(1, 40)
mode = np.random.randint(0, 2)
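# mode 0 shifts the starburst pattern x pixels to the right, mode 1 shifts it x pixels to the left (pad on one side, crop the other).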
starburst=Image.open('starburst_black.png').convert("L")
if mode == 0:
starburst = np.pad(starburst, pad_width=((0, 0), (x, 0)), mode='constant')
starburst = starburst[:, :-x]
if mode == 1:
starburst = np.pad(starburst, pad_width=((0, 0), (0, x)), mode='constant')
starburst = starburst[:, x:]
img[92+y:549+y,0:400]=np.array(img)[92+y:549+y,0:400]*((255-np.array(starburst))/255)+np.array(starburst)
return Image.fromarray(img)
def getRandomLine(xc, yc, theta):
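# Sample a random segment of the line through (xc, yc) with slope tan(theta): one endpoint within ~50 px of the centre, the other 50-200 px away, each on a random side.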
x1 = xc - 50*np.random.rand(1)*(1 if np.random.rand(1) < 0.5 else -1)
y1 = (x1 - xc)*np.tan(theta) + yc
x2 = xc - (150*np.random.rand(1) + 50)*(1 if np.random.rand(1) < 0.5 else -1)
y2 = (x2 - xc)*np.tan(theta) + yc
return x1, y1, x2, y2
class Gaussian_blur(object):
def __call__(self, img):
sigma_value=np.random.randint(2, 7)
return Image.fromarray(cv2.GaussianBlur(img,(7,7),sigma_value))
class Translation(object):
def __call__(self, base,mask):
factor_h = 2*np.random.randint(1, 20)
factor_v = 2*np.random.randint(1, 20)
mode = np.random.randint(0, 4)
# print (mode,factor_h,factor_v)
if mode == 0:
aug_base = | np.pad(base, pad_width=((factor_v, 0), (0, 0)), mode='constant') | numpy.pad |
import strax
import straxen
import numpy as np
import unittest
from strax.testutils import run_id
from hypothesis import strategies, given, settings
TEST_DATA_LENGTH = 3
R_TOL_DEFAULT = 1e-5
def _not_close_to_0_or_1(x, rtol=R_TOL_DEFAULT):
return not (np.isclose(x, 1, rtol=rtol) or np.isclose(x, 0, rtol=rtol))
class TestComputePeakBasics(unittest.TestCase):
"""Tests for peak basics plugin"""
def setUp(self, context=straxen.contexts.demo):
self.st = context()
self.n_top = self.st.config.get('n_top_pmts', 2)
# Make sure that the check is on. Otherwise we cannot test it.
self.st.set_config({'check_peak_sum_area_rtol': R_TOL_DEFAULT})
self.peaks_basics_compute = self.st.get_single_plugin(run_id, 'peak_basics').compute
@settings(deadline=None)
@given(strategies.integers(min_value=0,
max_value=TEST_DATA_LENGTH - 1),
)
def test_aft_equals1(self, test_peak_idx):
"""Fill top array with area 1"""
test_data = self.get_test_peaks(self.n_top)
test_data[test_peak_idx]['area_per_channel'][:self.n_top] = 1
test_data[test_peak_idx]['area'] = np.sum(test_data[test_peak_idx]['area_per_channel'])
peaks = self.peaks_basics_compute(test_data)
assert peaks[test_peak_idx]['area_fraction_top'] == 1
@settings(deadline=None)
@given(strategies.floats(min_value=0,
max_value=2,
).filter(_not_close_to_0_or_1),
strategies.integers(min_value=0,
max_value=TEST_DATA_LENGTH - 1,
),
)
def test_bad_peak(self, off_by_factor, test_peak_idx):
"""
Lets deliberately make some data that is not self-consistent to
run into the error in the test.
"""
test_data = self.get_test_peaks(self.n_top)
test_data[test_peak_idx]['area_per_channel'][:self.n_top] = 1
area = | np.sum(test_data[test_peak_idx]['area_per_channel']) | numpy.sum |
import pickle
import math
import numpy as np
split_portion = 4
WORD_COUNT = (1.0360595565014956, 1)
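# Assumed to be the average token-count ratio between the two sides of the corpus; used below to rescale positions before taking offsets a - b / WORD_COUNT[0].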
SOS = '<SOS>'
EOS = '<EOS>'
with open('../iwslt/train.tok.bpe.32000.en', 'rt') as file_en:
with open('../iwslt/train.tok.bpe.32000.de', 'rt') as file_de:
with open('../iwslt/reverse.subword.align', 'rt') as file_fa:
with open('../iwslt/reverse.subword.align.right2.4.pickle', 'wb') as file_fas:
final_count = {}
stats = {}
for x, y, z in zip(file_de, file_en, file_fa):
z_dict = {}
x_list = x.split()
y_list = y.split()
len_x = len(x_list)
len_y = len(y_list)
for z_element in z.split():
try:
a, b = z_element.rsplit('-', 1)
except:
print("z_element", z_element)
a, b = float(a), int(b)
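# Bin the English-side position b into one of split_portion equal segments (1-based key).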
key = math.ceil((b + 0.5) / len_y * split_portion) # round((b + 1) / len_x * split_portion) - 1
if key > split_portion:
print("key", key)
print("b", b)
print("len_y", len_y)
# print("b", b)
if b == 0:
# print("inside")
w = SOS
# print("w", w)
else:
w = y_list[b - 1]
if w not in z_dict:
z_dict[w] = {}
if key in z_dict[w]:
z_dict[w][key].append(a - b / WORD_COUNT[0]) # round((int(a) + 1) / len_x * split_portion) - 1
else:
z_dict[w][key] = [a - b / WORD_COUNT[0]]
# additionally align the last target word to the source EOS position
w = y_list[-1]
key = split_portion
a = len_x
b = len_y - 1
if w not in z_dict:
z_dict[w] = {}
if key in z_dict[w]:
z_dict[w][key].append(
a - b / WORD_COUNT[0]) # round((int(a) + 1) / len_x * split_portion) - 1
else:
z_dict[w][key] = [a - b / WORD_COUNT[0]]
for w in z_dict:
for k in z_dict[w]:
if w not in final_count:
final_count[w] = {}
if k in final_count[w]:
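# Merge the stored statistics with the new batch using the pooled mean/variance update (parallel variance combination).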
old_mean = final_count[w][k]['mean']
old_var = final_count[w][k]['var']
old_count = final_count[w][k]['count']
current_count = len(z_dict[w][k])
current_mean = np.mean(z_dict[w][k])
current_var = np.var(z_dict[w][k])
new_count = old_count + current_count
new_mean = (old_mean * old_count + sum(z_dict[w][k])) / new_count
new_var = (old_count * (old_var + (old_mean - new_mean) ** 2) + current_count * (current_var + (current_mean - new_mean) ** 2)) / new_count
final_count[w][k]['mean'] = new_mean
final_count[w][k]['var'] = new_var
final_count[w][k]['count'] = new_count
else:
final_count[w][k] = {}
final_count[w][k]['mean'] = | np.mean(z_dict[w][k]) | numpy.mean |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import common_fn as cf
plt.rcParams["svg.hashsalt"]=0
#Input parms
test_lim_arr=np.empty([0,2])
for llim in np.arange(0,1,0.2):
for ulim in np.arange(llim+0.1,1,0.2):
test_lim_arr= | np.append(test_lim_arr,[[llim,ulim]],axis=0) | numpy.append |
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `dot` module.
"""
import unittest
import numpy as np
import scipy
from numpy import testing
from ..dot import Dot, SumMultiply
from ..gaussian import Gaussian, GaussianARD
from bayespy.nodes import GaussianGamma
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestSumMultiply(TestCase):
def test_parent_validity(self):
"""
Test that the parent nodes are validated properly in SumMultiply
"""
V = GaussianARD(1, 1)
X = Gaussian(np.ones(1), np.identity(1))
Y = Gaussian(np.ones(3), np.identity(3))
Z = Gaussian(np.ones(5), np.identity(5))
A = SumMultiply(X, ['i'])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply('i', X)
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(X, ['i'], ['i'])
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply('i->i', X)
self.assertEqual(A.dims, ((1,), (1,1)))
A = SumMultiply(X, ['i'], Y, ['j'], ['i','j'])
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply('i,j->ij', X, Y)
self.assertEqual(A.dims, ((1,3), (1,3,1,3)))
A = SumMultiply(V, [], X, ['i'], Y, ['i'], [])
self.assertEqual(A.dims, ((), ()))
A = SumMultiply(',i,i->', V, X, Y)
self.assertEqual(A.dims, ((), ()))
# Gaussian-gamma parents
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], ['i'])
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
A = SumMultiply('i,i->i', Y, C)
self.assertEqual(A.dims, ((3,), (3,3), (), ()))
C = GaussianGamma(np.ones(3), np.identity(3), 1, 1)
A = SumMultiply(Y, ['i'], C, ['i'], [])
self.assertEqual(A.dims, ((), (), (), ()))
A = SumMultiply('i,i->', Y, C)
self.assertEqual(A.dims, ((), (), (), ()))
# Error: not enough inputs
self.assertRaises(ValueError,
SumMultiply)
self.assertRaises(ValueError,
SumMultiply,
X)
# Error: too many keys
self.assertRaises(ValueError,
SumMultiply,
Y,
['i', 'j'])
self.assertRaises(ValueError,
SumMultiply,
'ij',
Y)
# Error: not broadcastable
self.assertRaises(ValueError,
SumMultiply,
Y,
['i'],
Z,
['i'])
self.assertRaises(ValueError,
SumMultiply,
'i,i',
Y,
Z)
# Error: output key not in inputs
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['j'])
self.assertRaises(ValueError,
SumMultiply,
'i->j',
X)
# Error: non-unique input keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'ii',
X)
# Error: non-unique output keys
self.assertRaises(ValueError,
SumMultiply,
X,
['i'],
['i','i'])
self.assertRaises(ValueError,
SumMultiply,
'i->ii',
X)
# String has too many '->'
self.assertRaises(ValueError,
SumMultiply,
'i->i->i',
X)
# String has too many input nodes
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X)
# Same parent several times
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
X)
# Same parent several times via deterministic node
Xh = SumMultiply('i->i', X)
self.assertRaises(ValueError,
SumMultiply,
'i,i->i',
X,
Xh)
def test_message_to_child(self):
"""
Test the message from SumMultiply to its children.
"""
def compare_moments(u0, u1, *args):
Y = SumMultiply(*args)
u_Y = Y.get_moments()
self.assertAllClose(u_Y[0], u0)
self.assertAllClose(u_Y[1], u1)
# Test constant parent
y = np.random.randn(2,3,4)
compare_moments(y,
linalg.outer(y, y, ndim=2),
'ij->ij',
y)
# Do nothing for 2-D array
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
compare_moments(y[0],
y[1],
'ij->ij',
Y)
compare_moments(y[0],
y[1],
Y,
[0,1],
[0,1])
# Sum over the rows of a matrix
Y = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y = Y.get_moments()
mu = np.einsum('...ij->...j', y[0])
cov = np.einsum('...ijkl->...jl', y[1])
compare_moments(mu,
cov,
'ij->j',
Y)
compare_moments(mu,
cov,
Y,
[0,1],
[1])
# Inner product of three vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
X3 = GaussianARD(np.random.randn(7,6,5,2),
np.random.rand(7,6,5,2),
plates=(7,6,5),
shape=(2,))
x3 = X3.get_moments()
mu = np.einsum('...i,...i,...i->...', x1[0], x2[0], x3[0])
cov = np.einsum('...ij,...ij,...ij->...', x1[1], x2[1], x3[1])
compare_moments(mu,
cov,
'i,i,i',
X1,
X2,
X3)
compare_moments(mu,
cov,
'i,i,i->',
X1,
X2,
X3)
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9])
compare_moments(mu,
cov,
X1,
[9],
X2,
[9],
X3,
[9],
[])
# Outer product of two vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
plates=(5,),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
mu = np.einsum('...i,...j->...ij', x1[0], x2[0])
cov = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
compare_moments(mu,
cov,
'i,j->ij',
X1,
X2)
compare_moments(mu,
cov,
X1,
[9],
X2,
[7],
[9,7])
# Matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ik,...kj->...ij', y1[0], y2[0])
cov = np.einsum('...ikjl,...kmln->...imjn', y1[1], y2[1])
compare_moments(mu,
cov,
'ik,kj->ij',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','k'],
Y2,
['k','j'],
['i','j'])
# Trace of a matrix product
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(5,2,3),
np.random.rand(5,2,3),
plates=(5,),
shape=(2,3))
y2 = Y2.get_moments()
mu = np.einsum('...ij,...ji->...', y1[0], y2[0])
cov = np.einsum('...ikjl,...kilj->...', y1[1], y2[1])
compare_moments(mu,
cov,
'ij,ji',
Y1,
Y2)
compare_moments(mu,
cov,
'ij,ji->',
Y1,
Y2)
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'])
compare_moments(mu,
cov,
Y1,
['i','j'],
Y2,
['j','i'],
[])
# Vector-matrix-vector product
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
plates=(),
shape=(3,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x2 = X2.get_moments()
Y = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
plates=(),
shape=(3,2))
y = Y.get_moments()
mu = np.einsum('...i,...ij,...j->...', x1[0], y[0], x2[0])
cov = np.einsum('...ia,...ijab,...jb->...', x1[1], y[1], x2[1])
compare_moments(mu,
cov,
'i,ij,j',
X1,
Y,
X2)
compare_moments(mu,
cov,
X1,
[1],
Y,
[1,2],
X2,
[2])
# Complex sum-product of 0-D, 1-D, 2-D and 3-D arrays
V = GaussianARD(np.random.randn(7,6,5),
np.random.rand(7,6,5),
plates=(7,6,5),
shape=())
v = V.get_moments()
X = GaussianARD(np.random.randn(6,1,2),
np.random.rand(6,1,2),
plates=(6,1),
shape=(2,))
x = X.get_moments()
Y = GaussianARD(np.random.randn(3,4),
np.random.rand(3,4),
plates=(5,),
shape=(3,4))
y = Y.get_moments()
Z = GaussianARD(np.random.randn(4,2,3),
np.random.rand(4,2,3),
plates=(6,5),
shape=(4,2,3))
z = Z.get_moments()
mu = np.einsum('...,...i,...kj,...jik->...k', v[0], x[0], y[0], z[0])
cov = np.einsum('...,...ia,...kjcb,...jikbac->...kc', v[1], x[1], y[1], z[1])
compare_moments(mu,
cov,
',i,kj,jik->k',
V,
X,
Y,
Z)
compare_moments(mu,
cov,
V,
[],
X,
['i'],
Y,
['k','j'],
Z,
['j','i','k'],
['k'])
# Test with constant nodes
N = 10
D = 5
a = np.random.randn(N, D)
B = Gaussian(
np.random.randn(D),
random.covariance(D),
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('ni,i->n', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('ni,nj,ij->n', a, a, B.get_moments()[1]),
)
#
# Gaussian-gamma parents
#
# Outer product of vectors
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,))
x1 = X1.get_moments()
X2 = GaussianGamma(
np.random.randn(6,1,2),
random.covariance(2),
np.random.rand(6,1),
np.random.rand(6,1),
plates=(6,1)
)
x2 = X2.get_moments()
Y = SumMultiply('i,j->ij', X1, X2)
u = Y._message_to_child()
y = np.einsum('...i,...j->...ij', x1[0], x2[0])
yy = np.einsum('...ik,...jl->...ijkl', x1[1], x2[1])
self.assertAllClose(u[0], y)
self.assertAllClose(u[1], yy)
self.assertAllClose(u[2], x2[2])
self.assertAllClose(u[3], x2[3])
# Test with constant nodes
N = 10
M = 8
D = 5
a = np.random.randn(N, 1, D)
B = GaussianGamma(
np.random.randn(M, D),
random.covariance(D, size=(M,)),
np.random.rand(M),
np.random.rand(M),
ndim=1,
)
X = SumMultiply('i,i->', B, a)
np.testing.assert_allclose(
X.get_moments()[0],
np.einsum('nmi,mi->nm', a, B.get_moments()[0]),
)
np.testing.assert_allclose(
X.get_moments()[1],
np.einsum('nmi,nmj,mij->nm', a, a, B.get_moments()[1]),
)
np.testing.assert_allclose(
X.get_moments()[2],
B.get_moments()[2],
)
np.testing.assert_allclose(
X.get_moments()[3],
B.get_moments()[3],
)
pass
def test_message_to_parent(self):
"""
Test the message from SumMultiply node to its parents.
"""
data = 2
tau = 3
def check_message(true_m0, true_m1, parent, *args, F=None):
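            # Helper (comment added for clarity): build a SumMultiply node from
            # *args (unless a ready-made node F is given), observe a constant
            # through a GaussianARD child, and compare the message sent to the
            # parent with the given index against the analytic (m0, m1) pair.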
if F is None:
A = SumMultiply(*args)
B = GaussianARD(A, tau)
B.observe(data*np.ones(A.plates + A.dims[0]))
else:
A = F
(A_m0, A_m1) = A._message_to_parent(parent)
self.assertAllClose(true_m0, A_m0)
self.assertAllClose(true_m1, A_m1)
pass
# Check: different message to each of multiple parents
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * x2[0]
m1 = -0.5 * tau * x2[1] * np.identity(2)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[9],
X2,
[9],
[9])
m0 = tau * data * x1[0]
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
'i,i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[9],
X2,
[9],
[9])
# Check: key not in output
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
m0 = tau * data * np.ones(2)
m1 = -0.5 * tau * np.ones((2,2))
check_message(m0, m1, 0,
'i',
X1)
check_message(m0, m1, 0,
'i->',
X1)
check_message(m0, m1, 0,
X1,
[9])
check_message(m0, m1, 0,
X1,
[9],
[])
# Check: key not in some input
X1 = GaussianARD(np.random.randn(),
np.random.rand())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x2 = X2.get_moments()
m0 = tau * data * np.sum(x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * np.identity(2),
axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
[9],
[9])
m0 = tau * data * x1[0] * np.ones(2)
m1 = -0.5 * tau * x1[1] * np.identity(2)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
[9],
[9])
# Check: keys in different order
Y1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
y1 = Y1.get_moments()
Y2 = GaussianARD(np.random.randn(2,3),
np.random.rand(2,3),
ndim=2)
y2 = Y2.get_moments()
m0 = tau * data * y2[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y2[1] * misc.identity(2,3))
check_message(m0, m1, 0,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 0,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
m0 = tau * data * y1[0].T
m1 = -0.5 * tau * np.einsum('ijlk->jikl', y1[1] * misc.identity(3,2))
check_message(m0, m1, 1,
'ij,ji->ij',
Y1,
Y2)
check_message(m0, m1, 1,
Y1,
['i','j'],
Y2,
['j','i'],
['i','j'])
# Check: plates when different dimensionality
X1 = GaussianARD(np.random.randn(5),
np.random.rand(5),
shape=(),
plates=(5,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(5,3),
np.random.rand(5,3),
shape=(3,),
plates=(5,))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,3)) * x2[0], axis=-1)
m1 = -0.5 * tau * np.sum(x2[1] * misc.identity(3), axis=(-1,-2))
check_message(m0, m1, 0,
',i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
[],
X2,
['i'],
['i'])
m0 = tau * data * x1[0][:,np.newaxis] * np.ones((5,3))
m1 = -0.5 * tau * x1[1][:,np.newaxis,np.newaxis] * misc.identity(3)
check_message(m0, m1, 1,
',i->i',
X1,
X2)
check_message(m0, m1, 1,
X1,
[],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node has the
# same plates
X1 = GaussianARD(np.random.randn(5,4,3),
np.random.rand(5,4,3),
shape=(3,),
plates=(5,4))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.ones((5,4,3)) * x2[0]
m1 = -0.5 * tau * x2[1] * misc.identity(3)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when node does
# not have that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1))
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1))
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: other parent's moments broadcasts over plates when the node
# only broadcasts that plate
X1 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(1,1))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3),
np.random.rand(3),
shape=(3,),
plates=(5,4))
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((5,4,3)) * x2[0], axis=(0,1), keepdims=True)
m1 = -0.5 * tau * np.sum(np.ones((5,4,1,1))
* misc.identity(3)
* x2[1],
axis=(0,1),
keepdims=True)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'])
# Check: broadcasted dimensions
X1 = GaussianARD(np.random.randn(1,1),
np.random.rand(1,1),
ndim=2)
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
ndim=2)
x2 = X2.get_moments()
m0 = tau * data * np.sum(np.ones((3,2)) * x2[0],
keepdims=True)
m1 = -0.5 * tau * np.sum(misc.identity(3,2) * x2[1],
keepdims=True)
check_message(m0, m1, 0,
'ij,ij->ij',
X1,
X2)
check_message(m0, m1, 0,
X1,
[0,1],
X2,
[0,1],
[0,1])
m0 = tau * data * np.ones((3,2)) * x1[0]
m1 = -0.5 * tau * misc.identity(3,2) * x1[1]
check_message(m0, m1, 1,
'ij,ij->ij',
X1,
X2)
check_message(m0, m1, 1,
X1,
[0,1],
X2,
[0,1],
[0,1])
# Check: non-ARD observations
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1)
x1 = X1.get_moments()
Lambda = np.array([[2, 1.5], [1.5, 2]])
F = SumMultiply('i->i', X1)
Y = Gaussian(F, Lambda)
y = np.random.randn(2)
Y.observe(y)
m0 = np.dot(Lambda, y)
m1 = -0.5 * Lambda
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Check: mask with same shape
X1 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
x1 = X1.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i->i', X1)
Y = GaussianARD(F, tau, ndim=1)
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * mask[:,np.newaxis] * np.ones(2)
m1 = -0.5 * tau * mask[:,np.newaxis,np.newaxis] * np.identity(2)
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Check: mask larger
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
shape=(2,),
plates=())
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
x2 = X2.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i,i->i', X1, X2)
Y = GaussianARD(F, tau,
plates=(3,),
ndim=1)
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0], axis=0)
m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis]
* x2[1]
* np.identity(2),
axis=0)
check_message(m0, m1, 0,
'i,i->i',
X1,
X2,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
X2,
['i'],
['i'],
F=F)
# Check: mask for broadcasted plate
X1 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1,
plates=(1,))
x1 = X1.get_moments()
X2 = GaussianARD(np.random.randn(2),
np.random.rand(2),
ndim=1,
plates=(3,))
x2 = X2.get_moments()
mask = np.array([True, False, True])
F = SumMultiply('i,i->i', X1, X2)
Y = GaussianARD(F, tau,
plates=(3,),
ndim=1)
Y.observe(data*np.ones((3,2)), mask=mask)
m0 = tau * data * np.sum(mask[:,np.newaxis] * x2[0],
axis=0,
keepdims=True)
m1 = -0.5 * tau * np.sum(mask[:,np.newaxis,np.newaxis]
* x2[1]
* np.identity(2),
axis=0,
keepdims=True)
check_message(m0, m1, 0,
'i->i',
X1,
F=F)
check_message(m0, m1, 0,
X1,
['i'],
['i'],
F=F)
# Test with constant nodes
N = 10
M = 8
D = 5
K = 3
a = np.random.randn(N, D)
B = Gaussian(
np.random.randn(D),
random.covariance(D),
)
C = GaussianARD(
np.random.randn(M, 1, D, K),
np.random.rand(M, 1, D, K),
ndim=2
)
F = SumMultiply('i,i,ij->', a, B, C)
tau = | np.random.rand(M, N) | numpy.random.rand |
import numpy as np
import cv2
class Anchor():
"""docstring for Anchor"""
def __init__(self,feature_w,feature_h):
self.width=255
self.height=255
self.w=feature_w
self.h=feature_h
self.base=64
self.stride=16
self.scale=[1/3,1/2,1,2,3]
self.anchors=self.gen_anchors()
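    # Hypothetical usage sketch (the 17x17 feature-map size is an assumption, not
    # taken from this file): Anchor(17, 17) yields 17*17*5 boxes in an array of
    # shape (1445, 4), stored in corner format [x1, y1, x2, y2] for IoU matching.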
def gen_single_anchor(self):
scale=np.array(self.scale)
s=self.base*self.base
w=np.sqrt(s/scale)
h=w*scale
c_x=(self.stride-1)/2
c_y=(self.stride-1)/2
anchor=np.vstack([c_x*np.ones_like(scale),c_y*np.ones_like(scale),w,h])
anchor=anchor.transpose()#[x,y,w,h]
anchor=self.center_to_corner(anchor)#[x1,y1,x2,y2]
anchor=anchor.astype(np.int32)
return anchor
def gen_anchors(self):
anchor=self.gen_single_anchor()
k=anchor.shape[0]
shift_x=[x*self.stride for x in range(self.w)]
shift_y=[y*self.stride for y in range(self.h)]
shift_x,shift_y=np.meshgrid(shift_x,shift_y)
shifts=np.vstack([shift_x.ravel(),shift_y.ravel(),shift_x.ravel(),shift_y.ravel()]).transpose()
a=shifts.shape[0]
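        # Broadcast the k base anchors against all a = w*h grid shifts:
        # (1, k, 4) + (a, 1, 4) -> (a, k, 4), i.e. every base anchor translated
        # to every feature-map cell before flattening to (a*k, 4).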
anchors=anchor.reshape((1,k,4))+shifts.reshape((a,1,4))
anchors=anchors.reshape((a*k,4))#[x1,y1,x2,y2]
#anchors=self.corner_to_center(anchors)#[x,y,w,h]
anchors=anchors.astype(np.float32)
return anchors
def center_to_corner(self,box):
box_temp=np.zeros_like(box)
box_temp[:,0]=box[:,0]-(box[:,2]-1)/2
box_temp[:,1]=box[:,1]-(box[:,3]-1)/2
box_temp[:,2]=box[:,0]+(box[:,2]-1)/2
box_temp[:,3]=box[:,1]+(box[:,3]-1)/2
#box_temp=box_temp.astype(np.int32)
return box_temp
def corner_to_center(self,box):
box_temp=np.zeros_like(box)
box_temp[:,0]=box[:,0]+(box[:,2]-box[:,0])/2
box_temp[:,1]=box[:,1]+(box[:,3]-box[:,1])/2
box_temp[:,2]=(box[:,2]-box[:,0])
box_temp[:,3]=(box[:,3]-box[:,1])
#box_temp=box_temp.astype(np.int32)
return box_temp
def diff_anchor_gt(self,gt,anchors):
#gt [x,y,w,h]
#anchors [x,y,w,h]
diff_anchors=np.zeros_like(anchors).astype(np.float32)
diff_anchors[:,0]=(gt[0]-anchors[:,0])/(anchors[:,2]+0.01)
diff_anchors[:,1]=(gt[1]-anchors[:,1])/(anchors[:,3]+0.01)
diff_anchors[:,2]=np.log(gt[2]/(anchors[:,2]+0.01))
diff_anchors[:,3]=np.log(gt[3]/(anchors[:,3]+0.01))
return diff_anchors#[dx,dy,dw,dh]
def iou(self,box1,box2):
""" Intersection over Union (iou)
Args:
box1 : [N,4]
box2 : [K,4]
box_type:[x1,y1,x2,y2]
Returns:
iou:[N,K]
"""
N=box1.shape[0]
K=box2.shape[0]
box1=np.array(box1.reshape((N,1,4)))+np.zeros((1,K,4))#box1=[N,K,4]
box2=np.array(box2.reshape((1,K,4)))+np.zeros((N,1,4))#box1=[N,K,4]
x_max=np.max(np.stack((box1[:,:,0],box2[:,:,0]),axis=-1),axis=2)
x_min=np.min(np.stack((box1[:,:,2],box2[:,:,2]),axis=-1),axis=2)
y_max=np.max(np.stack((box1[:,:,1],box2[:,:,1]),axis=-1),axis=2)
y_min=np.min(np.stack((box1[:,:,3],box2[:,:,3]),axis=-1),axis=2)
tb=x_min-x_max
lr=y_min-y_max
tb[np.where(tb<0)]=0
lr[np.where(lr<0)]=0
over_square=tb*lr
all_square=(box1[:,:,2]-box1[:,:,0])*(box1[:,:,3]-box1[:,:,1])+(box2[:,:,2]-box2[:,:,0])*(box2[:,:,3]-box2[:,:,1])-over_square
return over_square/all_square
def pos_neg_anchor(self,gt):
inds_inside = np.where(
(self.anchors[:, 0] >= 0) &
(self.anchors[:, 1] >= 0) &
(self.anchors[:, 2] < self.width) & # width
(self.anchors[:, 3] < self.height) # height
)[0]
all_box=np.zeros((self.anchors.shape[0],4),dtype=np.float32)
target_box=np.zeros((self.anchors.shape[0],4),dtype=np.float32)
target_inside_weight_box=np.zeros((self.anchors.shape[0],4),dtype=np.float32)
target_outside_weight_box=np.ones((self.anchors.shape[0],4),dtype=np.float32)
label=-np.ones((self.anchors.shape[0],),dtype=np.float32)
anchors_inside=(self.anchors[inds_inside]).astype(np.float32)
mask_label_inside=-np.ones((len(inds_inside),),dtype=np.float32)
gt_array=np.array(gt).reshape((1,4))
gt_array=self.center_to_corner(gt_array)
iou_value=self.iou(anchors_inside,gt_array)
pos=np.zeros_like(iou_value)
pos[np.where(iou_value>0.3)]=iou_value[np.where(iou_value>0.3)]
pos_index=np.argsort(pos[:,0])[::-1]
pos_num=min(len(pos_index),16)
pos_index=pos_index[:pos_num]
mask_label_inside[pos_index]=1
neg=np.zeros_like(iou_value)
#neg[np.where(iou_value<0.3)]=iou_value[np.where(iou_value<0.3)]
#neg_index=np.argsort(neg[:,0])[::-1]
neg_index=np.where(iou_value<0.3)[0]
#print(neg_index)
neg_index=np.random.choice(neg_index,(64-pos_num))
#print(neg_index)
# neg_num=min(len(neg_index),64-pos_num)
# neg_index=neg_index[:neg_num]
mask_label_inside[neg_index]=0
diff_anchors=self.diff_anchor_gt(gt,self.corner_to_center(anchors_inside))
#print(diff_anchors.shape)
target_box[inds_inside]=diff_anchors
all_box[inds_inside]=anchors_inside
label[inds_inside]=mask_label_inside
target_inside_weight_box[np.where(label==1)]=np.array([1.,1.,1.,1.])
target_outside_weight_box=target_outside_weight_box*1.0/len(np.where(label==1)[0])
#print(target_outside_weight_box[np.where(target_outside_weight_box>0)])
return label,target_box,target_inside_weight_box,target_outside_weight_box,all_box
def pos_neg_anchor2(self,gt):
all_box=self.anchors.copy()
all_box[np.where(all_box<0)]=0
all_box[np.where(all_box>self.width-1)]=self.width-1
target_box=np.zeros((self.anchors.shape[0],4),dtype=np.float32)
target_inside_weight_box=np.zeros((self.anchors.shape[0],4),dtype=np.float32)
target_outside_weight_box=np.ones((self.anchors.shape[0],4),dtype=np.float32)
label=-np.ones((self.anchors.shape[0],),dtype=np.float32)
gt_array=np.array(gt).reshape((1,4))
gt_array=self.center_to_corner(gt_array)
iou_value=self.iou(all_box,gt_array)
pos=np.zeros_like(iou_value)
pos[np.where(iou_value>0.3)]=iou_value[np.where(iou_value>0.3)]
pos_index=np.argsort(pos[:,0])[::-1]
pos_num=min(len(pos_index),16)
pos_index=pos_index[:pos_num]
label[pos_index]=1
neg= | np.zeros_like(iou_value) | numpy.zeros_like |
#!/usr/bin/env python
# --------------------------------------------------------
# LDDP
# Licensed under UC Berkeley's Standard Copyright [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
from fast_rcnn.bbox_transform import bbox_transform_inv, clip_boxes,bbox_transform
from boxTools import *
from fast_rcnn.config import cfg
class DPP():
def __init__(self,stds=[],means=[],sim_classes=[],epsilon=0.01,loss_weight=0.001):
self.stds =stds
self.means = means
self.sim_classes = sim_classes
self.epsilon = epsilon
self._loss_weight = loss_weight
def select_bg(self,Phi_labels,boxes,labels,bbox_pred,keeps_Y,good_gt_overlap,M,im_shape_w,im_shape_h):
"""
Find B in p(B|Xb)
"""
selected_item = range(M)
prob_dpp = np.ones((M,))
ignores=[]
dict_keeps_Y = {}
        for i, j in keeps_Y.items():
if j not in dict_keeps_Y:
dict_keeps_Y[j]=[]
dict_keeps_Y[j].append(i)
for k in range(M):
if (k in keeps_Y and keeps_Y[k]==Phi_labels[k]) \
or (k in good_gt_overlap and Phi_labels[k]==labels[k] and labels[k]>0):
ignores.append(k)
else:
label_k = labels[k]
if label_k in dict_keeps_Y:
loc_lbl = bbox_pred[[k],4*label_k:4*(label_k+1)]
loc_lbl = loc_lbl * self.stds[label_k,:] + self.means[label_k,:]
pbox = bbox_transform_inv(boxes[[k],:], loc_lbl)
pbox = clip_boxes(pbox, (im_shape_w,im_shape_h))
pbox = np.reshape(np.tile(pbox,len(dict_keeps_Y[label_k])),(len(dict_keeps_Y[label_k]),4))
Y_selected_ll = bbox_pred[dict_keeps_Y[label_k],4*label_k:4*(label_k+1)]
Y_selected_ll = Y_selected_ll*self.stds[label_k,:] + self.means[label_k,:]
Y_selected_pbox = bbox_transform_inv(boxes[dict_keeps_Y[label_k],:], Y_selected_ll)
Y_selected_pbox = clip_boxes(Y_selected_pbox, (im_shape_w,im_shape_h))
if np.max(IoU_target(pbox,Y_selected_pbox)) > cfg.TRAIN.IGNORANCE:
ignores.append(k)
selected_item = np.array([x for ii,x in enumerate(selected_item) if ii not in ignores])
prob_dpp = [x for ii,x in enumerate(prob_dpp) if ii not in ignores]
return selected_item,prob_dpp
def dpp_greedy(self,S, scores_s, score_power, max_per_image, among_ims, num_gt_per_img=1000, close_thr=0.0001):
"""
Greedy optimization to select boxes
S: similarity matrix
scores_s : predicted scores over different categories
"""
prob_thresh = cfg.TEST.PROB_THRESH
S = S[among_ims,:][:,among_ims]
scores_s = scores_s[among_ims]
M = S.shape[0]
#keep: selected_boxes
keep = []
#left : boxes not selected yet
left = np.zeros((M,3))
left[:,0] = np.arange(M) #box number
left[:,1] = 1 # 0/1? Is the box left?
selected_prob = []
while (len(keep) < max_per_image) and sum(left[:,1])>0:
z = np.zeros((M,1))
z[keep] = 1
sum_scores = (score_power*np.log(scores_s).T).dot(z)
prob_rest = np.zeros((M,))
left_indices = np.where(left[:,1]==1)[0]
done_indices = np.where(left[:,1]==0)[0]
if len(keep)>0:
S_prev = S[keep,:][:,keep]
det_D = np.linalg.det(S_prev)
d_1 = np.linalg.inv(S_prev)
else:
det_D = 1
d_1 = 0
# ====================================================================
# |D a^T|
# det(|a b|)= (b - a D^{-1} a^T)det(D)
#
# Here "D" = S_prev and "a","b" are the similarity values added by each single item
# in left_indices.
# To avoid using a for loop, we compute the above det for all items in left_indices
# all at once through appropriate inner vector multiplications as the next line:
# ====================================================================
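            # Illustrative note (not part of the original code): with keep holding
            # k selected boxes, S_prev is k x k and S[left_indices][:, keep] is
            # (n_left x k), so the row-wise products below evaluate the
            # (b - a D^{-1} a^T) term for every remaining candidate in one
            # vectorised pass instead of a per-candidate loop.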
if len(keep)>0:
                prob_rest[left_indices] = -np.sum(np.multiply(np.dot(S[left_indices,:][:,keep],d_1),S[left_indices,:][:,keep]),1)
prob_rest[left_indices] = np.log((prob_rest[left_indices] + S[left_indices,left_indices]) * det_D)+\
(sum_scores + score_power * np.log(scores_s[(left[left_indices,0]).astype(int)]))
prob_rest[done_indices] = np.min(prob_rest)-100
max_ind = np.argmax(prob_rest)
ind = left[max_ind,0]
close_inds = np.where(prob_rest >= (prob_rest[max_ind] + np.log(close_thr)))[0]
far_inds = np.where(prob_rest < (prob_rest[max_ind] + np.log(close_thr)))[0]
tops_prob_rest = np.argsort(-prob_rest[close_inds]).astype(int)
if len(keep) >= num_gt_per_img:
break
elif len(keep)> 0:
cost = np.max(S[np.array(range(M))[close_inds][tops_prob_rest],:][:,keep],1)
good_cost = list(np.where(cost <= prob_thresh)[0])
bad_cost = list(np.where(cost > prob_thresh)[0])
if len(good_cost)>0:
ind = np.array(range(M))[close_inds][tops_prob_rest[good_cost[0]]]
keep.append(ind)
left[ind,1] = 0
#left[far_inds,1]=0
selected_prob.append(prob_rest[max_ind])
else:
left[:,1]=0
else:
keep.append(max_ind)
left[max_ind,1] = 0
selected_prob.append(prob_rest[max_ind])
return keep,selected_prob
def dpp_MAP(self,im_dets_pair, scores, boxes,sim_classes,score_thresh,epsilon,max_per_image,close_thr=0.00001):
"""
DPP MAP inference
"""
M0 = boxes.shape[0]
num_classes = scores.shape[1]
scores = scores[:,1:] #ignore background
# consider only top 5 class scores per box
num_ignored = scores.shape[1]-5
sorted_scores = | np.argsort(-scores,1) | numpy.argsort |
# Copyright (c) Materials Virtual Lab.
# Distributed under the terms of the BSD License.
"""
Algorithms for diffusion pathway analysis
"""
from collections import Counter
import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform
class ProbabilityDensityAnalysis:
r"""
Compute the time-averaged probability density distribution of selected
species on a "uniform" (in terms of fractional coordinates) 3-D grid.
Note that \int_{\Omega}d^3rP(r) = 1
If you use this class, please consider citing the following paper:
<NAME>.; <NAME>.; <NAME>. and <NAME>. "Role of Na+ Interstitials and
Dopants in Enhancing the Na+ Conductivity of the Cubic Na3PS4 Superionic
Conductor". Chem. Mater. (2015), 27, pp 8318–8325.
"""
def __init__(self, structure, trajectories, interval=0.5, species=("Li", "Na")):
"""
Initialization.
Args:
structure (Structure): crystal structure
trajectories (numpy array): ionic trajectories of the structure
from MD simulations. It should be (1) stored as 3-D array [
Ntimesteps, Nions, 3] where 3 refers to a,b,c components;
(2) in fractional coordinates.
interval(float): the interval between two nearest grid points
(in Angstrom)
species(list of str): list of species that are of interest
"""
# initial settings
trajectories = np.array(trajectories)
# All fractional coordinates are between 0 and 1.
trajectories -= np.floor(trajectories)
assert np.all(trajectories >= 0) and np.all(trajectories <= 1)
indices = [j for j, site in enumerate(structure) if site.specie.symbol in species]
lattice = structure.lattice
frac_interval = [interval / l for l in lattice.abc]
nsteps = len(trajectories)
# generate the 3-D grid
ra = np.arange(0.0, 1.0, frac_interval[0])
rb = np.arange(0.0, 1.0, frac_interval[1])
rc = np.arange(0.0, 1.0, frac_interval[2])
lens = [len(ra), len(rb), len(rc)]
ngrid = lens[0] * lens[1] * lens[2]
agrid = ra[:, None] * np.array([1, 0, 0])[None, :]
bgrid = rb[:, None] * np.array([0, 1, 0])[None, :]
cgrid = rc[:, None] * np.array([0, 0, 1])[None, :]
grid = agrid[:, None, None] + bgrid[None, :, None] + cgrid[None, None, :]
# Calculate time-averaged probability density function distribution Pr
count = Counter()
Pr = | np.zeros(ngrid, dtype=np.double) | numpy.zeros |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
import sklearn.cluster as clu
# define the standard font sizes
small = 7
medium = 10
large = 12
# define the conversion constant from pt to cm
pt2cm = 0.0352778
# define the font size default dictionary for figures
font_sizes_raw = {
'small': {
'xlabel': small,
'ylabel': small,
'zlabel': small,
'labels': small,
'xticks': small,
'yticks': small,
'zticks': small-2,
'ticks': small,
'minor_xticks': small,
'minor_yticks': small,
'minor_ticks': small,
'title': small,
'legend': small-1,
'legend_title': small,
},
'medium': {
'xlabel': medium,
'ylabel': medium,
'zlabel': medium,
'labels': medium,
'xticks': medium,
'yticks': medium,
'zticks': medium,
'ticks': medium,
'minor_xticks': medium,
'minor_yticks': medium,
'minor_ticks': medium,
'title': medium,
'legend': medium-1,
'legend_title': medium,
},
'large': {
'xlabel': large,
'ylabel': large,
'zlabel': large,
'labels': large,
'xticks': large,
'yticks': large,
'zticks': large,
'ticks': large,
'minor_xticks': large,
'minor_yticks': large,
'minor_ticks': large,
'title': large,
'legend': large-1,
'legend_title': large,
},
}
def plot_2d(data_in, rows=1, columns=1, labels=None, markers=None, linestyle='-', color=None,
xerr=None, yerr=None, fig=None, fontsize=None, dpi=None, **kwargs):
"""Wrapper for 2D plotting data into subplots"""
# create a new figure window
if fig is None:
if dpi is None:
dpi = 300
else:
dpi = dpi
fig = plt.figure(dpi=dpi)
# initialize a plot counter
plot_counter = 1
# for all the rows
for row in range(rows):
# for all the columns
for col in range(columns):
# if there's no plot in this position, skip it
if len(data_in) < plot_counter:
continue
# add the subplot
ax = fig.add_subplot(rows, columns, plot_counter)
# for all the lines in the list
for count, lines in enumerate(data_in[plot_counter - 1]):
if color is not None:
c = color[plot_counter - 1]
else:
c = 'b'
                # plot x,y or just y depending on the size of the data
if len(lines.shape) == 2:
# line2d = ax.plot(lines[:, 0], lines[:, 1])
if xerr is not None:
if yerr is not None:
line2d = ax.errorbar(lines[:, 0], lines[:, 1], xerr=xerr[plot_counter - 1][count]
, yerr=yerr[plot_counter - 1][count])
else:
line2d = ax.errorbar(lines[:, 0], lines[:, 1], xerr=xerr[plot_counter - 1][count]
, yerr=None)
else:
if yerr is not None:
# line2d = ax.errorbar(range(lines.shape[0]), lines, xerr=None
# , yerr=yerr[plot_counter - 1][count])
line2d = ax.plot(lines[:, 0], lines[:, 1], color=c)
y_error = yerr[plot_counter - 1][count]
ax.fill_between(lines[:, 0], lines[:, 1]-y_error, lines[:, 1]+y_error, alpha=0.5, color=c)
else:
line2d = ax.errorbar(lines[:, 0], lines[:, 1], xerr=None
, yerr=None)
else:
if xerr is not None:
if yerr is not None:
line2d = ax.errorbar(range(lines.shape[0]), lines, xerr=xerr[plot_counter - 1][count]
, yerr=yerr[plot_counter - 1][count])
else:
line2d = ax.errorbar(range(lines.shape[0]), lines, xerr=xerr[plot_counter - 1][count]
, yerr=None)
else:
if yerr is not None:
# line2d = ax.errorbar(range(lines.shape[0]), lines, xerr=None
# , yerr=yerr[plot_counter - 1][count])
line2d = ax.plot(range(lines.shape[0]), lines, color=c)
y_error = yerr[plot_counter - 1][count]
ax.fill_between(range(lines.shape[0]), lines-y_error, lines+y_error, alpha=0.5, color=c)
else:
line2d = ax.errorbar(range(lines.shape[0]), lines, xerr=None
, yerr=None)
# change the marker if provided, otherwise use dots
if markers is not None:
line2d[0].set_marker(markers[plot_counter - 1][count])
else:
line2d[0].set_marker('.')
                if linestyle != '-':
line2d[0].set_linestyle(linestyle[plot_counter - 1][count])
# change the font size if provided
if fontsize is not None:
ax[0].set_fontsize(fontsize[plot_counter - 1][count])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# add labels if provided
if labels is not None:
plt.legend(labels[plot_counter - 1])
# apply kwargs
# TODO: implement kwargs
# update the plot counter
plot_counter += 1
return fig
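# Minimal, hypothetical demo of plot_2d (not called anywhere; the curves and the
# 1x2 subplot layout are made up for illustration only).
def _plot_2d_demo():
    t = np.linspace(0, 2 * np.pi, 50)
    # first subplot: a bare 1-D array (plotted against its index),
    # second subplot: an (N, 2) array interpreted as x/y columns
    lines = [[np.sin(t)], [np.vstack([t, np.cos(t)]).T]]
    return plot_2d(lines, rows=1, columns=2)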
def plot_3d(data_in, fig=None):
"""Wrapper for 3D plotting data"""
# create a new figure window
if fig is None:
fig = plt.figure()
# add the subplot
ax = fig.add_subplot(111, projection='3d')
# for all the lines in the list
for lines in data_in:
ax.plot(lines[:, 0], lines[:, 1], lines[:, 2], marker='.')
return fig
def animation_plotter(motivedata, bonsaidata, cricket_data, xlim, ylim, interval=10):
"""Plot animations from the motive, bonsai and cricket"""
# TODO: generalize function to any number of lines to plot
# First set up the figure, the axis, and the plot element we want to animate
fig0 = plt.figure()
ax0 = plt.axes(xlim=xlim, ylim=ylim)
line0, = ax0.plot([], [], lw=2)
line1, = ax0.plot([], [], lw=2)
line2, = ax0.plot([], [], lw=2)
# initialization function: plot the background of each frame
def init():
line0.set_data([], [])
line1.set_data([], [])
line2.set_data([], [])
return line0, line1, line2
# animation function. This is called sequentially
def animate(i):
# x = np.linspace(0, 2, 1000)
# y = np.sin(2 * np.pi * (x - 0.01 * i))
line0.set_data(motivedata[:i, 0], motivedata[:i, 1])
line1.set_data(bonsaidata[:i, 0], bonsaidata[:i, 1])
line2.set_data(cricket_data[:i, 0], cricket_data[:i, 1])
return line0, line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig0, animate, init_func=init,
frames=motivedata.shape[0], interval=interval, blit=True)
return anim
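# Hypothetical sketch (trajectories are made up): animate three random 2-D walks
# with animation_plotter; keep a reference to the returned animation so it is not
# garbage collected before rendering.
def _animation_plotter_demo():
    walks = [np.cumsum(np.random.randn(200, 2), axis=0) for _ in range(3)]
    return animation_plotter(*walks, xlim=(-20, 20), ylim=(-20, 20), interval=10)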
def simple_animation(data_in, interval=10):
"""Animate the trajectories given"""
# First set up the figure, the axis, and the plot element we want to animate
fig0 = plt.figure()
# ax0 = plt.axes(xlim=xlim, ylim=ylim)
xlim = [ | np.min(data_in[[0, 2, 4, 6, 8], :]) | numpy.min |
"""Property transfer models."""
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.neighbors import NearestNeighbors
class PropertiesTransfer(BaseEstimator):
"""Model to transfer rock properties using Nearest Neighbors algorithm."""
def __init__(self):
self._nn = None
self._neigh_dist = None
self._neigh_ind = None
self._new_shape = None
def fit(self, original_grid, new_grid, n_neighbors=5, normalize_vector=(1, 1, 1)):
"""Fit Nearest Neighbors model.
Parameters
----------
original_grid : geology.Grid
Original grid.
new_grid : geology.Grid
New grid.
n_neighbors : int, optional
number of neighbors to use, by default 5.
normalize_vector : list, optional
vector to normalize distance, by default [1, 1, 1].
"""
self._nn = NearestNeighbors(n_neighbors=n_neighbors)
self._nn.fit(original_grid.cell_centroids.reshape(-1, 3) / np.asarray(normalize_vector))
self._neigh_dist, self._neigh_ind = self._nn.kneighbors(
new_grid.cell_centroids.reshape(-1, 3) / np.asarray(normalize_vector))
self._new_shape = new_grid.dimens
return self
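    # Hypothetical usage sketch: fit once on a pair of grids, then reuse the stored
    # neighbour indices/distances to map any per-cell property onto the new grid, e.g.
    #     pt = PropertiesTransfer().fit(original_grid, new_grid, n_neighbors=5)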
def dump(self, path):
"""Dump to a file (*.npz).
Parameters
----------
path : str or pathlib.Path
Path to file, ``.npz`` extension will be appended to the file name if it is not
already there.
"""
np.savez(path, **{att: getattr(self, att) for att in ('_neigh_dist',
'_neigh_ind', '_new_shape')},
)
return self
def load(self, path):
"""Load from file.
Parameters
----------
path : str or pathlib.Path
File path.
"""
npzfile = | np.load(path, allow_pickle=True) | numpy.load |
# -*- coding: utf-8 -*-
import nnabla as nn
import numpy as np
import rawdata as rdat
from tex_util import Texture
""" from nnc_proj.($$project_name_of_nnc) import network """
from nnc_proj.model import network
nn.clear_parameters()
nn.parameter.load_parameters('./nnc_proj/model.nnp')
ratios = rdat.ratios
mvec = rdat.max_vector
def estimate_sscurve(stress_roots, tex_info, num=1):
'''
return:(array_like, [rootNum, 50]) strain, stress
'''
x = nn.Variable((num, 1, 128, 128))
x2 = nn.Variable((num, 1))
y1, y2 = network(x, x2, test=True)
    # two components because of RD and TD: [num, xx, 2]
rootsNum = len(stress_roots)
strain = np.empty((num, rootsNum, 50, 2))
stress = np.empty((num, rootsNum, 50, 2))
y_norm = np.array([np.linspace(0.5, 1, 50) for j in range(2)]).T
for cnt in range(num):
tex = Texture(volume=1000, tex_info=tex_info)
img = tex.pole_figure()
x.d[cnt, 0] = img / 255.0
for i, root in enumerate(stress_roots):
x2.d = ratios[root]
y1.forward()
y2.forward()
curve = y1.d[:]
max_vec = y2.d[:] * mvec
for cnt in range(num):
strain[cnt, i] = curve[cnt] * max_vec[cnt, 0, :]
stress[cnt, i] = y_norm * max_vec[cnt, 1, :]
ave_stress = np.average(stress, axis=0)
ave_strain = np.average(strain, axis=0)
std = | np.std(stress, axis=0) | numpy.std |
"""module to deal with gaussian cube type data
NB: for all transformations, the cubes coordinate system is understood to be
A = np.array([
[['(x0,y0,z0)', '(x0,y0,z1)'],
['(x0,y1,z0)', '(x0,y1,z1)']],
[['(x1,y0,z0)', '(x1,y0,z1)'],
['(x1,y1,z0)', '(x1,y1,z1)']]
])
which leads to;
A.shape -> (x length, y length, z length)
"""
from collections import OrderedDict
from itertools import product
import warnings
import numpy
from ejplugins import validate_against_schema
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", category=ImportWarning)
import pymatgen as pym
from pymatgen.io.ase import AseAtomsAdaptor
import numpy as np
import ase
from ipyatom.geometry2d_utils import minimum_bounding_box
import ipyatom.geometry3d_utils as g3
from scipy.linalg import expm
from scipy.ndimage import zoom as ndzoom
from scipy.interpolate import interpn
from scipy.spatial.qhull import Delaunay
from jsonschema import validate
from jsonextended import units as eunits
from ipyatom.utils import slice_mask, round_to_base, get_default_atom_map
from ipyatom.repeat_cell import atoms_to_dict
def gcube_to_dict(cube, cell_vectors, centre=(0., 0., 0.), name="", dtype="", vstruct=None, color_bbox="black"):
""" convert gaussian cube data to visual dict
Parameters
----------
cube: numpy.array
cell_vectors: list
[[a1,a2,a3],[b1,b2,b3],[c1,c2,c3]]
centre: list
[x, y, z]
name: str
name of structure
dtype: str
label of density type (e.g. charge or spin)
vstruct: dict
an existing vstruct to append to
color_bbox: str or None
color of outline bbox
Returns
-------
"""
a, b, c = cell_vectors
centre = 0.5 * (np.array(a) + np.array(b) + np.array(c))
output = {'type': 'repeat_density',
'name': name,
'dtype': dtype,
'centre': centre.tolist(),
'dcube': cube.copy(),
'cell_vectors': {"a": a, "b": b, "c": c},
'color_bbox': color_bbox,
'transforms': []}
if vstruct is not None:
vstruct["elements"].append(output)
return vstruct
else:
return {'elements': [output], 'transforms': []}
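# Hypothetical sketch (cube values and cell vectors are made up): build a single
# 'repeat_density' element from a random 10x10x10 cube in a 5 Angstrom cubic cell.
def _gcube_to_dict_demo():
    cube = np.random.rand(10, 10, 10)
    return gcube_to_dict(cube, [[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]],
                         name="demo", dtype="charge")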
# def ejdata_to_dict(data, name="", dtype="charge", lunit="angstrom", vstruct=None, color_bbox="black",
# retrieve_atoms=True, atom_map=None, **kwargs):
# """ convert ejplugin data to visual dict
#
# Parameters
# ----------
# data: dict
# must contain density and cell_vectors keys
# name: str
# name of structure
# dtype: str
# density type ("charge" or "spin")
# lunit: str
# length unit
# vstruct: dict
# an existing vstruct to append to
# color_bbox: str or None
# color of outline bbox
# retrieve_atoms: bool
# if present retrieve atomic positions as repeat_cell element (requires symbols and fcoords)
# atom_map: None or dict
# a mapping of atom labels to keys; ["radius", "color_fill", "color_outline", "transparency"],
# e.g. {"H": {"radius": 1, "color_fill": '#bfbfbf', "color_outline": None, "transparency": 1.}, ...}
# kwargs : object
# additional per atom parameters (must be lists the same length as number of atoms), e.g. charge=[0,1,-1]
#
# Returns
# -------
#
# """
# gkey = "{}_density".format(dtype)
# if gkey not in data or "cell_vectors" not in data:
# raise ValueError("data does not contain both cell_vectors and {} keys".format(gkey))
# validate(data["cell_vectors"], {"type": "object", "required": ["a", "b", "c"],
# "properties": {
# "a": {"type": "object", "required": ["units", "magnitude"]},
# "b": {"type": "object", "required": ["units", "magnitude"]},
# "c": {"type": "object", "required": ["units", "magnitude"]}
# }})
# cell = eunits.combine_quantities(data["cell_vectors"])
# cell = eunits.apply_unitschema(cell, {"a": lunit, "b": lunit, "c": lunit}, as_quantity=False)
# cell_vectors = [cell["a"].tolist(), cell["b"].tolist(), cell["c"].tolist()]
# output = gcube_to_dict(data[gkey], cell_vectors, name=name, dtype=dtype,
# vstruct=vstruct, color_bbox=color_bbox)
#
# if "symbols" in data and "fcoords" in data and retrieve_atoms:
# atoms = ase.Atoms(symbols=data["symbols"], scaled_positions=data["fcoords"], cell=cell_vectors)
# output = atoms_to_dict(atoms, name=name, color_bbox=None, vstruct=output, atom_map=atom_map, **kwargs)
# elif "symbols" in data and "ccoords" in data and retrieve_atoms:
# atoms = ase.Atoms(symbols=data["symbols"], positions=data["ccoords"], cell=cell_vectors)
# output = atoms_to_dict(atoms, name=name, color_bbox=None, vstruct=output, atom_map=atom_map, **kwargs)
#
# return output
def ejdata_to_dict(data, name="", lunit="angstrom", vstruct=None, color_bbox="black",
retrieve_atoms=True, atom_map=None, **kwargs):
""" convert ejplugin data to visual dict
Parameters
----------
data: dict
must contain density and cell_vectors keys
name: str
name of structure
lunit: str
length unit
vstruct: dict
an existing vstruct to append to
color_bbox: str or None
color of outline bbox
retrieve_atoms: bool
if present retrieve atomic positions as repeat_cell element (requires symbols and fcoords)
atom_map: None or dict
a mapping of atom labels to keys; ["radius", "color_fill", "color_outline", "transparency"],
e.g. {"H": {"radius": 1, "color_fill": '#bfbfbf', "color_outline": None, "transparency": 1.}, ...}
kwargs : object
additional per atom parameters (must be lists the same length as number of atoms), e.g. charge=[0,1,-1]
Returns
-------
"""
validate_against_schema(data, "edensity")
data = eunits.combine_quantities(data)
data = eunits.apply_unitschema(data, {"a": lunit, "b": lunit, "c": lunit, "ccoords": lunit}, as_quantity=False)
cell = data["cell_vectors"]
cell_vectors = [cell["a"].tolist(), cell["b"].tolist(), cell["c"].tolist()]
vstruct = {'elements': [], 'transforms': []} if vstruct is None else vstruct
for density in data["densities"]:
vstruct = gcube_to_dict(density["magnitude"], cell_vectors,
name=name, dtype=density["type"],
vstruct=vstruct, color_bbox=color_bbox)
if "atoms" in data and retrieve_atoms:
adict = {"cell": cell_vectors}
if "symbols" in data["atoms"]:
adict["symbols"] = data["atoms"]["symbols"]
else:
adict["numbers"] = data["atoms"]["atomic_number"]
if "ccoords" in data["atoms"]:
adict["positions"] = data["atoms"]["ccoords"]
else:
adict["scaled_positions"] = data["atoms"]["fcoords"]
atoms = ase.Atoms(**adict)
vstruct = atoms_to_dict(atoms, name=name, color_bbox=None, vstruct=vstruct, atom_map=atom_map, **kwargs)
return vstruct
_atom_map_schema = {
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9]*$": {
"type": "object",
"required": ["radius", "color_fill"],
"properties": {
"radius": {"type": "number"},
}
}
}
}
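# Example atom_map entry satisfying the schema above (radius and colour values are
# illustrative only): {"Na": {"radius": 1.8, "color_fill": "#ab5cf2"}}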
def atoms_to_rdensity(atoms, cube_dims=(50, 50, 50), name="", dtype="nuclei", color_bbox="black", vstruct=None,
atom_map=None, rdist_implement=2):
""" convert an atom object to a repeat density
Parameters
----------
atoms: pymatgen.core.structure.Structure or ase.Atoms
cube_dims: tuple of int
(adim, bdim, cdim) of final cube
name: str
name of structure
color_bbox: str or None
color of outline bbox
vstruct: dict
an existing vstruct to append to
atom_map: None or dict
a mapping of atom labels to keys; ["radius", "color_fill"],
e.g. {"H": {"radius": 1, "color_fill": '#bfbfbf'}, ...}
rdist_implement: int
implementation for assigning coordinate to atom site (for optimisation testing)
Returns
-------
vstruct: dict
color_map: dict
{(<label>, <color>): <value in dcube>, ...}
"""
if isinstance(atoms, ase.atoms.Atoms):
atoms = AseAtomsAdaptor.get_structure(atoms)
if not isinstance(atoms, pym.core.structure.Structure):
raise ValueError("struct must be ase.Atoms or pymatgen.Structure")
if vstruct is not None:
if "elements" not in vstruct:
raise ValueError("the existing vstruct does not have an elements key")
# get atom data
if atom_map is None:
atom_map = get_default_atom_map()
validate(atom_map, _atom_map_schema)
atom_data = atoms.as_dict()
a, b, c = [_ for _ in atoms.lattice.matrix]
centre = 0.5 * (a + b + c)
sites = []
for i, site in enumerate(atom_data["sites"]):
label = site["label"]
site_data = {"ccoord": site["xyz"], "label": label}
site_data.update(atom_map[label])
sites.append(site_data)
# create a map of site labels to color and index
color_map = {(d[0], d[1]): i + 1 for i, d in enumerate(sorted(
set([(site["label"], site["color_fill"]) for site in sites])))}
# create fractional coordinates cube
ndim, mdim, ldim = cube_dims
gcube = np.full((ldim, mdim, ndim), np.nan)
indices = np.array(list(product(range(ldim), range(mdim), range(ndim))))
# convert indices to cartesian coordinates
coords = np.einsum('...jk,...k->...j', np.array([a, b, c]).T,
np.divide( | np.asarray(indices, dtype=np.float64) | numpy.asarray |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
misclassification_list = list()
model_num = 10
iteration_num = 100
dataset_num = 2
rep = 2
test_images = 120
for i in range(model_num):
filename = 'mis_classify_' + str(i+1) + '_.csv'
data_pd = pd.read_csv(filename, sep=',', header=None)
data = data_pd.values
data = data[:, 100:200]
misclassification_list.append(data)
misclassify_dict = dict()
for i in range(len(misclassification_list)):
unique, counts = np.unique(misclassification_list[i], return_counts=True)
dic = dict(zip(unique, counts))
misclassify_dict = {x: misclassify_dict.get(x, 0) + dic.get(x, 0) for x in set(misclassify_dict).union(dic)}
total_mis_classify = np.zeros((test_images*model_num, iteration_num))
for i in range(model_num):
total_mis_classify[i*test_images:(i+1)*test_images, :] = misclassification_list[i]
num_mis_classify_images = np.count_nonzero(total_mis_classify)
for i in range(test_images):
if i+1 in misclassify_dict:
pass
else:
misclassify_dict[i+1] = 0
mis_list = np.zeros(test_images)
print('Misclassification percentage per image is the number of times that image was'
      ' misclassified divided by the total number of classifications')
for i in range(test_images):
mis_percent = misclassify_dict[i+1]/(model_num*iteration_num)*100
mis_list[i] = mis_percent
# print('Misclassified images percentage for image', str(i+1), 'is: %.4f' % mis_percent, '%')
# mis classification above 70%
for i in range(len(mis_list)):
if mis_list[i] >= 70:
print('Mis classification percentage (above 70%) for image {} is {}'.format(int(i+1), mis_list[i]))
cluster_chemistry_result_dataframe = pd.read_csv('test_cluster_chemistry_result.csv', header=None)
cluster_chemistry_result = cluster_chemistry_result_dataframe.values
cluster_chemistry_result = cluster_chemistry_result.reshape(1, int(test_images / (dataset_num*2)))
cluster_chemistry_result = np.repeat(cluster_chemistry_result, rep)
Y = np.copy(cluster_chemistry_result)
for i in range(dataset_num-1):
Y = np.concatenate((Y, cluster_chemistry_result))
# calculate accuracy of each class
y_1 = int(np.sum(Y == 1))
y_2 = int(np.sum(Y == 2))
y_3 = int(np.sum(Y == 3))
y_4 = int(np.sum(Y == 4))
y_5 = int(np.sum(Y == 5))
y_6 = int(np.sum(Y == 6))
y_1 = y_1 * 100 * 10
y_2 = y_2 * 100 * 10
y_3 = y_3 * 100 * 10
y_4 = y_4 * 100 * 10
y_5 = y_5 * 100 * 10
y_6 = y_6 * 100 * 10
y_1_mis = 0
y_2_mis = 0
y_3_mis = 0
y_4_mis = 0
y_5_mis = 0
y_6_mis = 0
for row in range(total_mis_classify.shape[0]):
for col in range(total_mis_classify.shape[1]):
mis_sample = total_mis_classify[row, col]
if mis_sample != 0:
mis_sample = mis_sample - 1
mis_sample_class = Y[int(mis_sample)]
if mis_sample_class == 1:
y_1_mis = y_1_mis + 1
if mis_sample_class == 2:
y_2_mis = y_2_mis + 1
if mis_sample_class == 3:
y_3_mis = y_3_mis + 1
if mis_sample_class == 4:
y_4_mis = y_4_mis + 1
if mis_sample_class == 5:
y_5_mis = y_5_mis + 1
if mis_sample_class == 6:
y_6_mis = y_6_mis + 1
# accuracy for group 1
y_1_acc = y_1_mis / y_1
y_1_acc = (1 - y_1_acc) * 100
# accuracy for group 2
y_2_acc = y_2_mis / y_2
y_2_acc = (1 - y_2_acc) * 100
# accuracy for group 3
y_3_acc = y_3_mis / y_3
y_3_acc = (1 - y_3_acc) * 100
# accuracy for group 4
y_4_acc = y_4_mis / y_4
y_4_acc = (1 - y_4_acc) * 100
# accuracy for group 5
y_5_acc = y_5_mis / y_5
y_5_acc = (1 - y_5_acc) * 100
# accuracy for group 6
y_6_acc = y_6_mis / y_6
y_6_acc = (1 - y_6_acc) * 100
x = [1, 2, 3, 4, 5, 6]
test_accuracy_list = list()
test_accuracy_list.append(y_1_acc)
test_accuracy_list.append(y_2_acc)
test_accuracy_list.append(y_3_acc)
test_accuracy_list.append(y_4_acc)
test_accuracy_list.append(y_5_acc)
test_accuracy_list.append(y_6_acc)
# test dataset accuracy of each class
fig_accuracy_plot, ax = plt.subplots()
# ax.set_title('Accuracy of each class')
plt.scatter(x, test_accuracy_list)
plt.xlabel('class number', fontsize=15)
plt.ylabel('Accuracy of each class (percentage)', fontsize=15)
plt.yticks(fontsize=9)
plt.xticks([1, 2, 3, 4, 5, 6], ['class 1', 'class 2', 'class 3', 'class 4', 'class 5', 'class 6'], fontsize=9)
plt.show()
fig_accuracy_plot.savefig('Test accuracy of each class.jpg')
plt.close(fig_accuracy_plot)
# plot accuracy histogram
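# The two selection-sort style loops below first order the samples by class label,
# then reorder the samples within each class by misclassification rate, so the bar
# chart ends up grouped per class with bars sorted inside each group.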
for i in range(len(Y)):
if Y[i] != np.min(Y[i:]):
index = i + np.where(Y[i:] == np.min(Y[i:]))[0][0]
Y[i], Y[index] = Y[index], Y[i]
mis_list[i], mis_list[index] = mis_list[index], mis_list[i]
for i in range(6+1):
Y_sub = Y[np.where(Y == i)]
mis_list_sub = mis_list[np.where(Y == i)]
for num in range(len(mis_list_sub)):
if mis_list_sub[num] != np.min(mis_list_sub[num:]):
index = num + np.where(mis_list_sub[num:] == np.min(mis_list_sub[num:]))[0][0]
mis_list_sub[num], mis_list_sub[index] = mis_list_sub[index], mis_list_sub[num]
Y_sub[num], Y_sub[index] = Y_sub[index], Y_sub[num]
# print(mis_list_sub)
mis_list[np.where(Y == i)] = mis_list_sub
x = []
for i in range(test_images):
x.append(i)
x_tick = []
for i in range(test_images):
x_tick.append(str(Y[i]))
color = []
for i in range(test_images):
if Y[i] == 1:
color.append('red')
if Y[i] == 2:
color.append('green')
if Y[i] == 3:
color.append('cyan')
if Y[i] == 4:
color.append('yellow')
if Y[i] == 5:
color.append('purple')
if Y[i] == 6:
color.append('black')
fig, ax = plt.subplots()
x = np.arange(120)
plt.bar(x, height=mis_list, color=color)
x_line = np.linspace(x[0], x[47], 1000)
y = np.zeros(1000)
y = y + test_accuracy_list[0]
ax.plot(x_line, y)
x_line = np.linspace(x[48], x[51], 1000)
y = np.zeros(1000)
y = y + test_accuracy_list[1]
ax.plot(x_line, y)
x_line = np.linspace(x[52], x[63], 1000)
y = np.zeros(1000)
y = y + test_accuracy_list[2]
ax.plot(x_line, y)
x_line = np.linspace(x[64], x[87], 1000)
y = np.zeros(1000)
y = y + test_accuracy_list[3]
ax.plot(x_line, y)
x_line = | np.linspace(x[88], x[111], 1000) | numpy.linspace |
import os
import gym
import numpy as np
import sys
import pybullet
from metagym.quadrupedal.robots import robot_config
from metagym import quadrupedal
from metagym.quadrupedal.robots import action_filter
from metagym.quadrupedal.envs.utilities.ETG_model import ETG_layer,ETG_model
from copy import copy
Param_Dict = {'torso':1.0,'up':0.3,'feet':0.2,'tau':0.1,'done':1,'velx':0,'badfoot':0.1,'footcontact':0.1}
Random_Param_Dict = {'random_dynamics':0,'random_force':0}
def EnvWrapper(env,param,sensor_mode,normal=0,ETG_T=0.5,enable_action_filter=False,
reward_p=1,ETG=1,ETG_path="",ETG_T2=0.5,random_param=None,
ETG_H=20,act_mode="traj",vel_d=0.6,vel_mode="max",
task_mode="normal",step_y=0.05):
env = ETGWrapper(env=env,ETG=ETG,ETG_T=ETG_T,ETG_path=ETG_path,
ETG_T2=ETG_T2,ETG_H=ETG_H,act_mode=act_mode,
task_mode=task_mode,step_y=step_y)
env = ActionFilterWrapper(env=env,enable_action_filter=enable_action_filter)
env = RandomWrapper(env=env,random_param=random_param)
env = ObservationWrapper(env=env,ETG=ETG,sensor_mode=sensor_mode,normal=normal,ETG_H = ETG_H)
env = RewardShaping(env=env,param=param,reward_p=reward_p,vel_d=vel_d,vel_mode=vel_mode)
return env
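# Wrapper stacking order produced above (innermost first): ETGWrapper (reference
# trajectory) -> ActionFilterWrapper -> RandomWrapper (domain randomisation) ->
# ObservationWrapper (sensor assembly) -> RewardShaping. A hypothetical call,
# mirroring the defaults defined in this module:
#     env = EnvWrapper(raw_env, Param_Dict, sensor_mode={}, random_param=Random_Param_Dict)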
class ActionFilterWrapper(gym.Wrapper):
def __init__(self,env,enable_action_filter):
gym.Wrapper.__init__(self, env)
self.robot = self.env.robot
self.pybullet_client = self.env.pybullet_client
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.enable_action_filter = enable_action_filter and self.env.ETG.endswith("sac")
if self.enable_action_filter:
self._action_filter = self._BuildActionFilter()
def reset(self,**kwargs):
obs_all,info = self.env.reset(**kwargs)
self._step_counter = 0
if self.enable_action_filter:
self._ResetActionFilter()
return obs_all,info
def step(self,action,**kwargs):
if self.enable_action_filter:
action = self._FilterAction(action)
obs_all, rew, done, info = self.env.step(action)
self._step_counter += 1
return obs_all, rew, done, info
def _BuildActionFilter(self):
sampling_rate = 1 / self.env.env_time_step
num_joints = 12
a_filter = action_filter.ActionFilterButter(sampling_rate=sampling_rate,
num_joints=num_joints)
return a_filter
def _ResetActionFilter(self):
self._action_filter.reset()
def _FilterAction(self, action):
# initialize the filter history, since resetting the filter will fill
# the history with zeros and this can cause sudden movements at the start
# of each episode
if self._step_counter == 0:
default_action = np.array([0,0,0]*4)
self._action_filter.init_history(default_action)
# for j in range(10):
# self._action_filter.filter(default_action)
filtered_action = self._action_filter.filter(action)
# print(filtered_action)
return filtered_action
class ObservationWrapper(gym.Wrapper):
def __init__(self, env,ETG,sensor_mode,normal,ETG_H):
gym.Wrapper.__init__(self, env)
# print("env_time:",self.env.env_time_step)
self.robot = self.env.robot
self.pybullet_client = self.env.pybullet_client
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.sensor_mode = sensor_mode
self.normal = normal
self.ETG_H = ETG_H
self.ETG = ETG
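        # Per-joint mean/std statistics (presumably measured from the reference ETG
        # gait) used to normalise the 12-D ETG action before it is appended to the
        # observation when sensor_mode["ETG"] and `normal` are enabled.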
self.ETG_mean = np.array([2.1505982e-02, 3.6674485e-02, -6.0444288e-02,
2.4625482e-02, 1.5869144e-02, -3.2513142e-02, 2.1506395e-02,
3.1869926e-02, -6.0140789e-02, 2.4625063e-02, 1.1628972e-02,
-3.2163858e-02])
self.ETG_std = np.array([4.5967497e-02,2.0340437e-01, 3.7410179e-01, 4.6187632e-02, 1.9441207e-01, 3.9488649e-01,
4.5966785e-02 ,2.0323379e-01, 3.7382501e-01, 4.6188373e-02 ,1.9457331e-01, 3.9302582e-01])
if self.ETG:
if "ETG" in self.sensor_mode.keys() and sensor_mode["ETG"] :
sensor_shape = self.observation_space.high.shape[0]
obs_h = np.array([1]*(sensor_shape+12))
obs_l = np.array([0]*(sensor_shape+12))
self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32)
if "ETG_obs" in self.sensor_mode.keys() and sensor_mode["ETG_obs"] :
sensor_shape = self.observation_space.high.shape[0]
obs_h = np.array([1]*(sensor_shape+self.ETG_H))
obs_l = np.array([0]*(sensor_shape+self.ETG_H))
self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32)
if "force_vec" in self.sensor_mode.keys() and sensor_mode["force_vec"]:
sensor_shape = self.observation_space.high.shape[0]
obs_h = np.array([1]*(sensor_shape+6))
obs_l = np.array([0]*(sensor_shape+6))
self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32)
if "dynamic_vec" in self.sensor_mode.keys() and sensor_mode["dynamic_vec"]:
sensor_shape = self.observation_space.high.shape[0]
obs_h = np.array([1]*(sensor_shape+3))
obs_l = np.array([0]*(sensor_shape+3))
self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32)
if "yaw" in self.sensor_mode.keys() and sensor_mode["yaw"]:
sensor_shape = self.observation_space.high.shape[0]
obs_h = np.array([1]*(sensor_shape+2))
obs_l = np.array([0]*(sensor_shape+2))
self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32)
if "RNN" in self.sensor_mode.keys() and self.sensor_mode["RNN"]["time_steps"]>0:
self.time_steps = sensor_mode["RNN"]["time_steps"]
self.time_interval = sensor_mode["RNN"]["time_interval"]
self.sensor_shape = self.observation_space.high.shape[0]
self.obs_history = np.zeros((self.time_steps*self.time_interval,self.sensor_shape))
if sensor_mode["RNN"]["mode"] == "stack":
obs_h = np.array([1]*(self.sensor_shape*(self.time_steps+1)))
obs_l = np.array([0]*(self.sensor_shape*(self.time_steps+1)))
self.observation_space = gym.spaces.Box(obs_l,obs_h,dtype=np.float32)
def reset(self,**kwargs):
obs,info = self.env.reset(**kwargs)
self.dynamic_info = info["dynamics"]
if self.ETG:
if "ETG" in self.sensor_mode.keys() and self.sensor_mode["ETG"] :
ETG_out = info["ETG_act"]
if self.normal:
ETG_out = (ETG_out-self.ETG_mean)/self.ETG_std
obs = np.concatenate((obs,ETG_out),axis = 0)
if "ETG_obs" in self.sensor_mode.keys() and self.sensor_mode["ETG_obs"] :
ETG_obs = info["ETG_obs"]
obs = np.concatenate((obs,ETG_obs),axis = 0)
if "force_vec" in self.sensor_mode.keys() and self.sensor_mode["force_vec"]:
force_vec = info["force_vec"]
obs = np.concatenate((obs,force_vec),axis = 0)
if "dynamic_vec" in self.sensor_mode.keys() and self.sensor_mode["dynamic_vec"]:
dynamic_vec = self.dynamic_info
obs = np.concatenate((obs,dynamic_vec),axis = 0)
if "yaw" in self.sensor_mode.keys() and self.sensor_mode["yaw"]:
if "d_yaw" in kwargs.keys():
d_yaw = kwargs["d_yaw"]
else:
d_yaw = 0
yaw_now = info["pose"][-1]
yaw_info = np.array([np.cos(d_yaw-yaw_now),np.sin(d_yaw-yaw_now)])
obs = np.concatenate((obs,yaw_info),axis = 0)
if "RNN" in self.sensor_mode.keys() and self.sensor_mode["RNN"]["time_steps"]>0:
self.obs_history = np.zeros((self.time_steps*self.time_interval,self.sensor_shape))
obs_list = []
for t in range(self.time_steps):
obs_list.append(copy(self.obs_history[t*self.time_interval]))
obs_list.append(copy(obs))
self.obs_history[-1] = copy(obs)
if self.sensor_mode["RNN"]["mode"]=="GRU":
obs = np.stack(obs_list,axis=0)
elif self.sensor_mode["RNN"]["mode"]=="stack":
obs = np.array(obs_list).reshape(-1)
return obs,info
def step(self,action,**kwargs):
obs, rew, done, info = self.env.step(action, **kwargs)
if self.ETG:
if "ETG" in self.sensor_mode.keys() and self.sensor_mode["ETG"] :
ETG_out = info["ETG_act"]
if self.normal:
ETG_out = (ETG_out-self.ETG_mean)/self.ETG_std
obs = np.concatenate((obs,ETG_out),axis = 0)
if "ETG_obs" in self.sensor_mode.keys() and self.sensor_mode["ETG_obs"] :
ETG_obs = info["ETG_obs"]
obs = np.concatenate((obs,ETG_obs),axis = 0)
if "force_vec" in self.sensor_mode.keys() and self.sensor_mode["force_vec"]:
force_vec = info["force_vec"]
obs = np.concatenate((obs,force_vec),axis = 0)
if "dynamic_vec" in self.sensor_mode.keys() and self.sensor_mode["dynamic_vec"]:
dynamic_vec = self.dynamic_info
obs = np.concatenate((obs,dynamic_vec),axis = 0)
if "yaw" in self.sensor_mode.keys() and self.sensor_mode["yaw"]:
if "d_yaw" in kwargs.keys():
d_yaw = kwargs["d_yaw"]
else:
d_yaw = 0
yaw_now = info["pose"][-1]
yaw_info = np.array([np.cos(d_yaw-yaw_now),np.sin(d_yaw-yaw_now)])
obs = np.concatenate((obs,yaw_info),axis = 0)
if "RNN" in self.sensor_mode.keys() and self.sensor_mode["RNN"]["time_steps"]>0:
obs_list = []
for t in range(self.time_steps):
obs_list.append(copy(self.obs_history[t*self.time_interval]))
obs_list.append(copy(obs))
self.obs_history[:-1] = copy(self.obs_history[1:])
self.obs_history[-1] = copy(obs)
if self.sensor_mode["RNN"]["mode"]=="GRU":
obs = np.stack(obs_list,axis=0)
elif self.sensor_mode["RNN"]["mode"]=="stack":
obs = np.array(obs_list).reshape(-1)
return obs,rew,done,info
class ETGWrapper(gym.Wrapper):
def __init__(self, env,ETG,ETG_T,ETG_path,ETG_T2,ETG_H=20,act_mode="traj",task_mode="normal",step_y=0.05):
gym.Wrapper.__init__(self, env)
self.robot = self.env.robot
self.pybullet_client = self.env.pybullet_client
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.ETG_T2 = ETG_T2
self.ETG_T = ETG_T
self.ETG_H = ETG_H
self.act_mode = act_mode
self.step_y = step_y
self.task_mode = task_mode
self.ETG = ETG
phase = np.array([-np.pi/2,0])
if self.ETG:
self.ETG_agent = ETG_layer(self.ETG_T,self.env.env_time_step,self.ETG_H,0.04,phase,0.2,self.ETG_T2)
self.ETG_weight = 1
if len(ETG_path)>1 and os.path.exists(ETG_path):
info = np.load(ETG_path)
self.ETG_w = info["w"]
self.ETG_b = info["b"]
else:
self.ETG_w = np.zeros((3,ETG_H))
self.ETG_b = np.zeros(3)
self.ETG_model = ETG_model(task_mode=self.task_mode,act_mode=act_mode,step_y=self.step_y)
self.last_ETG_act = np.zeros(12)
self.last_ETG_obs = np.zeros(self.ETG_H)
def reset(self,**kwargs):
kwargs["info"] = True
obs,info = self.env.reset(**kwargs)
if self.ETG:
if "ETG_w" in kwargs.keys() and kwargs["ETG_w"] is not None:
self.ETG_w = kwargs["ETG_w"]
if "ETG_b" in kwargs.keys() and kwargs["ETG_b"] is not None:
self.ETG_b = kwargs["ETG_b"]
self.ETG_agent.reset()
state = self.ETG_agent.update2(t=self.env.get_time_since_reset())
act_ref = self.ETG_model.forward(self.ETG_w,self.ETG_b,state)
act_ref = self.ETG_model.act_clip(act_ref,self.robot)
self.last_ETG_act = act_ref*self.ETG_weight
info["ETG_obs"] = state[0]
info["ETG_act"] = self.last_ETG_act
return obs,info
def step(self,action,**kwargs):
if self.ETG:
action = np.asarray(action).reshape(-1)+self.last_ETG_act
state = self.ETG_agent.update2(t=self.env.get_time_since_reset())
act_ref = self.ETG_model.forward(self.ETG_w,self.ETG_b,state)
action_before = act_ref
act_ref = self.ETG_model.act_clip(act_ref,self.robot)
self.last_ETG_act = act_ref*self.ETG_weight
obs, rew, done, info = self.env.step(action)
info["ETG_obs"] = state[0]
info["ETG_act"] = self.last_ETG_act
else:
obs, rew, done, info = self.env.step(action)
return obs,rew,done,info
class RewardShaping(gym.Wrapper):
def __init__(self, env,param,reward_p=1,vel_d=0.6,vel_mode="max"):
gym.Wrapper.__init__(self, env)
self.param = param
self.reward_p = reward_p
self.last_base10 = np.zeros((10,3))
self.robot = self.env.robot
self.pybullet_client = self.env.pybullet_client
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.vel_d = vel_d
self.steps = 0
self.vel_mode = vel_mode
self.yaw_init = 0.0
def reset(self,**kwargs):
self.steps = 0
obs,info = self.env.reset(**kwargs)
self.yaw_init = info["yaw_init"]
obs, rew, done, infos = self.env.step(np.zeros(self.action_space.high.shape[0]))
self.last_basepose = info["base"]
self.last_footposition = self.get_foot_world(info)
base_pose = info["base"]
self.last_base10 = np.tile(base_pose,(10,1))
info["foot_position_world"] = copy(self.last_footposition)
info["scene"] = "plane"
if "d_yaw" in kwargs.keys():
info['d_yaw'] = kwargs["d_yaw"]
else:
info['d_yaw'] = 0
if self.render:
self.line_id = self.draw_direction(info)
return obs,info
def step(self,action,**kwargs):
self.steps+=1
obs, rew, done, info = self.env.step(action, **kwargs)
self.env_vec = np.array([0,0,0,0,0,0,0])
posex = info["base"][0]
for env_v in info["env_info"]:
if posex+0.2 >= env_v[0] and posex+0.2 <= env_v[1]:
self.env_vec = env_v[2]
break
if self.env_vec[0]:
info["scene"] = "upslope"
elif self.env_vec[1]:
info["scene"] = "downslope"
elif self.env_vec[2]:
info["scene"] = "upstair"
elif self.env_vec[3]:
info["scene"] = "downstair"
else:
info["scene"] = "plane"
v = (np.array(info["base"])-np.array(self.last_basepose))/0.026
if "d_yaw" in kwargs.keys():
info['d_yaw'] = kwargs["d_yaw"]
else:
info['d_yaw'] = 0
donef = kwargs["donef"] if "donef" in kwargs.keys() else False
info = self.reward_shaping(obs, rew, done, info,action,donef)
info["vel"] = v
rewards = 0
done = self.terminate(info)
if done:
info["done"] = -1
else:
info["done"] = 0
for key in Param_Dict.keys():
if key in info.keys():
# print(key)
rewards+= info[key]
info["velx"] = rew
self.last_basepose = copy(info["base"])
self.last_base10[1:,:] = self.last_base10[:9,:]
self.last_base10[0,:] = np.array(info['base']).reshape(1,3)
self.last_footposition = self.get_foot_world(info)
info["foot_position_world"] = copy(self.last_footposition)
if self.render:
self.pybullet_client.removeUserDebugItem(self.line_id)
self.line_id = self.draw_direction(info)
return (obs, self.reward_p*rewards, done, info)
def reward_shaping(self,obs, rew, done, info,action,donef,last_basepose=None,last_footposition=None):
torso = self.re_torso(info,last_basepose=last_basepose)
info['torso'] = self.param['torso']*torso
if last_basepose is None:
v = (np.array(info["base"])-np.array(self.last_basepose))/0.026
else:
v = (np.array(info["base"])-np.array(last_basepose))/0.026
k = 1-self.c_prec(min(v[0],self.vel_d),self.vel_d,0.5)
info['up'] = (self.param['up'])*self.re_up(info)*k
info['feet'] = self.param['feet']*self.re_feet(info,last_footposition=last_footposition)
info['tau'] = -self.param['tau']*info['energy']*k
info['badfoot'] = -self.param['badfoot']*self.robot.GetBadFootContacts()
lose_contact_num = np.sum(1.0-np.array(info["real_contact"]))
info['footcontact'] = -self.param['footcontact']*max(lose_contact_num-2,0)
return info
def draw_direction(self,info):
pose = info['base']
if self.render:
id = self.pybullet_client.addUserDebugLine(lineFromXYZ=[pose[0],pose[1],0.6],
lineToXYZ=[pose[0]+np.cos(info['d_yaw']),pose[1]+np.sin(info['d_yaw']),0.6],
lineColorRGB=[1,0,1],lineWidth=2)
return id
def terminate(self,info):
rot_mat = info["rot_mat"]
pose = info["pose"]
footposition = copy(info["footposition"])
footz = footposition[:,-1]
base = info["base"]
base_std = np.sum(np.std(self.last_base10,axis=0))
return rot_mat[-1]<0.5 or np.mean(footz)
import csv
import glob
from functools import partial
from os import listdir, path
import numpy as np
import open3d as o3d
from plyfile import PlyData, PlyElement
from tqdm.auto import tqdm, trange
from tqdm.contrib.concurrent import process_map, thread_map
from scipy.spatial.transform import Rotation as R
def progresser(ply_file, auto_position=True, write_safe=False, blocking=True, progress=False):
try:
plydata = PlyData.read(ply_file)
vertex = plydata['vertex']
x = vertex['x']
y = vertex['y']
z = vertex['z']
if 'scalar' in vertex._property_lookup:
i = vertex['scalar']
elif 'intensity' in vertex._property_lookup:
i = vertex['intensity']
else:
i = plydata['vertex'][plydata.elements[0].properties[3].name]
return np.concatenate((x, y, z, i), axis=0)
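# Hedged usage sketch (paths and worker count are illustrative assumptions):
# progresser() is written to be mapped over many .ply files in parallel, e.g. with
# the process_map helper imported above.
#
#   ply_files = sorted(glob.glob("clouds/*.ply"))
#   point_arrays = process_map(progresser, ply_files, max_workers=8, chunksize=1)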
import numpy as np
from numdifftools import Gradient, Hessian, Jacobian
import itertools
import seaborn as sns
# import pandas as pd
from cycler import cycler
import palettable
import logging.config
import matplotlib.pyplot as plt
from plots.plot_helper_functions import set_size
import os
import matplotlib as mpl
from opt_einsum import contract
log = logging.getLogger(__name__)
root = os.path.dirname(os.path.abspath(__file__))
class hybrid_rosenbrock:
def __init__(self, n2, n1, mu, a, b, id=None):
"""
Hybrid-Rosenbrock class
Args:
n2 (int): Height of graph
n1 (int): Width of graph
mu (float): Mean
a (float): Controls the extent of the ridge of the marginals formed
b (array): n2 x (n1 -1) array controlling shallowness of ridges.
Entries of b follow block structure and correspond to the x_2, ... x_D (excluding first coordinate)
id (str): Give the provided settings a name.
"""
# String identifying a new set of parameters
self.id = id
# HRD parameters and setup
self.n1 = n1
self.n2 = n2
self.mu = mu
self.a = a
self.b = b
self.DoF = self.n2 * (self.n1 - 1) + 1
# Record evaluations
self.nLikelihoodEvaluations = 0
self.nGradLikelihoodEvaluations = 0
self.nHessLikelihoodEvaluations = 0
# Precalculate theta independent objects
self.jacGraph = self._getJacobianGraph()
self.hessRes = self._getHessianResidual()
#######################################################################################################################
def _getGraph(self, theta):
"""
Produces an array representation of graph denoted in Figure 2 - https://doi.org/10.1111/sjos.12532
Args:
theta (array): DoF sized array, point to evaluate at.
Returns: (array) n2 x n1 sized array
"""
graph = np.zeros((self.n2, self.n1))
graph[:, 0] = theta[0]
graph[:, 1:] = theta[1:].reshape(self.n2, self.n1-1)
return graph
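# Worked example (hypothetical sizes, for illustration only): with n2=2, n1=3 and
# theta = [t0, a, b, c, d], the returned graph is
#   [[t0, a, b],
#    [t0, c, d]]
# i.e. the first coordinate is shared by every row and the remaining DoF-1 entries
# fill the rows block by block.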
def _getResidual(self, theta):
"""
Residual vector of minus log density. See introduction to Chapter 10 Nocedal and Wright
Args:
theta (array): DoF sized array, point to evaluate at.
Returns: (array) DoF sized array
"""
graph = self._getGraph(theta)
residual = np.zeros(self.DoF)
residual[0] = np.sqrt(self.a) * (theta[0] - self.mu)
residual[1:] = (np.sqrt(self.b) * (graph[:, 1:] - graph[:, :-1] ** 2)).flatten()
return np.sqrt(2) * residual
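# Note: with this residual, the minus log density (up to the normalization constant) is
# ||r(theta)||^2 / 2 = a*(theta_1 - mu)^2 + sum_{j,i} b_{j,i}*(x_{j,i} - x_{j,i-1}^2)^2,
# which is the standard nonlinear least-squares form consumed by getMinusLogLikelihood
# and the Gauss-Newton Hessian below.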
def _index(self, j, i):
"""
Given position in graph, return corresponding component of input vector
Args:
j (int): row
i (int): col
Returns: (int) Corresponding coordinate of theta in graph location (j, i)
"""
if i == 0:
return int(0)
elif i > 0:
return int(j * (self.n1 - 1) + i)
def _getJacobianGraph(self):
"""
Get the derivative of the graph with respect to input.
Returns: (array) n2 x n1 x DoF
Note: For given a, b, n2, n1, this remains fixed. Calculate once and forget.
"""
jacGraph = np.zeros((self.n2, self.n1, self.DoF))
for j, i in itertools.product(range(self.n2), range(self.n1)):
jacGraph[j, i, self._index(j, i)] = 1
return jacGraph
def _getJacobianResidual(self, theta):
"""
Calculate the Jacobian of the residual vector
Args:
theta (array): DoF sized array, point to evaluate at.
Returns (array): DoF x DoF shaped Jacobian evaluated at theta
"""
graph = self._getGraph(theta)
jacResidual = np.zeros((self.DoF, self.DoF))
jacResidual[0, 0] = np.sqrt(self.a)
jacGraphSquared = 2 * contract('db, dbe -> dbe', graph[:, :-1], self.jacGraph[:,:-1])
jacResidual[1:, :] = contract('ab, abe -> abe', np.sqrt(self.b), self.jacGraph[:,1:] - jacGraphSquared).reshape(self.DoF - 1, self.DoF)
return np.sqrt(2) * jacResidual
def _getHessianResidual(self):
"""
Calculate the Hessian of the residual vector
Note: independent of theta for Hybrid Rosenbrock, so it is precomputed once in __init__.
Returns (array): DoF x DoF x DoF shaped Hessian of the residual
"""
# hessRes_num = Jacobian(Jacobian(self._getResidual))(np.zeros(self.DoF))
# np.allclose(hessRes_num, hessRes)
hessRes = np.zeros((self.DoF, self.DoF, self.DoF))
hessRes[1:] = contract('ji, jif, jie -> jief', -np.sqrt(8 * self.b), self.jacGraph[:, :-1], self.jacGraph[:, :-1]).reshape(self.DoF - 1, self.DoF, self.DoF)
return hessRes
def getMinusLogLikelihood(self, theta):
"""
Returns minus log of Hybrid Rosenbrock
Args:
theta (array): DoF sized array, point to evaluate at.
Returns: (float) Density evaluation
"""
r = self._getResidual(theta)
self.nLikelihoodEvaluations += 1
return np.dot(r, r) / 2
def getGradientMinusLogLikelihood(self, theta):
"""
Evaluates gradient of minus log of Hybrid Rosenbrock
Args:
theta (array): DoF sized array, point to evaluate at.
Returns: (array) DoF shaped array
"""
r = self._getResidual(theta)
jr = self._getJacobianResidual(theta)
self.nGradLikelihoodEvaluations += 1
return r.T @ jr
def getGNHessianMinusLogLikelihood(self, theta):
"""
Calculate Gauss-Newton approximation of Hybrid Rosenbrock
Args:
theta (array): DoF sized array, point to evaluate at.
Returns: (array) DoF x DoF shaped array of Gauss-Newton approximation at theta.
"""
jr = self._getJacobianResidual(theta)
return contract('af, ae -> ef', jr, jr)
def getHessianMinusLogLikelihood(self, theta):
"""
Calculates Hessian of minus log Hybrid Rosenbrock
Args:
theta (array): DoF sized array, point to evaluate at.
Returns: (array): DoF x DoF shaped array of Hessian of Hybrid Rosenbrock evaluated at theta
"""
# hessNum = Jacobian(Jacobian(self.getMinusLogLikelihood))(theta)
r = self._getResidual(theta)
jr = self._getJacobianResidual(theta)
self.nHessLikelihoodEvaluations += 1
return contract('af, ae -> ef', jr, jr) + contract('a, aef -> ef', r, self.hessRes)
def getJerkMinusLogLikelihood(self, theta):
"""
Evaluate third order derivatives
Args:
theta (array): DoF sized array, point to evaluate at.
Returns:
"""
# TODO
raise NotImplementedError
def getNormalizationConstant(self):
"""
Evaluate the normalization constant of Hybrid Rosenbrock for the given settings.
Returns: (float) The normalization constant
"""
return np.sqrt(self.a / (np.pi ** self.DoF)) * np.prod(np.sqrt(self.b))
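# Minimal usage sketch (parameter values are illustrative, not taken from any particular
# experiment): build a small Hybrid-Rosenbrock target and query its minus-log-density
# and gradient.
#
#   n2, n1 = 2, 3
#   b = np.full((n2, n1 - 1), 0.5)
#   hrd = hybrid_rosenbrock(n2=n2, n1=n1, mu=1.0, a=1.0, b=b, id='toy')
#   theta = np.zeros(hrd.DoF)
#   nll  = hrd.getMinusLogLikelihood(theta)
#   grad = hrd.getGradientMinusLogLikelihood(theta)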
from mri_modules.utils import *
import os
import numpy as np
import cv2
import shutil
from skimage.measure import marching_cubes_lewiner as marching_cubes
import stl
from stl import mesh
import tensorflow as tf
from tensorflow.keras.models import load_model
import skimage.transform
import nibabel as nib
import h5py
import scipy
from mri_modules.load_in_arrays import *
import time
import random
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import nilearn
import SimpleITK as sitk
import imregpoc
from math import pi
import sys
from skimage.morphology import convex_hull_image
start_time = time.time()
def binarize(array, min_):
binary = array.copy()
binary[array < min_] = 0
binary[array >= min_] = 1
return binary
def dilate_up(array, size, stacked = True):
if stacked:
binary = np.squeeze(array.copy()[0], axis = 3)
else:
binary = array.copy()
binary[binary > 0] = 1
kernel = scipy.ndimage.generate_binary_structure(3, 1)
blew_up = scipy.ndimage.binary_dilation(binary.astype('uint8'), kernel, iterations=size)
if stacked:
return np.stack([np.stack([blew_up], axis = 3)])
else:
return blew_up
def translate_3d(array, translation):
original_array = array.copy()
array_translated = array.copy()
array_translated[:] = 0
for z,Slice in enumerate(original_array):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
array_translated[z+translation[0]][y+translation[1]][x+translation[2]] = pixel
except:
pass
return array_translated
def touching_island(reference, array, stacked = True):
if stacked:
array = np.squeeze(array.copy()[0], axis = 3)
array[array > 0] = 1
reference = np.squeeze(reference.copy()[0], axis = 3)
reference[reference > 0] = 1
else:
array[array > 0] = 1
reference[reference > 0] = 1
masked = array.copy()
masked[:] = 0
touching_structure_3d =[[[0,0,0],
[0,1,0],
[0,0,0]],
[[0,1,0],
[1,1,1],
[0,1,0]],
[[0,0,0],
[0,1,0],
[0,0,0]]]
markers, num_features = scipy.ndimage.measurements.label(array, touching_structure_3d)
reference_idx = np.unique(markers[reference == 1])
for idx in reference_idx:
masked[markers == idx] = 1
masked[array == 0] = 0
if stacked:
return np.stack([np.stack([masked], axis = 3)])
else:
return masked
def biggest_island(input_array, stacked = True):
if stacked:
masked = np.squeeze(input_array.copy()[0], axis = 3)
binary = np.squeeze(input_array.copy()[0], axis = 3)
binary[:] = 0
binary[np.squeeze(input_array[0], axis = 3) > 0] = 1
else:
masked = input_array.copy()
binary = input_array.copy()
binary[:] = 0
binary[input_array > 0] = 1
touching_structure_3d =[[[0,0,0],
[0,1,0],
[0,0,0]],
[[0,1,0],
[1,1,1],
[0,1,0]],
[[0,0,0],
[0,1,0],
[0,0,0]]]
markers,_ = scipy.ndimage.measurements.label(binary,touching_structure_3d)
markers[binary == 0] = 0
counts = np.bincount(markers.ravel())
counts[0] = 0
noise_idx = np.where(counts != np.max(counts))
noise = np.isin(markers, noise_idx)
binary[noise] = 0
masked[binary == 0] = 0
if stacked:
return np.stack([np.stack([masked], axis = 3)])
else:
return masked
def combine_zeros(arrays):
combined = arrays[0].copy()
for array in arrays:
combined[array < 0.1] = 0
return combined
def adaptive_threshold(array, course, precise, blur_precision = 0, stacked = True):
if stacked:
thresholded_array = np.squeeze(array.copy()[0], axis = 3)
thresholded_array = thresholded_array*255
thresholded_array[thresholded_array > 255] = 255
else:
thresholded_array = array.copy()
thresholded_array = thresholded_array*255
thresholded_array[thresholded_array > 255] = 255
blurred = scipy.ndimage.gaussian_filter(thresholded_array, blur_precision)
adap = []
for image in blurred:
thresh = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, course, 2)
thresh2 = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, precise, 2)
thresh3 = thresh.copy()
thresh3[:] = 255
thresh3[thresh2 == 0] = 0
thresh3[thresh == 0] = 0
adap.append(thresh3)
adap = np.stack(adap)
thresholded_array[adap == 0] = 0
if stacked:
return np.stack([np.stack([thresholded_array/255], axis = 3)])
else:
return thresholded_array/255
def generate_stl(array_3d, stl_file_path, stl_resolution):
array = array_3d.copy()
verts, faces, norm, val = marching_cubes(array, 0.01, step_size = stl_resolution, allow_degenerate=True)
mesh = stl.mesh.Mesh(np.zeros(faces.shape[0], dtype=stl.mesh.Mesh.dtype))
for i, f in enumerate(faces):
for j in range(3):
mesh.vectors[i][j] = verts[f[j],:]
if not stl_file_path.endswith(".stl"):
stl_file_path += ".stl"
if not os.path.exists(os.path.dirname(stl_file_path)):
os.makedirs(os.path.dirname(stl_file_path))
mesh.save(stl_file_path)
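# Hedged usage sketch (array and path are placeholders): generate_stl() converts a 3D mask
# into a triangle mesh via marching cubes and writes it out; a larger stl_resolution gives
# a coarser, smaller mesh.
#
#   generate_stl(mask_3d, "output/brain_mask.stl", stl_resolution=2)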
def find_median_grayscale(array):
zero_pixels = float(np.count_nonzero(array==0))
single_dimensional = array.flatten().tolist()
single_dimensional.extend(np.full((1, int(zero_pixels)), 1000).flatten().tolist())
return np.median(single_dimensional)
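# Note on find_median_grayscale (describing the behaviour as written, not a redesign):
# for every zero-valued voxel a sentinel value of 1000 is appended before taking the
# median, so the zeros and sentinels balance each other out and the result approximates
# the median of the non-zero grayscale values (assuming image values stay below the sentinel).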
def locate_bounds(array, stacked = True):
if stacked:
left = np.squeeze(array.copy()[0], axis = 3).shape[2]
right = 0
low = np.squeeze(array.copy()[0], axis = 3).shape[1]
high = 0
shallow = np.squeeze(array.copy()[0], axis = 3).shape[0]
deep = 0
array_3d = np.squeeze(array.copy()[0], axis = 3)
else:
left = array.copy().shape[2]
right = 0
low = array.copy().shape[1]
high = 0
shallow = array.copy().shape[0]
deep = 0
array_3d = array.copy()
for z,Slice in enumerate(array_3d):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
if pixel > 0:
if z > deep:
deep = z
if z < shallow:
shallow = z
if y > high:
high = y
if y < low:
low = y
if x > right:
right = x
if x < left:
left = x
return [left,right,low,high,shallow,deep]
def pad(array):
padded = []
for image in array:
padded.append(image)
padded.append(np.zeros((array.shape[1],array.shape[2])))
padded.append(np.zeros((array.shape[1],array.shape[2])))
final = translate_3d(np.stack(padded), [1,1,1])
return final
def write_images(array, test_folder_path):
if not os.path.exists(test_folder_path):
os.makedirs(test_folder_path)
for n,image in enumerate(array):
file_name = str(str(n) +'.png')
cv2.imwrite(os.path.join(test_folder_path, file_name), image*255)
def circle_highlighted(reference, binary, color):
circled = reference.copy()
binary = binary.copy()
binary[binary > 0] = 1
for n, image in enumerate(binary):
contours, _ = cv2.findContours(image.astype('uint8'),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(circled[n], contours, -1,color, 1)
return circled
def preprocess_data(path,rescale_factor = None,translation = None):
print(path)
image_data, valid = load_dicom_folder(path, updating_labels = False)
print(image_data.shape)
print(valid)
print(np.max(image_data))
print("oofoofoofoofofofofofof")
'''if "t2" in path.lower():
image_data = np.rot90(image_data, axes = (2,1)).T'''
image_data = image_data/np.max(image_data)
blank_unscaled_array = image_data.copy()
blank_unscaled_array[:] = 0
z_zoom = image_size/image_data.shape[0]
y_zoom = image_size/image_data.shape[1]
x_zoom = image_size/image_data.shape[2]
print(z_zoom, y_zoom, x_zoom)
rescaled_blank = skimage.transform.rescale(blank_unscaled_array, (z_zoom, y_zoom, x_zoom))
image_data = np.stack([np.stack([image_data], axis = 3)])
if rescale_factor is None:
bounds_finder = image_data.copy()
bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1)
bounds_finder = biggest_island(bounds_finder)
bounds = locate_bounds(bounds_finder)
[left,right,low,high,shallow,deep] = bounds
x_size = abs(left-right)
y_size = abs(low-high)
z_size = abs(shallow-deep)
max_size = np.max([x_size, y_size, z_size])
rescale_factor = (image_size*0.8)/max_size
backscale_factor = 1/rescale_factor
image_data = skimage.transform.rescale(np.squeeze(image_data.copy()[0], axis = 3), (rescale_factor, rescale_factor, rescale_factor))
if translation is None:
bounds_finder = image_data.copy()
bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1, stacked = False)
bounds_finder = biggest_island(bounds_finder, stacked = False)
bounds = locate_bounds(np.stack([np.stack([bounds_finder], axis = 3)]))
else:
bounds=translation
print("\n\nbounds:",bounds,"\n\n")
[left,right,low,high,shallow,deep] = bounds
image_data = translate_3d(image_data, [-shallow,-low,-left])
rescaled_array = rescaled_blank.copy()
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
rescaled_array[z][y][x] = pixel
except:
pass
return rescaled_array, rescale_factor,bounds
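# Rough usage sketch (the DICOM folder paths are placeholders): preprocess_data() rescales
# a series into the fixed image_size cube and returns the rescale factor and bounds it used,
# so co-registered series can be forced through the exact same transform, mirroring what the
# main loop below does with FLAIR/T1/T2.
#
#   flair, factor, bounds = preprocess_data("/data/case001/flair")
#   t1, _, _ = preprocess_data("/data/case001/t1_post", factor, bounds)
#   t2, _, _ = preprocess_data("/data/case001/t2", factor, bounds)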
def ConvNetsemantic(x,y,z):
inputs = keras.layers.Input((x,y,z, 3))
p0 = inputs
c1, p1 = down_block(p0, 8, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 16, 0.1) #64 -> 32
c3, p3 = down_block(p2, 32, 0.2) #32 -> 16
c4, p4 = down_block(p3, 64, 0.3) #16->8
c5, p5 = down_block(p4, 128, 0.3) #16->8
c6, p6 = down_block(p5, 256, 0.3) #16->8
c7, p7 = down_block(p6, 512, 0.3) #16->8
bn = bottleneck(p7, 1024, 0.4)
print(bn.shape)
u1 = up_block(bn, c7, 512, 0.3) #8 -> 16
u2 = up_block(u1, c6, 256, 0.2) #16 -> 32
u3 = up_block(u2, c5, 128, 0.1) #32 -> 64
u4 = up_block(u3, c4, 64, 0.1) #64 -> 128
u5 = up_block(u4, c3, 32, 0.1) #64 -> 128
u6 = up_block(u5, c2, 16, 0.1) #64 -> 128
u7 = up_block(u6, c1, 8, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(4, (1, 1, 1),padding='same', activation="softmax")(u7)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
def register(fixed_image, moving_image, orig, transform = None):
if transform is None:
resamples = []
metrics = []
transforms = []
for i in range (1,10):
ImageSamplingPercentage = 1
initial_transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, sitk.ScaleVersor3DTransform(), sitk.CenteredTransformInitializerFilter.MOMENTS)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=200)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(float(ImageSamplingPercentage)/100)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=0.001, numberOfIterations=10**5, convergenceMinimumValue=1e-6, convergenceWindowSize=100) #Once
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform)
#registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
#registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
#registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
transform = registration_method.Execute(fixed_image, moving_image)
#print(transform)
print("number:",i)
print(registration_method.GetMetricValue())
metrics.append(registration_method.GetMetricValue())
resamples.append(sitk.Resample(orig, fixed_image, transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID()))
transforms.append(transform)
print(np.min(metrics))
return sitk.GetArrayFromImage(resamples[metrics.index(np.min(metrics))]),transforms[metrics.index(np.min(metrics))]
else:
return sitk.GetArrayFromImage(sitk.Resample(orig, fixed_image, transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())),transform
def convex_border(array, thickness):
contour_only = array.copy()
binary = array.copy()
contour_only[:] = 0
binary[:] = 0
binary[array > 0] = 255
cont = []
for n, image in enumerate(binary):
contours, _ = cv2.findContours(image.astype('uint8'),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
hull = cv2.convexHull(contour)
cv2.drawContours(contour_only[n], [hull], -1, 200, thickness)
return contour_only
def convex_hull(array):
contour_only = array.copy()
binary = array.copy()
hull = []
binary[:] = 0
binary[array > 0.05] = 255
cont = []
for n, image in enumerate(binary):
convex = np.array(convex_hull_image(image.astype('uint8')),dtype="float64")
hull.append(convex)
return np.stack(hull)
def fill_holes_binary(array, sense):
binary = array.copy()
binary_original = array.copy()
binary_original[:] = 0
binary_original[array > 0] = 1
binary[:] = 0
binary[array == 0] = 1
touching_structure_2d = [[0,1,0],
[1,1,1],
[0,1,0]]
denoised = []
for n,image in enumerate(binary):
markers, num_features = scipy.ndimage.measurements.label(image,touching_structure_2d)
omit = markers[0][0]
flat = markers.ravel()
binc = np.bincount(flat)
binc_not = np.bincount(flat[flat == omit])
noise_idx2 = np.where(binc > sense)
noise_idx1 = np.where(binc == np.max(binc_not))
mask1 = np.isin(markers, noise_idx1)
mask2 = np.isin(markers, noise_idx2)
image[mask1] = 0
image[mask2] = 0
denoised.append(image)
denoised = np.stack(denoised)
binary_original[denoised == 1] = 1
return binary_original
def convex_shape(input_array):
#binary = adaptive_threshold(input_array, 101, 45, 1, stacked = False)
#binary[input_array < 0.1] = 0
binary = np.array(input_array > 0.1,dtype = "float64")
binary = biggest_island(binary, stacked = False)
binary = convex_hull(binary)
binary = biggest_island(binary, stacked = False)
return binary
def trim(flair,t1,t2):
flair_cut = flair.copy()
t1_cut = t1.copy()
t1_cut[:] = 0
t2_cut = t2.copy()
t2_cut[:] = 0
for n,image in enumerate(flair):
if np.max(flair) > 0 and np.max(t1) > 0 and np.max(t2) > 0:
flair_cut[n] = flair[n]
t1_cut[n] = t1[n]
t2_cut[n] = t2[n]
return flair_cut, t1_cut, t2_cut
def normalize(flair,t1,t2):
flair = flair/np.max(flair)
blank_unscaled_array = flair.copy()
blank_unscaled_array[:] = 0
z_zoom = image_size/flair.shape[0]
y_zoom = image_size/flair.shape[1]
x_zoom = image_size/flair.shape[2]
image_data1 = skimage.transform.rescale(flair, (z_zoom, y_zoom, x_zoom))
original_array1 = image_data1.copy()
original_array1[:] = 0
image_data = np.stack([np.stack([flair], axis = 3)])
original_unscaled_array = image_data.copy()
bounds = locate_bounds(image_data)
[left,right,low,high,shallow,deep] = bounds
x_size = abs(left-right)
y_size = abs(low-high)
z_size = abs(shallow-deep)
max_size = np.max([x_size, y_size, z_size])
image_data = translate_3d(np.squeeze(image_data.copy()[0], axis = 3), [-shallow,-low,-left])
rescale_factor = (image_size*0.8)/max_size
print("rescale factor:", rescale_factor)
backscale_factor = 1/rescale_factor
image_data = skimage.transform.rescale(image_data, (rescale_factor, rescale_factor, rescale_factor))
original_scaled_down = image_data.copy()
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
original_array1[z][y][x] = pixel
except:
pass
flair = original_array1.copy()
########################### T1CE ####################################
t1 = t1/np.max(t1)
image_data = translate_3d(t1, [-shallow,-low,-left])
image_data = skimage.transform.rescale(image_data, (rescale_factor, rescale_factor, rescale_factor))
original_array1[:] = 0
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
original_array1[z][y][x] = pixel
except:
pass
t1ce = original_array1.copy()
########################### T2 ####################################
t2 = t2/np.max(t2)
image_data = translate_3d(t2, [-shallow,-low,-left])
image_data = skimage.transform.rescale(image_data, (rescale_factor, rescale_factor, rescale_factor))
original_array1[:] = 0
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
original_array1[z][y][x] = pixel
except:
pass
t2 = original_array1.copy()
return flair,t1ce,t2
def ConvNetTumor(x,y,z):
inputs = keras.layers.Input((x,y,z, 1))
p0 = inputs
c1, p1 = down_block(p0, 16, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 32, 0.1) #64 -> 32
c3, p3 = down_block(p2, 64, 0.2) #32 -> 16
c4, p4 = down_block(p3, 128, 0.3) #16->8
bn = bottleneck(p4, 256, 0.4)
print(bn.shape)
u1 = up_block(bn, c4, 128, 0.3) #8 -> 16
u2 = up_block(u1, c3, 64, 0.2) #16 -> 32
u3 = up_block(u2, c2, 32, 0.1) #32 -> 64
u4 = up_block(u3, c1, 16, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(1, (1, 1, 1),padding='same', activation="sigmoid")(u4)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
def ConvNetbinary(x,y,z):
inputs = keras.layers.Input((x,y,z, 3))
p0 = inputs
c1, p1 = down_block(p0, 4, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 8, 0.1) #64 -> 32
c3, p3 = down_block(p2, 16, 0.2) #32 -> 16
c4, p4 = down_block(p3, 32, 0.3) #16->8
c5, p5 = down_block(p4, 64, 0.3) #16->8
c6, p6 = down_block(p5, 128, 0.3) #16->8
c7, p7 = down_block(p6, 256, 0.3) #16->8
bn = bottleneck(p7, 512, 0.4)
print(bn.shape)
u1 = up_block(bn, c7, 256, 0.3) #8 -> 16
u2 = up_block(u1, c6, 128, 0.2) #16 -> 32
u3 = up_block(u2, c5, 64, 0.1) #32 -> 64
u4 = up_block(u3, c4, 32, 0.1) #64 -> 128
u5 = up_block(u4, c3, 16, 0.1) #64 -> 128
u6 = up_block(u5, c2, 8, 0.1) #64 -> 128
u7 = up_block(u6, c1, 4, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(1, (1, 1, 1),padding='same', activation="sigmoid")(u7)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
def down_block_e(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
print(x.shape)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
p = keras.layers.MaxPool3D(pool_size = (2, 2, 2))(c)
return c, p
def up_block_e(x, skip, filters, dropout,kernel_size=(3, 3, 3), padding="same", strides=1):
us = keras.layers.UpSampling3D((2, 2, 2))(x)
concat = keras.layers.Concatenate()([us, skip])
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = concat.shape[1:], kernel_initializer='he_normal')(concat)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def bottleneck_e(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout) (c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def ConvNetSemantic64(x,y,z):
inputs = keras.layers.Input((x,y,z, 3))
p0 = inputs
c1, p1 = down_block_e(p0, 16, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block_e(p1, 32, 0.1) #64 -> 32
c3, p3 = down_block_e(p2, 64, 0.2) #32 -> 16
c4, p4 = down_block_e(p3, 128, 0.3) #16->8
c5, p5 = down_block_e(p4, 256, 0.3) #16->8
c6, p6 = down_block_e(p5, 512, 0.3) #16->8
bn = bottleneck_e(p6, 1024, 0.4)
print(bn.shape)
u1 = up_block_e(bn, c6, 512, 0.3) #8 -> 16
u2 = up_block_e(u1, c5, 256, 0.2) #16 -> 32
u3 = up_block_e(u2, c4, 128, 0.1) #32 -> 64
u4 = up_block_e(u3, c3, 64, 0.1) #64 -> 128
u5 = up_block_e(u4, c2, 32, 0.1) #64 -> 128
u6 = up_block_e(u5, c1, 16, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(4, (1, 1, 1),padding='same', activation="softmax")(u6)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
def down_block(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
print(x.shape)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
p = keras.layers.MaxPool3D(pool_size = (2, 2, 2))(c)
return c, p
def up_block(x, skip, filters, dropout,kernel_size=(3, 3, 3), padding="same", strides=1):
us = keras.layers.UpSampling3D((2, 2, 2))(x)
concat = keras.layers.Concatenate()([us, skip])
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = concat.shape[1:], kernel_initializer='he_normal')(concat)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def bottleneck(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout) (c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def ConvNetRough(x,y,z):
inputs = keras.layers.Input((x,y,z, 1))
p0 = inputs
c1, p1 = down_block(p0, 32, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 64, 0.1) #64 -> 32
c3, p3 = down_block(p2, 128, 0.2) #32 -> 16
bn = bottleneck(p3, 256, 0.4)
print(bn.shape)
u1 = up_block(bn, c3, 128, 0.3) #16 -> 32
u2 = up_block(u1, c2, 64, 0.2) #16 -> 64
u3 = up_block(u2, c1, 32, 0.1) #32 -> 128
outputs = tf.keras.layers.Conv3D(1, (1, 1, 1),padding='same', activation="sigmoid")(u3)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
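# Hedged note: the ConvNet* builders above all share the same 3D U-Net pattern
# (down_block encoder -> bottleneck -> up_block decoder with skip connections);
# they are used further below roughly like this (weights path is a placeholder):
#
#   brain_seg = ConvNetRough(128, 128, 128)
#   brain_seg.load_weights("models/brain_seg_rough.h5")
#   mask = brain_seg.predict(np.stack([np.stack([t1_volume], axis=-1)]))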
output_image_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/image ct visualizations/Machine Learning 2 models test"
image_size = 128
brain_seg_model_top = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_top.h5"
brain_seg_model_top2 = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_top.h5"
brain_seg_model_front = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_front.h5"
brain_seg_model_side = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_side.h5"
brain_seg_model_edges = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_edges.h5"
tumor_seg_model = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_tumor.h5"
input_path = "C:/Users/JiangQin/Documents/data/raw ct files/QIN GBM Treatment Response"
output_path = "C:/Users/JiangQin/Documents/data/raw ct files/QIN GBM Treatment Response/loaded arrays 2"
input_path = "C:/Users/JiangQin/Documents/data/raw ct files/ACRIN-DSC-MR-Brain"
path = "C:/Users/JiangQin/Documents/data/raw ct files/ACRIN-DSC-MR-Brain/Clinical data/ACRIN-DSC-MR-Brain TCIA Anonymized"
path2 = "C:/Users/JiangQin/Documents/data/raw ct files/ACRIN-DSC-MR-Brain/Clinical data/ACRIN-DSC-MR-Brain-HB TCIA Anonymized"
alphabet = ["A","B","C","D"]
def load_sets(input_path,clinical_data_path,datasets=[]):
bru = 0
oof=0
valid_indexes = []
scans = []
for set_ in os.listdir(input_path):
set_path = input_path + "/" + set_
scans = []
scan_dates = []
try:
set_num = int(set_[-3:])
for scan in os.listdir(set_path):
flair = None
t1 = None
t2 = None
scan_path = set_path + '/' + scan
if os.path.isdir(scan_path):
for mri in os.listdir(scan_path):
if "t2" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "trace" not in mri.lower() and os.path.isdir(scan_path + "/" + mri):
if t2!=None:
bru+=1
t2 = mri
if "t1" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "post" in mri.lower() and os.path.isdir(scan_path + "/" + mri):
if t1!=None:
bru+=1
t1 = mri
if "flair" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "t1" not in mri.lower() and os.path.isdir(scan_path + "/" + mri):
if flair!=None:
bru+=1
flair = mri
if flair is not None and t1 is not None and t2 is not None:
date = dicom.read_file(scan_path + "/" + flair+"/"+os.listdir(scan_path + "/" + flair)[0]).ClinicalTrialTimePointID
found = False
valid = False
for i in range(0,14):
try:
if i >= 10:
ia=alphabet[i%10]
else:
ia=i
data = []
blub = open(os.path.join(clinical_data_path,str("M"+str(ia))+".csv")).read()
lines = blub.split("\n")
del lines[0]
del lines[-1]
for n,line in enumerate(lines):
chars = line.split(",")
data.append([])
for char in chars:
try:
data[n].append(int(char))
except:
data[n].append(0)
data = np.stack(data)
sets = data[:,0]
dates = data[:,8]
if int(date) == data[:,8][sets.tolist().index(set_num)]:
print("uhh")
if data[:,43][sets.tolist().index(set_num)] != 0:
current_time = i
progression = int(data[:,43][sets.tolist().index(set_num)])
zones = np.stack([int(x) for x in list(data[:,31:40][sets.tolist().index(set_num)])])-1
found = True
break
except Exception as e:
pass
if found:
try:
print("found")
if current_time-1 >= 10:
ia=alphabet[(current_time-1)%10]
else:
ia=current_time-1
data = []
blub = open(os.path.join(clinical_data_path,str("M"+str(ia))+".csv")).read()
lines = blub.split("\n")
del lines[0]
del lines[-1]
for n,line in enumerate(lines):
chars = line.split(",")
data.append([])
for char in chars:
try:
data[n].append(int(char))
except:
data[n].append(0)
data = np.stack(data)
sets = data[:,0]
older_date = data[:,8][sets.tolist().index(set_num)]
for scan in os.listdir(set_path):
flair_old = None
t1_old = None
t2_old = None
scan_path_old = set_path + '/' + scan
if os.path.isdir(scan_path_old):
for mri in os.listdir(scan_path_old):
if "t2" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "trace" not in mri.lower() and os.path.isdir(scan_path_old + "/" + mri):
if t2_old!=None:
bru+=1
t2_old = mri
if "t1" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "post" in mri.lower() and os.path.isdir(scan_path_old + "/" + mri):
if t1_old!=None:
bru+=1
t1_old = mri
if "flair" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "t1" not in mri.lower() and os.path.isdir(scan_path_old + "/" + mri):
if flair_old!=None:
bru+=1
flair_old = mri
if flair_old is not None and t1_old is not None and t2_old is not None:
date = dicom.read_file(scan_path_old + "/" + flair_old+"/"+os.listdir(scan_path_old + "/" + flair_old)[0]).ClinicalTrialTimePointID
old_zones = np.stack([int(x) for x in list(data[:,31:40][sets.tolist().index(set_num)])])-1
if int(older_date) == int(date):
if not np.array_equal(zones,old_zones):
print(zones,old_zones)
oof+=1
datasets.append([[scan_path_old + "/" + flair_old,scan_path_old + "/" + t1_old,scan_path_old + "/" + t2_old],
[scan_path + "/" + flair,scan_path + "/" + t1,scan_path + "/" + t2], progression])
break
except Exception as e:
print(e)
pass
except Exception as e:
print("bub",e)
pass
print(oof)
return datasets
sets = load_sets(input_path,path)
sets = load_sets(input_path,path2,sets)
binary_model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Tumor seg binary with t1ce t2 flair/Model 16.h5"
binary_model_path2 = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_tumor.h5"
binary_model_path3 = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/Model 34.h5"
semantic_model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Tumor seg semantic 64x with t1ce t2 flair/Model 81.h5"
brain_seg_t1_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Brain Seg OASIS 36 top view/Model 51 (2).h5"
start = 8
responses = np.stack([0,0,0,0])
print(len(sets))
print("hmmm")
input()
for Set in range(start, len(sets)):
print("\n\nSet " + str(Set) + "\n\n")
for i in range(0,10):
print("\nGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\n")
print(sets[Set])
old_mr_path_flair = sets[Set][0][0]
flair,factor,translate = preprocess_data(old_mr_path_flair)
flair_binary = np.array(flair >0.1,dtype = "float64")
write_images(flair_binary, output_image_path)
flair_binary_image_og = sitk.GetImageFromArray(flair_binary)
old_mr_path_t1 = sets[Set][0][1]
t1,_,_ = preprocess_data(old_mr_path_t1,factor,translate)
old_mr_path_t2 = sets[Set][0][2]
t2,_,_ = preprocess_data(old_mr_path_t2,factor,translate)
write_images(np.stack([flair,t1,t2],axis=-1), output_image_path)
write_images(np.stack([flair,t1,t2],axis=-1), output_image_path+"/treatment eval testing/"+str(Set)+"/image_full")
##flair brain seg
print("tranformed arrays",np.max(flair),np.max(t1),np.max(t2))
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
brain_mask_top = brain_seg_top.predict(np.stack([np.stack([flair], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
print("segmented brain")
binary_brain_wo_median_combined = combine_zeros(segmentations)
median_flair = find_median_grayscale(flair[binary_brain_wo_median_combined > 0])
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
new_array_top = np.stack([np.stack([flair], axis = -1)])/(median_flair/0.2)
brain_mask_top = brain_seg_top.predict(new_array_top)
binary_brain_top = binarize(brain_mask_top, 0.7)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
binary_brain_final_combined2 = combine_zeros(segmentations)
##t1 brain seg
segmentations = []
model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Brain Seg OASIS 36 top view/Model 51 (2).h5"
brain_seg = ConvNetRough(128,128,128)
brain_seg.load_weights(model_path)
brain_mask_top = brain_seg.predict(np.stack([np.stack([t1], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
print("segmented brain")
print(np.stack([np.stack([t1], axis = -1)]).shape)
binary_brain_wo_median_combined = combine_zeros(segmentations)
only_brain_t1 = t1.copy()
only_brain_t1[binary_brain_wo_median_combined == 0] = 0
median_t1 = find_median_grayscale(only_brain_t1)
segmentations = []
model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Brain Seg OASIS 36 top view/Model 51 (2).h5"
brain_seg = ConvNetRough(128,128,128)
brain_seg.load_weights(model_path)
brain_mask_top = brain_seg.predict(np.stack([np.stack([t1/(median_t1/0.3)], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
binary_brain_final_combined1 = combine_zeros(segmentations)
#write_images(binary_brain_final_combined1, output_image_path+"/treatment eval testing/"+str(Set)+"/imageblub")
#input("HUDADAWUBUDAWUP")
##t2 brain seg
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
brain_mask_top = brain_seg_top.predict(np.stack([np.stack([t2], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
print("segmented brain")
binary_brain_wo_median_combined = combine_zeros(segmentations)
median = find_median_grayscale(t2[binary_brain_wo_median_combined > 0])
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
new_array_top = np.stack([np.stack([t2], axis = -1)])/(median/0.2)
brain_mask_top = brain_seg_top.predict(new_array_top)
binary_brain_top = binarize(brain_mask_top, 0.7)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
binary_brain_final_combined = binary_brain_final_combined2.copy()#combine_zeros(segmentations)
binary_brain_final_combined[binary_brain_final_combined1 > 0] = 1
#binary_brain_final_combined[binary_brain_final_combined1 < 1] = 0
write_images(binary_brain_final_combined, output_image_path+"/treatment eval testing/"+str(Set)+"/imageblub")
#-------------
flair[binary_brain_final_combined==0] = 0
t1[binary_brain_final_combined==0] = 0
t2[binary_brain_final_combined==0] = 0
flair,t1,t2 = normalize(flair,t1,t2)
t1 /=(find_median_grayscale(t1)/0.2)
t2 /=(find_median_grayscale(t2)/0.2)
flair /=(find_median_grayscale(flair)/0.2)
write_images(np.stack([flair,t1,t2],axis=-1), output_image_path+"/treatment eval testing/"+str(Set)+"/image_brain")
only_brain = np.stack([flair,t1,t2],axis = -1)
only_brain = skimage.transform.rescale(only_brain, (0.5,0.5,0.5,1))
write_images(only_brain, output_image_path+"/treatment eval testing/"+str(Set)+"/imagebraib")
tumor_seg_binary = load_model(binary_model_path2)
tumor_mask = tumor_seg_binary.predict(np.stack([np.stack([flair/(median_flair/0.3)],axis=-1)]))
tumor_binary = np.squeeze(tumor_mask[0] > 0.9, axis = -1)
tumor_seg_channeled = ConvNetSemantic64(64,64,64)
tumor_seg_channeled.load_weights(semantic_model_path)
tumor_mask = tumor_seg_channeled.predict(np.stack([only_brain]))
print(tumor_mask.shape)
print(np.max(tumor_mask))
tumor_colored = np.argmax(tumor_mask[0], axis = -1)
print(np.max(tumor_colored))
print(tumor_colored.shape)
tumor_colored = skimage.transform.rescale(tumor_colored/3, (2.0,2.0,2.0))
print(np.max(tumor_colored))
tumor_colored = np.round(tumor_colored*3)
write_images(tumor_colored/3, output_image_path+"/treatment eval testing/"+str(Set)+"/image_seg")
tumor_colored[tumor_binary == 0] = 0
tumor_colored = skimage.transform.rescale(tumor_colored/3, (0.5,0.5,0.5))
print(np.max(tumor_colored))
tumor_colored = np.round(tumor_colored*3)
print(tumor_colored.shape)
print(np.unique(tumor_colored))
from .config import chunk_size, sampling_rate
import numpy
class CreateHighCutFilter:
"""Creating a FFT filter audio-effect class/device.
Cuts the upper frequencies of a signal.
Is overloaded with basic settings.
This class introduces latency equal to chunk_size.
Parameters
----------
cutoff_frequency : int or float
Sets the rolloff frequency for the high cut filter.
"""
def __init__(self, cutoff_frequency=8000):
#self.chunk_size = chunk_size
self.fS = sampling_rate # Sampling rate.
self.fH = cutoff_frequency # Cutoff frequency.
self.filter_length = (chunk_size // 2) - 1 # Filter length, must be odd.
self.array_slice_value_start = chunk_size + (self.filter_length // 2)
self.array_slice_value_end = chunk_size - (self.filter_length // 2)
# Compute sinc filter.
self.sinc_filter = numpy.sinc(
2 * self.fH / self.fS * (numpy.arange(self.filter_length) - (self.filter_length - 1) / 2))
# pyplot.plot(self.sinc_filter)
# Apply window.
self.sinc_filter *= numpy.blackman(self.filter_length)
# pyplot.plot(self.sinc_filter)
# Normalize to get unity gain.
self.sinc_filter /= numpy.sum(self.sinc_filter)
self.filtered_signal = numpy.zeros(chunk_size * 3)
self.float32_array_input_1 = numpy.zeros(chunk_size)
self.float32_array_input_2 = numpy.zeros(chunk_size)
self.float32_array_input_3 = numpy.zeros(chunk_size)
self.cut_size = numpy.int16((self.filter_length - 1) / 2)
self.sinc_filter = numpy.append(self.sinc_filter, numpy.zeros(chunk_size - self.filter_length + 1))
self.sinc_filter = numpy.append(self.sinc_filter, numpy.zeros(((len(self.sinc_filter) * 2) - 3)))
self.sinc_filter = numpy.fft.fft(self.sinc_filter)
def apply(self, float32_array_input):
"""Applying the filter to a numpy-array
Parameters
----------
float32_array_input : float
The array the effect should be applied to.
Returns
-------
float
The processed array; it has exactly the same size as the input array.
"""
self.float32_array_input_3 = self.float32_array_input_2
self.float32_array_input_2 = self.float32_array_input_1
self.float32_array_input_1 = float32_array_input
self.filtered_signal = numpy.concatenate(
(self.float32_array_input_3, self.float32_array_input_2, self.float32_array_input_1), axis=None)
self.filtered_signal = numpy.fft.fft(self.filtered_signal)
self.filtered_signal = self.filtered_signal * self.sinc_filter
self.filtered_signal = numpy.fft.ifft(self.filtered_signal)
self.filtered_signal = self.filtered_signal[self.array_slice_value_start:-self.array_slice_value_end]
return self.filtered_signal.real.astype('float32')
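# Minimal usage sketch (the chunk source is hypothetical): the filter is applied
# chunk-by-chunk; because of the internal three-chunk buffer the output lags the
# input by chunk_size samples, as stated in the class docstring.
#
#   high_cut = CreateHighCutFilter(cutoff_frequency=6000)
#   out_chunk = high_cut.apply(in_chunk)   # in_chunk: float32 array of length chunk_size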
class CreateLowCutFilter:
"""Creating a FFT filter audio-effect class/device.
Cuts the lower frequencies of a signal.
Is overloaded with basic settings.
This class introduces latency equal to chunk_size.
Parameters
----------
cutoff_frequency : int or float
Sets the rolloff frequency for the low cut filter.
"""
def __init__(self, cutoff_frequency=160):
#self.chunk_size = chunk_size
self.fS = sampling_rate # Sampling rate.
self.fH = cutoff_frequency # Cutoff frequency.
self.filter_length = (chunk_size // 2) - 1 # Filter length, must be odd.
self.array_slice_value_start = chunk_size + (self.filter_length // 2)
self.array_slice_value_end = chunk_size - (self.filter_length // 2)
# Compute sinc filter.
self.sinc_filter = numpy.sinc(
2 * self.fH / self.fS * (numpy.arange(self.filter_length) - (self.filter_length - 1) / 2))
# Apply window.
self.sinc_filter *= numpy.blackman(self.filter_length)
# Normalize to get unity gain.
self.sinc_filter /= numpy.sum(self.sinc_filter)
# print(len(self.sinc_filter))
# Spectral inversion to create Lowcut from Highcut
self.sinc_filter = -self.sinc_filter
self.sinc_filter[(self.filter_length - 1) // 2] += 1
self.filtered_signal = numpy.zeros(chunk_size * 3)
self.float32_array_input_1 = numpy.zeros(chunk_size)
self.float32_array_input_2 = numpy.zeros(chunk_size)
self.float32_array_input_3 = numpy.zeros(chunk_size)
self.cut_size = numpy.int16((self.filter_length - 1) / 2)
self.sinc_filter = numpy.append(self.sinc_filter, numpy.zeros(chunk_size - self.filter_length + 1))
self.sinc_filter = numpy.append(self.sinc_filter, numpy.zeros(((len(self.sinc_filter) * 2) - 3)))
self.sinc_filter = numpy.fft.fft(self.sinc_filter)
def apply(self, float32_array_input):
"""Applying the filter to a numpy-array
Parameters
----------
float32_array_input : float
The array the effect should be applied to.
Returns
-------
float
The processed array; it has exactly the same size as the input array.
"""
self.float32_array_input_3 = self.float32_array_input_2
self.float32_array_input_2 = self.float32_array_input_1
self.float32_array_input_1 = float32_array_input
self.filtered_signal = numpy.concatenate(
(self.float32_array_input_3, self.float32_array_input_2, self.float32_array_input_1), axis=None)
self.filtered_signal = numpy.fft.fft(self.filtered_signal)
self.filtered_signal = (self.filtered_signal * self.sinc_filter)
self.filtered_signal = numpy.fft.ifft(self.filtered_signal)
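# Sketch of chaining both filters into a simple band-pass (cutoff values are illustrative;
# each stage adds its own chunk_size of latency):
#
#   low_cut  = CreateLowCutFilter(cutoff_frequency=120)
#   high_cut = CreateHighCutFilter(cutoff_frequency=8000)
#   band_passed = high_cut.apply(low_cut.apply(in_chunk))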
""" Testing group-level finite difference. """
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from six import PY3
from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp
from openmdao.test.simple_comps import SimpleComp, SimpleArrayComp
from openmdao.test.util import assert_rel_error
if PY3:
def py3fix(s):
return s.replace('<type', '<class')
else:
def py3fix(s):
return s
class TestSrcIndices(unittest.TestCase):
def test_src_indices(self):
size = 10
root = Group()
root.add('P1', IndepVarComp('x', np.zeros(size)))
root.add('C1', ExecComp('y = x * 2.', y=np.zeros(size//2), x=np.zeros(size//2)))
root.add('C2', ExecComp('y = x * 3.', y=np.zeros(size//2), x=np.zeros(size//2)))
root.connect('P1.x', "C1.x", src_indices=list(range(size//2)))
root.connect('P1.x', "C2.x", src_indices=list(range(size//2, size)))
prob = Problem(root)
prob.setup(check=False)
root.P1.unknowns['x'][0:size//2] += 1.0
root.P1.unknowns['x'][size//2:size] -= 1.0
prob.run()
assert_rel_error(self, root.C1.params['x'], np.ones(size//2), 0.0001)
assert_rel_error(self, root.C2.params['x'], -np.ones(size//2), 0.0001)
def test_array_to_scalar(self):
root = Group()
root.add('P1', IndepVarComp('x', np.array([2., 3.])))
root.add('C1', SimpleComp())
root.add('C2', ExecComp('y = x * 3.', y=0., x=0.))
root.connect('P1.x', 'C1.x', src_indices=[0,])
root.connect('P1.x', 'C2.x', src_indices=[1,])
prob = Problem(root)
prob.setup(check=False)
prob.run()
self.assertAlmostEqual(root.C1.params['x'], 2.)
self.assertAlmostEqual(root.C2.params['x'], 3.)
def test_subarray_to_promoted_var(self):
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp())
G2 = G.add('G2', Group())
A2 = G2.add('A2', SimpleArrayComp())
root.connect('P.x', 'G.A.x', src_indices=[0,1])
root.connect('P.x', 'C.x', src_indices=[2,])
root.connect('P.x', 'G.G2.A2.x', src_indices=[3, 4])
prob = Problem(root)
prob.setup(check=False)
prob.run()
assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
self.assertAlmostEqual(root.C.params['x'], 3.)
assert_rel_error(self, root.G.G2.A2.params['x'], np.array([4., 5.]), 0.0001)
# now try the same thing with promoted var
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])
G2 = G.add('G2', Group())
A2 = G2.add('A2', SimpleArrayComp(), promotes=['x', 'y'])
root.connect('P.x', 'G.x', src_indices=[0,1])
root.connect('P.x', 'C.x', src_indices=[2,])
root.connect('P.x', 'G.G2.x', src_indices=[3, 4])
prob = Problem(root)
prob.setup(check=False)
prob.run()
assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
self.assertAlmostEqual(root.C.params['x'], 3.)
assert_rel_error(self, root.G.G2.A2.params['x'], np.array([4., 5.]), 0.0001)
def test_src_indices_connect_error(self):
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp())
root.connect('P.x', 'G.A.x', src_indices=[0])
root.connect('P.x', 'C.x', src_indices=[2,])
prob = Problem(root)
with self.assertRaises(Exception) as cm:
prob.setup(check=False)
expected = py3fix("Size 1 of the indexed sub-part of source 'P.x' "
"must be the same as size 2 of target 'G.A.x'.")
self.assertTrue(expected in str(cm.exception))
# now try the same thing with promoted var
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])
root.connect('P.x', 'G.x', src_indices=[0,1,2])
root.connect('P.x', 'C.x', src_indices=[2,])
prob = Problem(root)
with self.assertRaises(Exception) as cm:
prob.setup(check=False)
expected = py3fix("Size 3 of the indexed sub-part of source 'P.x' "
"must be the same as size 2 of target 'G.A.x' (G.x).")
self.assertTrue(expected in str(cm.exception))
def test_inner_connection(self):
class Squarer(Component):
def __init__(self, size):
super(Squarer, self).__init__()
self.add_param(name='input:x', val=np.zeros(size), desc='x')
self.add_output(name='output:x2', val=np.zeros(size), desc='x squared')
def solve_nonlinear(self,params,unknowns,resids):
unknowns['output:x2'] = params['input:x']**2
class Cuber(Component):
def __init__(self, size):
super(Cuber, self).__init__()
self.add_param(name='x', val=np.zeros(size), desc='x')
self.add_output(name='output:x3', val=np.zeros(size), desc='x squared')
def solve_nonlinear(self,params,unknowns,resids):
unknowns['output:x3'] = params['x']**3
class InnerGroup(Group):
def __init__(self):
super(InnerGroup, self).__init__()
self.add('square1', Squarer(5))
self.add('square2', Squarer(3), promotes=['input:x'])
# the following connection should result in 'cube1.x' using the
# same src_indices as 'input:x', which is [2,3,4] from the outer
# connection
self.add('cube1', Cuber(3))
self.connect('input:x', 'cube1.x')
# the following connection should result in 'cube2.x' using
# src_indices [0,1] of 'input:x', which corresponds to the
# src_indices [2,3] from the outer connection
self.add('cube2', Cuber(2))
self.connect('input:x', 'cube2.x', src_indices=[0,1])
# the following connection should result in 'cube3.x' using
# src_indices [1,2] of 'square1.input:x', which corresponds to the
# src_indices [1,2] from the outer connection
self.add('cube3', Cuber(2))
self.connect('square1.input:x', 'cube3.x', src_indices=[1,2])
class OuterGroup(Group):
def __init__(self):
super(OuterGroup, self).__init__()
iv = IndepVarComp('input:x', np.zeros(5))
self.add('indep_vars', iv, promotes=['*'])
self.add('inner', InnerGroup())
self.connect('input:x', 'inner.square1.input:x')
self.connect('input:x', 'inner.input:x', src_indices=[2,3,4])
prob = Problem(root=OuterGroup())
prob.setup(check=False)
prob['input:x'] = np.array([4., 5., 6., 7., 8.])
prob.run()
assert_rel_error(self, prob.root.inner.square1.params['input:x'],
np.array([4., 5., 6., 7., 8.]), 0.00000001)
assert_rel_error(self, prob.root.inner.cube1.params['x'],
np.array([6., 7., 8.]), 0.00000001)
assert_rel_error(self, prob.root.inner.cube2.params['x'],
np.array([6., 7.]), 0.00000001)
assert_rel_error(self, prob.root.inner.cube3.params['x'],
np.array([5., 6.]), 0.00000001)
def test_cannonball_src_indices(self):
# this test replicates the structure of a problem in pointer. The bug was that
# the state variables in the segments were not getting connected to the proper
# src_indices of the parameters from the independent variables component
state_var_names = ['x', 'y', 'vx', 'vy']
param_arg_names = ['g']
num_seg = 3
seg_ncn = 3
num_nodes = 3
class Trajectory(Group):
def __init__(self):
super(Trajectory, self).__init__()
class Phase(Group):
def __init__(self, num_seg, seg_ncn):
super(Phase, self).__init__()
ncn_u = 7
state_vars = [('X_c:{0}'.format(state_name), np.zeros(ncn_u))
for state_name in state_var_names]
self.add('state_var_comp', IndepVarComp(state_vars), promotes=['*'])
param_args = [('P_s:{0}'.format(param_name), 0.)
for param_name in param_arg_names]
self.add('static_params', IndepVarComp(param_args), promotes=['*'])
for i in range(num_seg):
self.add('seg{0}'.format(i), Segment(seg_ncn))
offset_states = 0
for i in range(num_seg):
idxs_states = range(offset_states, num_nodes+offset_states)
offset_states += num_nodes-1
for state_name in state_var_names:
self.connect( 'X_c:{0}'.format(state_name), 'seg{0:d}.X_c:{1}'.format(i, state_name), src_indices=idxs_states)
for param_name in param_arg_names:
self.connect( 'P_s:{0}'.format(param_name), 'seg{0:d}.P_s:{1}'.format(i, param_name))
class Segment(Group):
def __init__(self, num_nodes):
super(Segment, self).__init__()
self.add('eom_c', EOM(num_nodes))
self.add('static_bcast', StaticBCast(num_nodes), promotes=['*'])
self.add('state_interp', StateInterp(num_nodes), promotes=['*'])
for name in state_var_names:
self.connect('X_c:{0}'.format(name), 'eom_c.X:{0}'.format(name))
class EOM(Component):
def __init__(self, num_nodes):
super(EOM, self).__init__()
for name in state_var_names:
self.add_param('X:{0}'.format(name), np.zeros(num_nodes))
self.add_output('dXdt:{0}'.format(name), np.zeros(num_nodes))
for name in param_arg_names:
self.add_param('P:{0}'.format(name), 0.)
def solve_nonlinear(self, params, unknowns, resids):
unknowns['dXdt:x'][:] = params['X:vx']
unknowns['dXdt:y'][:] = params['X:vy']
unknowns['dXdt:vx'][:] = 0.0
unknowns['dXdt:vy'][:] = -params['P:g']
class StaticBCast(Component):
def __init__(self, num_nodes):
super(StaticBCast, self).__init__()
for name in param_arg_names:
self.add_param('P_s:{0}'.format(name), 0.)
def solve_nonlinear(self, params, unknowns, resids):
pass
class StateInterp(Component):
def __init__(self, num_nodes):
super(StateInterp, self).__init__()
for name in state_var_names:
self.add_param('X_c:{0}'.format(name), np.zeros(num_nodes))
""" Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
name, the operands a and b, and a set of tags to filter the tests
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
Run the function `do` on this test case, expanding arguments
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
return new_cases
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
new_cases = []
for case in CASES:
for a, a_label in _stride_comb_iter(case.a):
for b, b_label in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
tags=case.tags | {'strided'})
new_cases.append(new_case)
return new_cases
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
self.check_cases(require={'square'},
exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
self.check_cases(require={'square', 'size-0'},
exclude={'generalized'})
class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
self.check_cases(require={'generalized', 'square'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
self.check_cases(require={'generalized', 'square', 'size-0'})
class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# kept apart from TestSolve for use for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(consistent_subclass(x, b))
class TestSolve(SolveCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:, :, 0:0]
result = linalg.solve(a, b[:, :, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(consistent_subclass(a_inv, a))
class TestInv(InvCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
class TestEigvals(EigvalsCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
assert_(consistent_subclass(evectors, a))
class TestEig(EigCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVD(SVDCases, SVDBaseTests):
def test_empty_identity(self):
""" Empty input should put an identity matrix in u or vh """
x = np.empty((4, 0))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (4, 4))
assert_equal(vh.shape, (0, 0))
assert_equal(u, np.eye(4))
x = np.empty((0, 4))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (0, 0))
assert_equal(vh.shape, (4, 4))
assert_equal(vh, np.eye(4))
class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def hermitian(mat):
axes = list(range(mat.ndim))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conj(np.transpose(mat, axes=axes))
assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
hermitian = True
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.cond, c)
return
# +-2 norms
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
linalg.cond(a), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 2), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -2), s[..., -1] / s[..., 0],
single_decimal=5, double_decimal=11)
# Other norms
cinv = np.linalg.inv(c)
assert_almost_equal(
linalg.cond(a, 1),
abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -1),
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, np.inf),
abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -np.inf),
abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 'fro'),
np.sqrt((abs(c)**2).sum(-1).sum(-1)
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2/3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, 'fro']
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, 'fro']
p_pos = [None, 1, 2, 'fro']
A = np.ones((2, 2))
A[0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float_))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1,0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0,0] = 0
A[1,1] = 0
for p in (None, 1, 2, 'fro', -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0,0], np.inf)
assert_equal(c[1,1], np.inf)
assert_(np.isfinite(c[0,1]))
assert_(np.isfinite(c[1,0]))
class PinvCases(LinalgSquareTestCase,
LinalgNonsquareTestCase,
LinalgGeneralizedSquareTestCase,
LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinv(PinvCases):
pass
class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a, hermitian=True)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinvHermitian(PinvHermitianCases):
pass
class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.complex64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.complex64)
assert_(res[1].dtype.type is np.float32)
a = np.zeros((0, 0), dtype=np.float64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.float64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.float64)
assert_(res[1].dtype.type is np.float64)
class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, False)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m == 0:
assert_((x == 0).all())
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if np.asarray(b).ndim == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(consistent_subclass(x, b))
assert_(consistent_subclass(residuals, b))
class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
[1., 0., 1., 0., 0., 4.],
[0., 0., 0., 2., 3., 0.]]).T
b = np.array([1, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
w = sup.record(FutureWarning, "`rcond` parameter will change")
x, residuals, rank, s = linalg.lstsq(a, b)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
assert_(rank == 4)
import numpy as np
from autograd_cls import AutoGrad
from compute_utils import compute_lcv_lambda_gradient, compute_epsilon_lambda_gradient, compute_hjj, compute_z, compute_z_gradient, compute_eps_t, compute_hjj_gradient, get_discounted_return, calculate_batch_loss, invert_matrix, calculate_batch_mspbe_msbe_mse_losses, compute_CV_loss  # compute_CV_loss is referenced below; assumed to be provided by compute_utils
from lstd import LSTD, MiniBatchLSTDLambda
from adam import ADAM
import copy
from pprint import pprint
import pdb
import pudb
def minibatch_LSTD(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config
):
lambda_ = config.default_lambda
gamma = config.gamma
LSTD_lambda = MiniBatchLSTDLambda(gamma, lambda_, Phi)
G = {}
running_loss = []
num_episodes = len(trajectories)
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
ep_rewards = []
ep_states = []
cur_state, reward, next_state, done = traj[0]
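# The MiniBatchLSTDLambda updater is primed with a (None, 0, first_state) call at the start
# of an episode and closed with a (last_state, 0, None) call when the episode is done; the
# None sentinel marks the missing predecessor/successor state.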
LSTD_lambda.update(None, 0 , cur_state)
cur_state = next_state
#LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
LSTD_lambda.update(cur_state, reward, next_state)
ep_rewards.append(reward)
ep_states.append(cur_state)
if done:
LSTD_lambda.update(next_state, 0, None)
theta = LSTD_lambda.theta
ep_discountedrewards = get_discounted_return(ep_rewards, gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
#loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
ms_loss, rms_loss = calculate_batch_mspbe_msbe_mse_losses(trajectories, G, theta, Phi, R, D, P, config)
return LSTD_lambda, theta, G, rms_loss, ms_loss
def minibatch_LSTD_withCV(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config,
trajectories_test,
Gs_test
):
lambda_ = config.default_lambda
gamma = config.gamma
LSTD_lambda = MiniBatchLSTDLambda(gamma, lambda_, Phi)
G = {}
running_loss = []
num_episodes = len(trajectories)
valid_episode_counter = 0
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
ep_rewards = []
ep_states = []
cur_state, reward, next_state, done = traj[0]
LSTD_lambda.update(None, 0 , cur_state)
cur_state = next_state
#LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
LSTD_lambda.update(cur_state, reward, next_state)
ep_rewards.append(reward)
ep_states.append(cur_state)
if done:
LSTD_lambda.update(next_state, 0, None)
theta = LSTD_lambda.theta
ep_discountedrewards = get_discounted_return(ep_rewards, gamma)
#pdb.set_trace()
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
if valid_episode_counter % config.compute_cv_iterations == 0 and valid_episode_counter > 0:
#pudb.set_trace()
new_config = copy.deepcopy(config)
new_config.default_lambda = 0
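# The leave-one-trajectory-out CV loss is evaluated on a copy of config with
# default_lambda forced to 0, once every config.compute_cv_iterations valid episodes.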
current_cv_loss = compute_CV_loss(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger = None,
config =new_config)
losses, avg_losses = calculate_batch_mspbe_msbe_mse_losses(trajectories_test, Gs_test, theta, Phi, R, D, P, new_config)
print('current_cv_loss:{0}'.format(current_cv_loss))
if logger:
#pudb.set_trace()
logger.log_scalar('Train mean loto cv', current_cv_loss, valid_episode_counter)
logger.log_scalar('Test RMSPBE', avg_losses['RMSPBE'], valid_episode_counter)
logger.log_scalar('Test RMSBE', avg_losses['RMSBE'], valid_episode_counter)
logger.log_scalar('Test RMSE', avg_losses['RMSE'], valid_episode_counter)
logger.writer.flush()
valid_episode_counter += 1
#loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
ms_loss, rms_loss = calculate_batch_mspbe_msbe_mse_losses(trajectories, G, theta, Phi, R, D, P, config)
return LSTD_lambda, theta, G, rms_loss, ms_loss
def LSTD_algorithm(trajectories, Phi, num_features, gamma=0.4, lambda_=0.2):
# LSTD operator:
LSTD_lambda = LSTD(num_features)
G = {}
running_loss = []
num_episodes = len(trajectories)
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
if len(traj) <= 4:
continue
ep_rewards = []
ep_states = []
cur_state = traj[0][0]
LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
LSTD_lambda.update_boyan(Phi[cur_state, :], reward, Phi[next_state, :], gamma, lambda_, timestep)
ep_rewards.append(reward)
ep_states.append(cur_state)
theta = LSTD_lambda.theta
ep_discountedrewards = get_discounted_return(ep_rewards, gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
# print('Episode {0} loss is {1}'.format(ep, ep_loss))
# print('Episode {0} rewards are {1}'.format(ep, ep_rewards))
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
# print('episode loss:{0}'.format(loss))
# print(LSTD_lambda.A, LSTD_lambda.b)
# R, D, P and config are not available in this legacy routine's scope, so the MSPBE/MSBE
# losses are not computed here; see minibatch_LSTD for the version that reports them.
# print("average running loss in training: ", sum(running_loss) / num_episodes)
# print("average loss after training: ", sum(loss) / num_episodes)
average_loss = rmse
return LSTD_lambda, theta, average_loss, G, loss
def Adaptive_LSTD_algorithm(trajectories,
Phi,
P,
V,
D,
R,
Gs,
config
):
# LSTD operator:
Auto_grad = AutoGrad(compute_CV_loss, 4)
Auto_grad.gradient_fun()
adaptive_LSTD_lambda = LSTD(config.num_features)
G = {}
loss = []
running_loss = []
num_episodes = len(trajectories)
adam_optimizer = ADAM(x_init = config.default_lambda, alpha=config.lr)
lambda_ = config.default_lambda
for ep in range(num_episodes):
G[ep] = []
traj = trajectories[ep]
if len(traj) <= 4:
continue
ep_rewards = []
ep_states = []
Z = np.zeros((config.num_features, config.num_states))
Z_gradient = np.zeros((config.num_features, config.num_states))
H_diag = np.zeros(config.num_states) # n
eps = np.zeros(config.num_states)
states_count = np.zeros(config.num_states)
epsilon_lambda_gradient = np.zeros(config.num_states)
H_diag_gradient = np.zeros(config.num_states)
episode_loss = 0
cur_state = traj[0][0]
adaptive_LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
adaptive_LSTD_lambda.update_boyan(Phi[cur_state, :], reward, Phi[next_state, :], config.gamma, lambda_, timestep)
ep_rewards.append(reward)
ep_states.append(cur_state)
theta = adaptive_LSTD_lambda.theta
print(theta)
A = adaptive_LSTD_lambda.A
b = adaptive_LSTD_lambda.b
A_inv = np.linalg.pinv(A + np.eye(A.shape[0]) * config.A_inv_epsilon, rcond=.1)
for timestep in range(len(traj)-1):
cur_state, reward, next_state, done = traj[timestep]
# To-do : change the following update to running average
states_count[cur_state] += 1
ct = states_count[cur_state]
Z[:,cur_state] = (ct-1)/ct *Z[:,cur_state]+ 1/ct * compute_z(lambda_, config.gamma, Phi, ep_states, timestep )
Z_gradient[:, cur_state] = (ct-1)/ct * Z_gradient[:, cur_state] + 1/ct * compute_z_gradient(lambda_, config.gamma, Phi, ep_states, timestep)
H_diag[cur_state] = (ct-1)/ct * H_diag[cur_state] + 1/ct * compute_hjj(Phi, lambda_, config.gamma, ep_states, timestep, A_inv)
eps[cur_state] = (ct-1)/ct * eps[cur_state] + 1/ct * compute_eps_t(Phi, theta, config.gamma, reward, ep_states, timestep)
epsilon_lambda_gradient[cur_state] = (ct-1)/ct * epsilon_lambda_gradient[cur_state] + \
1/ct * compute_epsilon_lambda_gradient(Phi,
lambda_,
config.gamma,
A,
b,
A_inv,
Z,
timestep,
ep_states,
ep_rewards
)
H_diag_gradient[cur_state] = (ct-1)/ct * H_diag_gradient[cur_state] + 1/ct * compute_hjj_gradient(Phi,
lambda_,
config.gamma,
ep_states,
timestep,
A,
b,
A_inv
)
#grad = compute_cv_gradient(Phi, theta, gamma, lambda_, P, V, D, R)
# Replaced the above update with:
grad = compute_lcv_lambda_gradient(eps,
H_diag,
ep_states,
epsilon_lambda_gradient,
H_diag_gradient,
grad_clip_max_norm = config.grad_clip_norm)
if config.compute_autograd:
auto_grad = Auto_grad.loss_autograd_fun(trajectories, Phi, config.num_features, config.gamma, lambda_, Gs)
print('gradient diff:{0}'.format(abs(grad-auto_grad)))
# if ep > 1000 :
# new_lambda = lambda_ - lr * compute_cv_gradient(Phi, theta, gamma, lambda_, P, V, D)
# print(new_lambda)
# if new_lambda >= 0 and new_lambda <= 1:
# lambda_ = new_lambda
# print('current lambda:{0}'.format(lambda_))
# grad = compute_cv_gradient2(Phi, theta, gamma, lambda_, R, A, b, z)
if config.use_adam_optimizer:
adam_optimizer.update(grad, ep)
new_lambda = adam_optimizer.x
else:
new_lambda = lambda_ - config.lr * grad
if new_lambda >= 0 and new_lambda <= 1:
lambda_ = new_lambda
print('current lambda:{0}'.format(lambda_))
ep_discountedrewards = get_discounted_return(ep_rewards, config.gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
# print('Episode {0} loss is {1}'.format(ep, ep_loss))
# print('Episode {0} rewards are {1}'.format(ep, ep_rewards))
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
# After we calculated the Theta parameter from the training data
loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
# print('episode loss:{0}'.format(loss))
# print(LSTD_lambda.A, LSTD_lambda.b)
#print("Final Lambda: {0}".format(lambda_))
#print("average running loss in training: ", np.mean(running_loss))
#print("average loss after training: ", np.mean(loss))
average_loss = rmse
return adaptive_LSTD_lambda, theta, average_loss, G, lambda_
def Adaptive_LSTD_algorithm_batch(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config
):
# LSTD operator:
Auto_grad = AutoGrad(compute_CV_loss, 4)
Auto_grad.gradient_fun()
#adaptive_LSTD_lambda = LSTD(config.num_features)
adaptive_LSTD_lambda = MiniBatchLSTDLambda(config.gamma, config.default_lambda, Phi)
G = {}
loss = []
running_loss = []
num_episodes = len(trajectories)
adam_optimizer = ADAM(x_init = config.default_lambda, alpha=config.lr)
lambda_ = config.default_lambda
valid_episode_counter = 0
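# Per-batch running statistics (Z, H_diag, eps and their lambda-gradients) are reset every
# config.batch_size valid episodes, and one optimisation step on lambda_ (ADAM or plain
# gradient descent) is taken per batch from compute_lcv_lambda_gradient.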
for ep in range(num_episodes):
traj = trajectories[ep]
G[ep] = []
if len(traj) <= 4:
continue
cur_state, reward, next_state, done = traj[0]
adaptive_LSTD_lambda.update(None, 0 , cur_state)
if valid_episode_counter % config.batch_size == 0:
ep_rewards = []
ep_states = []
Z = np.zeros((config.num_features, config.num_states))
Z_gradient = np.zeros((config.num_features, config.num_states))
H_diag = np.zeros(config.num_states) # n
eps = np.zeros(config.num_states)
states_count = np.zeros(config.num_states)
epsilon_lambda_gradient = np.zeros(config.num_states)
H_diag_gradient = np.zeros(config.num_states)
episode_loss = 0
#cur_state = traj[0][0]
#adaptive_LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
adaptive_LSTD_lambda.update(cur_state, reward, next_state)
if done:
adaptive_LSTD_lambda.update(next_state, 0, None)
#adaptive_LSTD_lambda.update_boyan(Phi[cur_state, :], reward, Phi[next_state, :], config.gamma, lambda_, timestep)
ep_rewards.append(reward)
ep_states.append(cur_state)
if logger:
logger.log_scalar('average trajectories reward', np.mean(ep_rewards), valid_episode_counter)
logger.writer.flush()
theta = adaptive_LSTD_lambda.theta
A = adaptive_LSTD_lambda.A
b = adaptive_LSTD_lambda.b.reshape((-1,1))
#A_inv = np.linalg.pinv(A + np.eye(A.shape[0]) * config.A_inv_epsilon, rcond=.1)
A_inv = invert_matrix(A)
for timestep in range(len(traj)-1):
cur_state, reward, next_state, done = traj[timestep]
states_count[cur_state] += 1
ct = states_count[cur_state]
Z[:,cur_state] = (ct-1)/ct *Z[:,cur_state]+ 1/ct * compute_z(lambda_, config.gamma, Phi, ep_states, timestep )
Z_gradient[:, cur_state] = (ct-1)/ct * Z_gradient[:, cur_state] + \
1/ct * compute_z_gradient(lambda_, config.gamma, Phi, ep_states, timestep)
H_diag[cur_state] = (ct-1)/ct * H_diag[cur_state] + \
1/ct * compute_hjj(Phi, lambda_, config.gamma, ep_states, timestep, A_inv)
eps[cur_state] = (ct-1)/ct * eps[cur_state] + \
1/ct * compute_eps_t(Phi, theta, config.gamma, reward, ep_states, timestep)
epsilon_lambda_gradient[cur_state] = (ct-1)/ct * epsilon_lambda_gradient[cur_state] + \
1/ct * compute_epsilon_lambda_gradient(Phi,
lambda_,
config.gamma,
A,
b,
A_inv,
Z,
timestep,
ep_states,
ep_rewards
)
H_diag_gradient[cur_state] = (ct-1)/ct * H_diag_gradient[cur_state] + 1/ct * compute_hjj_gradient(Phi,
lambda_,
config.gamma,
ep_states,
timestep,
A,
b,
A_inv
)
# update the gradients of the batch:
if valid_episode_counter % config.batch_size == 0:
grad = compute_lcv_lambda_gradient(eps,
H_diag,
ep_states,
epsilon_lambda_gradient,
H_diag_gradient,
grad_clip_max_norm = config.grad_clip_norm)
if logger:
logger.log_scalar('CV loss lambda gradients per batch', grad, valid_episode_counter/config.batch_size)
logger.writer.flush()
if config.compute_autograd:
auto_grad = Auto_grad.loss_autograd_fun(trajectories, Phi, config.num_features, config.gamma, lambda_, Gs)
print('gradient diff:{0}'.format(abs(grad-auto_grad)))
if config.use_adam_optimizer:
adam_optimizer.update(grad, valid_episode_counter+1)
new_lambda = adam_optimizer.x
else:
new_lambda = lambda_ - config.lr * grad
if new_lambda >= 0 and new_lambda <= 1:
lambda_ = new_lambda
print('gradient: {0}'.format(grad))
print('current lambda:{0}'.format(lambda_))
print('current theta:{0}'.format(theta))
ep_discountedrewards = get_discounted_return(ep_rewards, config.gamma)
# print('ep_discounted:{0}'.format(ep_discountedrewards))
if len(ep_discountedrewards) > 0:
ep_loss = np.mean(
[(np.dot(Phi[ep_states[t], :], theta) - ep_discountedrewards[t]) ** 2 for t in range(len(ep_states))])
G[ep] = ep_discountedrewards
running_loss.append(ep_loss)
valid_episode_counter += 1
# After we calculated the Theta parameter from the training data
#loss, rmse = calculate_batch_loss(trajectories, G, theta, Phi)
new_config = copy.deepcopy(config)
new_config.lambda_ = lambda_
ms_loss, rms_loss = calculate_batch_mspbe_msbe_mse_losses(trajectories, G, theta, Phi, R, D, P, new_config)
print('Theta values: {0}'.format(theta))
print('episode RMSPBE :{0}'.format(rms_loss['RMSPBE']))
# print('episode loss:{0}'.format(loss))
# print(LSTD_lambda.A, LSTD_lambda.b)
#print("Final Lambda: {0}".format(lambda_))
#print("average running loss in training: ", np.mean(running_loss))
#print("average loss after training: ", np.mean(loss))
return adaptive_LSTD_lambda, theta, G, lambda_, ms_loss, rms_loss
'''
The same as Adaptive_LSTD_algorithm_batch, except A and b
are calculated based on all the episodes.
'''
def Adaptive_LSTD_algorithm_batch_type2(trajectories,
Phi,
P,
V,
D,
R,
Gs,
logger,
config
):
# LSTD operator:
Auto_grad = AutoGrad(compute_CV_loss, 4)
Auto_grad.gradient_fun()
adaptive_LSTD_lambda = LSTD(config.num_features)
G = {}
loss = []
running_loss = []
num_episodes = len(trajectories)
adam_optimizer = ADAM(x_init = config.default_lambda, alpha=config.lr)
lambda_ = config.default_lambda
##### First go over all the trajectories and calculate estimate A and b:
for ep in range(num_episodes):
traj = trajectories[ep]
if len(traj) <= 4:
continue
cur_state = traj[0][0]
adaptive_LSTD_lambda.reset_boyan(Phi[cur_state, :])
for timestep in range(len(traj)):
cur_state, reward, next_state, done = traj[timestep]
adaptive_LSTD_lambda.update_boyan(Phi[cur_state, :],
reward,
Phi[next_state, :],
config.gamma,
lambda_,
timestep
)
# theta = adaptive_LSTD_lambda.theta
# A = adaptive_LSTD_lambda.A
# b = adaptive_LSTD_lambda.b
# A_inv = np.linalg.pinv(A + np.eye(A.shape[0]) * config.A_inv_epsilon)
#pudb.set_trace()
######## Now use the above A and b to calculate optimal lambda:
valid_episode_counter = 0
for ep in range(num_episodes):
traj = trajectories[ep]
G[ep] = []
# if len(traj) <= 4:
# continue
if valid_episode_counter % config.batch_size == 0:
ep_rewards = []
ep_states = []
Z = np.zeros((config.num_features, config.num_states))
Z_gradient = np.zeros((config.num_features, config.num_states))
H_diag = np.zeros(config.num_states) # n
eps = np.zeros(config.num_states)
states_count = np.zeros(config.num_states)
epsilon_lambda_gradient = np.zeros(config.num_states)
"""
===========================================
WEDTM Demo
Inter and Intra Topic Structure Learning with Word Embeddings
He Zhao, <NAME>, <NAME>, <NAME>
Published in the International Conference on Machine Learning (ICML) 2018
===========================================
"""
# Author: <NAME> <<EMAIL>>; <NAME> <<EMAIL>>; <NAME> <<EMAIL>>
# License: BSD-3-Clause
import os
import copy
import time
import math
import numpy as np
from ._basic_model import Basic_Model
from .._sampler import Basic_Sampler
from .._utils import *
from scipy import sparse
class WEDTM(Basic_Model):
def __init__(self, K: [list], device='gpu'):
"""
The basic model for WEDTM
Inputs:
K : [list] number of topics of each layer;
device : [str] 'cpu' or 'gpu';
Attributes:
@public:
global_params : [Params] the global parameters of the probabilistic model
local_params : [Params] the local parameters of the probabilistic model
@private:
_model_setting : [Params] the model settings of the probabilistic model
_hyper_params : [Params] the hyper parameters of the probabilistic model
"""
super(WEDTM, self).__init__()
setattr(self, '_model_name', 'WEDTM')
self._model_setting.K = K
self._model_setting.T = len(K)
self._model_setting.device = device
assert self._model_setting.device in ['cpu', 'gpu'], 'Device Type Error: the device should be ''cpu'' or ''gpu'''
self._sampler = Basic_Sampler(self._model_setting.device)
def initial(self, data):
'''
Initialize the parameters of WEDTM with the input documents
Inputs:
data : [np.ndarray] or [scipy.sparse.csc.csc_matrix] V*N matrix, N bag-of-words vectors with a vocabulary length of V
Attributes:
@public:
global_params.Phi : [np.ndarray] K*V matrix, K topics with a vocabulary length of V
local_params.Theta : [np.ndarray] K*N matrix, the topic proportions of N documents
@private:
_model_setting.V : [int] scalar, the length of the vocabulary
'''
self._model_setting.V = data.shape[0]
self.global_params.Phi = np.zeros((self._model_setting.K[0], self._model_setting.V)).astype(int)
def train(self, embeddings: np.ndarray, S: int, iter_all: int, data: np.ndarray, is_train: bool = True):
'''
Inputs:
embeddings : [np.ndarray] V*D, word embedding of training words
S : [int] number of sub-topics
iter_all : [int] scalar, the number of Gibbs sampling iterations
data : [np.ndarray] V*N_train matrix, N_train bag-of-words vectors with a vocabulary length of V
is_train : [bool] True or False, whether to update the global params in the probabilistic model
Attributes:
@public:
local_params.Theta : [np.ndarray] K*N_train matrix, the topic proportions of N_train documents
@private:
_model_setting.N : [int] scalar, the number of the documents in the corpus
_model_setting.Iteration : [int] scalar, the iterations of sampling
Outputs:
local_params : [Params] the local parameters of the probabilistic model
'''
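# Illustrative call sequence (a hedged sketch, not from the original repo; the variable
# names and the K values below are assumptions):
#   model = WEDTM(K=[100, 80, 50], device='gpu')
#   model.initial(train_data)                      # train_data: V*N bag-of-words matrix
#   local_params = model.train(word_embeddings, S=3, iter_all=100, data=train_data)
#   test_params = model.test(word_embeddings, S=3, iter_all=100, data=test_data)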
assert type(data) is np.ndarray, 'Data type error: the input data should be a 2-D np.ndarray'
self._model_setting.Iteration = [iter_all] * self._model_setting.T
self._model_setting.N = data.shape[1]
# initial local paramters
self.local_params.Theta = np.zeros((self._model_setting.K[0], self._model_setting.N)).astype(int)
# WS the trained words' word index
# DS the trained words' doc index
# ZS the trained words' random theme
words_num = np.sum(data)
WS = np.zeros(words_num).astype(int)
DS = np.zeros(words_num).astype(int)
wi, di = np.where(data)
cc = data[wi, di]
pos = 0
for i in range(len(cc)):
WS[pos:pos+cc[i]] = wi[i]
DS[pos:pos+cc[i]] = di[i]
pos = pos+cc[i]
a0 = 0.01
b0 = 0.01
e0 = 1
f0 = 1
beta0 = 0.05
# Add the default word embedding
embeddings = np.insert(embeddings, embeddings.shape[1], values=np.ones(self._model_setting.V), axis=1)
self.Theta = [[]] * self._model_setting.T
c_j = [[]] * (self._model_setting.T + 1)
for t in range(self._model_setting.T + 1):
c_j[t] = np.ones((1, self._model_setting.N))
self.Phi = [{}] * self._model_setting.T
Xt_to_t1 = [[]] * self._model_setting.T
WSZS = [[]] * self._model_setting.T
paraGlobal = [{}] * self._model_setting.T
# Initialise beta for t = 1
beta1, self.beta_para = self._init_beta(self._model_setting.K[0], self._model_setting.V, S, embeddings, beta0)
for Tcurrent in range(self._model_setting.T):
if Tcurrent == 0: # layer 1, initial params.
ZS = np.random.randint(self._model_setting.K[Tcurrent], size=(len(DS))) # theme of each words
self.local_params.Theta = np.zeros((self._model_setting.K[Tcurrent], self._model_setting.N)).astype(int) # Theta (K,N) distribution of theme
for i in range(len(ZS)):
self.local_params.Theta[ZS[i], DS[i]] += 1
if is_train:
self.global_params.Phi = np.zeros((self._model_setting.K[Tcurrent], self._model_setting.V)).astype(int) # ZSWS Phi (K,V) distribution of words
for i in range(len(ZS)):
self.global_params.Phi[ZS[i], WS[i]] += 1
WSZS[Tcurrent] = self.global_params.Phi.T
Xt_to_t1[Tcurrent] = self.local_params.Theta
n_dot_k = np.sum(self.local_params.Theta, 1) # count number of each theme in doc
p_j = self._calculate_pj(c_j, Tcurrent)
r_k = 1 / self._model_setting.K[Tcurrent] * np.ones(self._model_setting.K[Tcurrent])
gamma0 = 1
c0 = 1
else:
self._model_setting.K[Tcurrent] = self._model_setting.K[Tcurrent - 1]
if self._model_setting.K[Tcurrent] <= 4:
break
self.Phi[Tcurrent] = np.random.rand(self._model_setting.K[Tcurrent - 1], self._model_setting.K[Tcurrent])
self.Phi[Tcurrent] = self.Phi[Tcurrent] / np.maximum(realmin, np.sum(self.Phi[Tcurrent], 0))
self.Theta[Tcurrent] = np.ones((self._model_setting.K[Tcurrent], self._model_setting.N)) / self._model_setting.K[Tcurrent]
p_j = self._calculate_pj(c_j, Tcurrent)
r_k = 1 / self._model_setting.K[Tcurrent] * np.ones(self._model_setting.K[Tcurrent])
gamma0 = self._model_setting.K[Tcurrent] / self._model_setting.K[1]
c0 = 1
for iter in range(1, self._model_setting.Iteration[Tcurrent]):
start_time = time.time()
for t in range(Tcurrent + 1):
if t == 0:
dex111 = list(range(len(ZS)))
np.random.shuffle(dex111)
ZS = ZS[dex111]
DS = DS[dex111]
WS = WS[dex111]
if Tcurrent == 0:
shape = np.dot(r_k.reshape(-1, 1), np.ones((1, self._model_setting.N)))
else:
shape = np.dot(self.Phi[1], self.Theta[1])
beta1_sum = np.sum(beta1, 1)
# Modified from GNBP_mex_collapsed_deep.c in the GBN code,
# to support a full matrix of beta1
[self.local_params.Theta, temp, n_dot_k, ZS] = self._collapsed_gibbs_topic_assignment_mex(
self.local_params.Theta, self.global_params.Phi, n_dot_k, ZS, WS, DS, shape, beta1, beta1_sum)
if is_train:
self.global_params.Phi = temp
WSZS[t] = self.global_params.Phi.T
Xt_to_t1[t] = self.local_params.Theta
# Sample the variables related to sub-topics
beta1 = self._sample_beta(WSZS[t].T, embeddings, beta1)
else:
[Xt_to_t1[t], WSZS[t]] = self._sampler.multi_aug(Xt_to_t1[t-1], self.Phi[t], self.Theta[t])
if t > 0:
self.Phi[t] = self._sample_Phi(WSZS[t], beta0)
if np.count_nonzero(np.isnan(self.Phi[t])):
Warning('Phi Nan')
self.Phi[t][np.isnan(self.Phi[t])] = 0
Xt = self._crt_sum_mex_matrix_v1(sparse.csc_matrix(Xt_to_t1[Tcurrent].T), r_k.reshape(1, -1).T).T
r_k, gamma0, c0 = self._sample_rk(Xt, r_k, p_j[Tcurrent+1], gamma0, c0)
if iter > 10:
if Tcurrent > 0:
p_j[1] = self._sampler.beta(np.sum(Xt_to_t1[0], 0)+a0, np.sum(self.Theta[1], 0)+b0)
else:
p_j[1] = self._sampler.beta(np.sum(Xt_to_t1[0], 0)+a0, np.sum(r_k)+b0)
p_j[1] = np.minimum(np.maximum(p_j[1], np.spacing(1)), 1-np.spacing(1))
c_j[1] = (1 - p_j[1]) / p_j[1]
for t in range(2, Tcurrent+2):
if t == Tcurrent+1:
c_j[t] = self._sampler.gamma(np.sum(r_k)*np.ones((1, self._model_setting.N))+e0) / (np.sum(self.Theta[t-1], 0)+f0)
else:
c_j[t] = self._sampler.gamma(np.sum(self.Theta[t], 0)+e0) / (np.sum(self.Theta[t-1], 0)+f0)
p_j_temp = self._calculate_pj(c_j, Tcurrent)
p_j[2:] = p_j_temp[2:]
for t in range(Tcurrent, -1, -1):
if t == Tcurrent:
shape = r_k.reshape(-1, 1)
else:
shape = np.dot(self.Phi[t+1], self.Theta[t+1])
if t > 0:
self.Theta[t] = self._sampler.gamma(shape+Xt_to_t1[t]) * (1/(c_j[t+1] - np.log(np.maximum(1 - p_j[t], realmin))))
# (100, 12337/987) (1, 12337)
if np.count_nonzero(np.isnan(self.Theta[t])):
Warning('Theta Nan')
self.Theta[t][np.isnan(self.Theta[t])] = 0
end_time = time.time()
stages = 'Training' if is_train else 'Testing'
print(f'{stages} Stage: ',
f'Layer {Tcurrent:3d}, epoch {iter:3d} takes {end_time - start_time:.2f} seconds, topics {np.count_nonzero(Xt):3d}')
for t in range(Tcurrent + 1):
if t == 0:
self.Phi[t] = self._sample_Phi(WSZS[t], beta1.T, True)
else:
self.Phi[t] = self._sample_Phi(WSZS[t], beta0, True)
paraGlobal[Tcurrent]['Phi'] = self.Phi
paraGlobal[Tcurrent]['r_k'] = r_k
paraGlobal[Tcurrent]['gamma0'] = gamma0
paraGlobal[Tcurrent]['c0'] = c0
paraGlobal[Tcurrent]['K'] = self._model_setting.K[:Tcurrent]
paraGlobal[Tcurrent]['beta0'] = beta0
paraGlobal[Tcurrent]['beta_para'] = self.beta_para
paraGlobal[Tcurrent]['p_j'] = p_j # for theta
paraGlobal[Tcurrent]['c_j'] = c_j
paraGlobal[Tcurrent]['Xt_to_t1'] = Xt_to_t1
paraGlobal[Tcurrent]['cjmedian'] = []
for t in range(Tcurrent + 1):
paraGlobal[Tcurrent]['cjmedian'].append(np.median(c_j[t]))
return copy.deepcopy(self.local_params)
def test(self, embeddings: np.ndarray, S: int, iter_all: int, data: np.ndarray):
'''
Inputs:
embeddings : [np.ndarray] V*D, word embedding of training words
S : [int] number of sub topics
iter_all : [int] scalar, the number of Gibbs sampling iterations
data : [np.ndarray] V*N_train matrix, N_train bag-of-words vectors with a vocabulary length of V
Outputs:
local_params : [Params] the local parameters of the probabilistic model
'''
local_params = self.train(embeddings, S, iter_all, data, is_train=False)
return local_params
def save(self, model_path: str = './save_models'):
'''
Save the model to the specified directory.
Inputs:
model_path : [str] the directory path to save the model, default './save_models/WEDTM.npy'
'''
# create the directory path
if not os.path.isdir(model_path):
os.mkdir(model_path)
# save the model
model = {}
for params in ['global_params', 'local_params', '_model_setting', '_hyper_params']:
if params in dir(self):
model[params] = getattr(self, params)
np.save(model_path + '/' + self._model_name + '.npy', model)
print('model have been saved by ' + model_path + '/' + self._model_name + '.npy')
def load(self, model_path: str):
'''
Load the model parameters from the specified directory
Inputs:
model_path : [str] the directory path to load the model;
'''
assert os.path.exists(model_path), 'Path Error: can not find the path to load the model'
model = np.load(model_path, allow_pickle=True).item()
for params in ['global_params', 'local_params', '_model_setting', '_hyper_params']:
if params in model:
setattr(self, params, model[params])
def _init_beta(self, K, V, S, embeddings, beta):
L = embeddings.shape[1]
beta_para = [{}] * S
for s in range(S):
# variables for sub-topic s
beta_para[s]['beta_s'] = beta/S * np.ones((K, V))
beta_para[s]['alpha_k'] = 0.1 * np.ones((K, 1))
beta_para[s]['W'] = 0.1 * np.ones((K, L))
beta_para[s]['pi'] = np.dot(beta_para[s]['W'], embeddings.T)
beta_para[s]['sigma'] = np.ones((K, L))
beta_para[s]['c0'] = 1
beta_para[s]['alpha0'] = 1
beta1 = beta * np.ones((K, V))
return beta1, beta_para
def _calculate_pj(self, c_j, T):
'''
calculate p_j from layer 1 to T+1
same as pfa
'''
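# The recursion implemented below: p_j[0] = 1 - exp(-1), p_j[1] = 1 / (1 + c_j[1]), and for
# t >= 2, p_j[t] = -log(1 - p_j[t-1]) / (-log(1 - p_j[t-1]) + c_j[t]), with NaNs replaced by eps.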
p_j = [[]] * (T+2)
N = len(c_j[1])
p_j[0] = (1-np.exp(-1)) * np.ones((1, N))
p_j[1] = 1/(1 + c_j[1])
for t in range(2, T+2):
temp = -np.log(np.maximum(1-p_j[t - 1], realmin))
p_j[t] = temp / (temp + c_j[t])
if np.count_nonzero(np.isnan(p_j[t])):
Warning('pj Nan')
p_j[t][np.isnan(p_j[t])] = np.spacing(1)
return p_j
def _collapsed_gibbs_topic_assignment_mex(self, ZSDS, ZSWS, n_dot_k, ZS, WS, DS, shape, eta, eta_sum):
'''
same as DirBN
'''
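# Collapsed Gibbs step for per-token topic assignments: each token's current counts are
# removed, an unnormalised posterior (eta[k,v] + ZSWS[k,v]) / (eta_sum[k] + n_dot_k[k]) *
# (ZSDS[k,j] + shape[k,j]) is accumulated over k, and a new topic is drawn by binary search
# on the cumulative sum before the counts are restored.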
Ksize, Nsize = ZSDS.shape
WordNum = WS.shape[0]
prob_cumsum = np.zeros((Ksize, 1))
for i in range(WordNum):
v = WS[i]
j = DS[i]
k = ZS[i]
if ZS[i] > -1:
ZSDS[k, j] -= 1
ZSWS[k, v] -= 1
n_dot_k[k] -= 1
cum_sum = 0
for k in range(Ksize):
cum_sum += (eta[k, v] + ZSWS[k, v]) / (eta_sum[k] + n_dot_k[k]) * (ZSDS[k, j] + shape[k, j])
prob_cumsum[k] = cum_sum
probrnd = np.random.rand() * cum_sum
k = self._binary_search(probrnd, prob_cumsum, Ksize)
ZS[i] = k
ZSDS[k, j] += 1
ZSWS[k, v] += 1
n_dot_k[k] += 1
return ZSDS, ZSWS, n_dot_k, ZS
def _binary_search(self, probrnd, prob_cumsum, Ksize):
if probrnd <= prob_cumsum[0]:
return 0
else:
kstart = 1
kend = Ksize - 1
while 1:
if kstart >= kend:
return kend
else:
k = kstart + int((kend - kstart) / 2)
if (prob_cumsum[k - 1][0] > probrnd) & (prob_cumsum[k][0] > probrnd):
kend = k - 1
elif (prob_cumsum[k - 1][0] < probrnd) & (prob_cumsum[k][0] < probrnd):
kstart = k + 1
else:
return k
return k
def _sample_beta(self, n_topic_word, F, beta1):
a0 = 0.01
b0 = 0.01
e0 = 1
f0 = 1
S = len(self.beta_para)
L = F.shape[1]
# The word count for each v and k in the first layer
[K, V] = n_topic_word.shape
n_sum = np.sum(n_topic_word, 1)
## Eq. (3)
log_inv_q = -np.log(self._sampler.beta(np.sum(beta1, 1), np.maximum(n_sum, realmin)))
log_log_inv_q = np.log(np.maximum(log_inv_q, realmin))
# Active topics in the first layer
active_k = (~np.isnan(log_inv_q)) & (~np.isinf(log_inv_q)) & (n_sum > 0) & (log_inv_q != 0)
## Eq. (4) and (6)
h = np.zeros((K, V, S)).astype(int)
for k in range(K):
for v in range(V):
for j in range(n_topic_word[k, v]):
if j == 0:
is_add_table = 1
else:
is_add_table = (np.random.rand() < beta1[k, v] / (beta1[k, v] + j + 1))
if is_add_table > 0:
p = np.zeros((S, 1))
for s in range(S):
p[s] = self.beta_para[s]['beta_s'][k, v]
sum_cum = np.cumsum(p)
temp = np.argwhere(sum_cum > np.random.rand() * sum_cum[-1])
if len(temp) > 0:
ss = temp[0]
else:
continue
h[k, v, ss] = h[k, v, ss] + 1
beta1 = 0
for s in range(S):
## For each sub-topic s
alpha_k = self.beta_para[s]['alpha_k']
pi_pg = self.beta_para[s]['pi']
W = self.beta_para[s]['W']
c0 = self.beta_para[s]['c0']
alpha0 = self.beta_para[s]['alpha0']
h_s = h[:, :, s]
# Sample alpha_k for each sub-topic s with the hierarchical gamma
h_st = np.zeros((K, V)).astype(int)
# Eq. (11)
h_st[h_s > 0] = 1
for k in range(K):
for v in range(V):
for j in range(h_s[k, v] - 1):
h_st[k, v] = h_st[k, v] + (np.random.rand() < alpha_k[k] / (alpha_k[k] + j + 1)).astype(int)
# Eq. (10)
h_st_dot = np.sum(h_st, 1)
# Active topics in each sub-topic s
            local_active_k = (h_st_dot > 0) & active_k  # parentheses needed: & binds tighter than >
l_a_K = sum(local_active_k)
x = pi_pg + log_log_inv_q.reshape(-1, 1)
dex = x < 0
temp = np.zeros(x.shape)
temp[dex] = np.log1p(np.exp(x[dex]))
temp[~dex] = x[~dex]+np.log1p(np.exp(-x[~dex]))
temp = np.sum(temp, 1)
# Eq. (9)
alpha_k = (self._sampler.gamma(alpha0 / l_a_K + h_st_dot) / (c0 + temp)).reshape(-1, 1)
h_stt = np.zeros((K, 1))
h_stt[h_st_dot > 0] = 1
for k in range(K):
for j in range(h_st_dot[k] - 1):
h_stt[k] = h_stt[k] + (np.random.rand() < (alpha0 / l_a_K) / (alpha0 / l_a_K + j + 1)).astype(int)
temp2 = temp / (c0 + temp)
# L17 in Figure 1 in the appendix
alpha0 = self._sampler.gamma(a0 + np.sum(h_stt)) / (b0 - np.sum(np.log(1 - temp2[local_active_k])) / l_a_K)
c0 = self._sampler.gamma(e0 + alpha0) / (f0 + np.sum(alpha_k[local_active_k]))
## Sample Polya-Gamma variables
# Eq. (15)
pi_pg_vec = pi_pg + log_log_inv_q.reshape(-1,1)
pi_pg_vec = pi_pg_vec.reshape(K * V, 1)
temp = h_s + alpha_k # reshape(h_s + alpha_k, K*V,1)
temp = temp.reshape(K * V, 1)
omega_vec = self._polya_gam_rnd_gam(temp, pi_pg_vec, 2)
omega_mat = omega_vec
omega_mat = omega_mat.reshape(K, V)
## Sample sigma
sigma_w = self._sampler.gamma(1e-2 + 0.5 * l_a_K) / (
1e-2 + np.sum(np.power(W[local_active_k, :], 2), 0) * 0.5)
sigma_w = np.tile(sigma_w, (K, 1))
## Sample W
# Eq. (14)
for k in range(K):
if local_active_k[k] > 0:
Hgam = F.T * omega_mat[k, :]
invSigmaW = np.diag(sigma_w[k, :]) + np.dot(Hgam, F)
MuW = np.dot(np.linalg.inv(invSigmaW), (
np.sum(F.T * (0.5 * h_s[k, :].reshape(1, -1) - 0.5 * alpha_k[k, :] - (log_log_inv_q[k]) * omega_mat[k, :]), 1)))
R = self._choll(invSigmaW)
                    W[k, :] = MuW + np.dot(np.linalg.inv(R), np.random.randn(L, 1)).flatten()  # standard-normal draw for the Gaussian sample
else:
W[k, :] = 1e-10
# Update pi, Eq. (8)
pi_pg = np.dot(W, F.T)
## Sample beta for each sub-topic s
# Eq. (7)
beta_s = self._sampler.gamma(alpha_k + h_s) / (np.exp(-pi_pg) + log_inv_q.reshape(-1, 1))
beta_s[local_active_k == 0, :] = 0.05 / S
beta_s[(np.sum(np.isnan(beta_s), 1)) != 0, :] = 0.05 / S
beta_s[(np.sum(np.isnan(beta_s) | np.isinf(beta_s), 1)) != 0, :] = 0.05 / S
            beta_s[np.sum(beta_s, 1) == 0, :] = 0.05 / S  # reset rows that sum to zero (not the non-zero ones)
## Update beta1
beta1 = beta1 + beta_s
## Collect results
# self.beta_para[s]['beta_s'] = beta_s
self.beta_para[s]['pi'] = pi_pg
self.beta_para[s]['W'] = W
self.beta_para[s]['alpha_k'] = alpha_k
self.beta_para[s]['sigma'] = sigma_w
self.beta_para[s]['h_s'] = sparse.csc_matrix(h_s)
self.beta_para[s]['c0'] = c0
self.beta_para[s]['alpha0'] = alpha0
return beta1
def _polya_gam_rnd_gam(self, a, c, KK, IsBiased=None):
'''
        Generating Polya-Gamma random variables using an approximation method
'''
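        # Sketch of the approximation (cf. the Polya-Gamma infinite-sum representation):
        # x ~ PG(a, c) is a weighted infinite sum of Gamma(a, 1) variables,
        #   x = (1 / (2 * pi^2)) * sum_{k>=1} g_k / ((k - 1/2)^2 + c^2 / (4 * pi^2)),
        # and the line below keeps only the first KK terms. The correction further
        # down rescales the truncated draw by the ratio of the closed-form series
        # mean to the truncated-series mean, so a small KK remains usable.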
IsBiased = False
x = 1 / 2 / math.pi ** 2 * np.sum(self._sampler.gamma(np.dot(a, np.ones((1, KK))), 1) /
(np.power((np.array([i for i in range(KK)]) + 0.5), 2) + np.power(c, 2) / 4 / math.pi ** 2), 1)
        if not IsBiased:  # 'not' rather than bitwise '~', which is always truthy on a bool
temp = abs(c / 2)
temp[temp <= 0] = realmin
xmeanfull = (np.tanh(temp) / (temp) / 4)
xmeantruncate = 1 / 2 / math.pi ** 2 * np.sum(
1 / (np.power((np.array([i for i in range(KK)]) + 0.5), 2) + np.power(c, 2) / 4 / math.pi ** 2), 1)
x = x * xmeanfull.flatten() / (xmeantruncate)
return x
def _choll(self, A):
# same as dpfa
P = A.copy()
q = np.linalg.cholesky(P)
q = q.T
return q
def _crt_sum_mex_matrix_v1(self, X, r):
# same as DirBN sample_theta
k, n = np.shape(X)
if len(r) == 1:
r = r[0]
lsum = np.zeros(n).astype(int)
maxx = 0
Xsparse = sparse.csc_matrix(X)
pr = Xsparse.data
ir = Xsparse.indices
jc = Xsparse.indptr
for j in range(n):
starting_row_index = jc[j]
stopping_row_index = jc[j+1]
if starting_row_index == stopping_row_index:
continue
else:
for current_row_index in range(starting_row_index, stopping_row_index):
maxx = int(max(maxx, pr[current_row_index]))
prob = np.zeros(maxx)
for i in range(maxx):
prob[i] = r[j] / (r[j] + i)
lsum[j] = 0
for current_row_index in range(starting_row_index, stopping_row_index):
for i in range(int(pr[current_row_index])):
                        if np.random.rand() <= prob[i]:
                            lsum[j] += 1
        return lsum
# Pluto
# v0.9.2
# MIT License
# Copyright (c) 2021 <NAME>
from typing import Literal
import numpy as np
import matplotlib.pyplot as plt
import cv2
import torch
import torch.nn as nn
import torchvision.transforms.functional as tf
import torch.nn.functional as F
import time
import webbrowser
import requests
import easyocr
reader = easyocr.Reader(['en'])
# For reproducibility
seed = 3
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# cli capabilities
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs Pluto on screenshots.")
parser.add_argument("-i", "--input", type=str, metavar="", help="Path to input image. If left empty, the clipboard content will be used automatically")
parser.add_argument("-o", "--output", type=str, metavar="", help="Path to where the output file should be saved.")
parser.add_argument("-c", "--category", type=str, metavar="", help="Category of media. Equal to class name")
args = parser.parse_args()
arg_i = args.input
arg_o = args.output
arg_c = args.category
# try:
# print(arg_i)
# print(arg_o)
# print(arg_c)
# except Exception: pass
def read_image(path: str, no_BGR_correction=False, resz=None): # -> np.ndarray
"""Returns an image from a path as a numpy array, resizes it if necessary
Args:
path: location of the image.
no_BGR_correction: When True, the color space is not converted from BGR to RGB
Returns:
The read image as np.ndarray.
Raises:
AttributeError: if path is not valid, this causes image to be None
"""
if type(path) == np.ndarray: return path
image = cv2.imread(path)
if resz is not None: image = cv2.resize(image, resz)
if image is None: raise AttributeError("Pluto ERROR in read_image() function: Image path is not valid, read object is of type None!")
if no_BGR_correction: return image
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def show_image(image: np.ndarray, BGR2RGB=False):
"""Displays an image using Matplotlib's pyplot.imshow()
Args:
image: The image to be displayed.
BGR2RGB: When True, the color space is converted from BGR to RGB.
"""
if BGR2RGB: image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.show()
def grab_clipboard():
from PIL import ImageGrab
img = ImageGrab.grabclipboard().convert("RGB")
img = np.array(img)
return img
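# Minimal usage sketch for the I/O helpers above. The file name "screenshot.png"
# is a placeholder for illustration, not a file shipped with Pluto.
def _demo_read_and_show(path: str = "screenshot.png"):
    img = read_image(path)   # loads from disk and converts BGR -> RGB
    show_image(img)          # displays via matplotlib
    return img.shape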
def avg_of_row(img: np.ndarray, row: int, ovo=False): # -> int | float
"""Calculates the average pixel value for one row of an image
Args:
img: The screenshot as np.ndarray
row: which row of the image should be analysed?
ovo: output as 'one value only' instead of list?
Returns:
        The average value per row, either one value only or per color channel value
"""
all_values_added = 0
if img.shape[2] == 3: all_values_added = [0, 0, 0]
length = len(img)
for pixl in img[row]: all_values_added += pixl
out = all_values_added / length
if ovo: out = sum(out) / 3
return out
def avg_of_collum(img: np.ndarray, collum: int, ovo=False): # -> int | float
"""Calculates the average pixel value for one collum of an image
Args:
img: The screenshot as np.ndarray
collum: which collum of the image should be analysed?
ovo: output as 'one value only' instead of list?
Returns:
        The average value per column, either one value only or per color channel value
"""
all_values_added = 0
if img.shape[2] == 3: all_values_added = [0, 0, 0]
length = len(img[0])
for pixl in img[:, collum]: all_values_added += pixl
out = all_values_added / length
if ovo: out = sum(out) / 3
return out
def trimm_and_blur(inpt: np.ndarray, less: bool, value: int, blurs, trimm, double_down=False, invert=None, remove_color=False, remove_value=np.array([0, 0, 0])):
"""Isolates parts of an image with a specific color or color range. Also capable of removing color and bluring the output.
Args:
inpt: The input image as np.ndarray (must have 3 color channels)
less: Bigger / smaller trimming method
value: Threshold for color values
blurs: blurring kernel size
trimm: pixel value for trimmed areas
double_down: If True, the non-isolated areas will also receive a different pixel value
invert: pixel value for non-isolated areas (as list of values, representing the colors of a pixel)
remove_color: When True, all color pixel will be overridden
remove_value: new pixel value for color pixel
Returns:
A np.ndarray with dimensions of the inpt image
---------
Examples:
---------
trimm_and_blur(img, True, 20, (3, 3), [255, 255, 255])
-> The function goes through each pixel in img. If the value of any color channel of the pixel is bigger than 20, the whole pixel will be overridden with the trimm parameter.\
    So, if a pixel has the values [14, 21, 3], it will be overridden to [255, 255, 255]. Once that's done for every pixel, a blur is applied to the entire image.
trimm_and_blur(img, True, 20, (3, 3), [255, 255, 255], True, [0, 0, 0])
-> Now a trimm is also applied to the non-isolated parts of an image. If a pixel has the values [14, 21, 3], it will be overridden to [255, 255, 255].\
A pixel with the values [16, 10, 12] will be overridden to [0, 0, 0].
"""
for i in range(len(inpt)):
for j in range(len(inpt[i])):
if remove_color:
if np.max(inpt[i][j]) - np.min(inpt[i][j]) > 2:
inpt[i][j] = remove_value
if less:
if inpt[i][j][0] > value or inpt[i][j][1] > value or inpt[i][j][2] > value:
inpt[i][j] = np.array(trimm)
elif double_down:
inpt[i][j] = np.array(invert)
else:
if inpt[i][j][0] < value or inpt[i][j][1] < value or inpt[i][j][2] < value:
inpt[i][j] = np.array(trimm)
elif double_down:
inpt[i][j] = np.array(invert)
blur = cv2.blur(inpt, blurs)
return blur
def to_grayscale(img: np.ndarray): # -> np.ndarray
"""Converts a color image to grayscale.
Note: If the input image has dimensions of 200x200x3, the output image will have dimensions of 200x200.
Args:
img: color image with BGR channel order
Returns:
The input image as grayscale.
"""
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def iso_grayscale(img: np.ndarray, less_than, value, convert_grayscale=False, blur=(1, 1), inverse=False): # -> np.ndarray
"""Isolates image areas with a specific value
Args:
img: input image as np.ndarray
less_than: Sets filter technique to less than / bigger than
value: Value to filter by
        convert_grayscale: True if the input image is color and needs to be converted to grayscale first
        blur: optional blur kernel size
Returns:
modified input image as np.ndarray
"""
inv = None
if convert_grayscale: img = to_grayscale(img)
if inverse: inv = img.copy()
if less_than:
for i in range(len(img)):
for j in range(len(img[i])):
if img[i][j] < value:
img[i][j] = 255
if inverse: inv[i][j] = 0
else:
img[i][j] = 0
if inverse: inv[i][j] = 255
else:
for i in range(len(img)):
for j in range(len(img[i])):
if img[i][j] > value:
img[i][j] = 255
if inverse: inv[i][j] = 0
else:
img[i][j] = 0
if inverse: inv[i][j] = 255
if blur != (1, 1):
img = cv2.blur(img, blur)
if inverse: return img, inv
return img
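# Tiny sketch of iso_grayscale() on a toy 2x2 grayscale patch (values are
# illustrative): pixels brighter than 200 become white, everything else black.
def _demo_iso_grayscale():
    patch = np.array([[10, 250],
                      [199, 201]], dtype=np.uint8)
    return iso_grayscale(patch.copy(), less_than=False, value=200)  # -> [[0, 255], [0, 255]]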
def expand_to_rows(image: np.ndarray, full=False, value=200, bigger_than=True): # -> np.ndarray
"""If one value in a row (of a mask, for example) is above a specific threshold, the whole row is expanded to a specific value.
Args:
        image: A grayscale image as np.ndarray.
Returns:
A np.ndarray of the edited image.
"""
dimensions = image.shape
imglen = dimensions[0]
white_row = np.array([255 for k in range(dimensions[1])])
black_row = np.array([0 for k in range(dimensions[1])])
if not full: imglen = dimensions[0] / 2
if bigger_than:
for i in range(int(imglen)):
for j in range(dimensions[1]):
if image[i][j] > value:
image[i] = white_row
else:
for i in range(int(imglen)):
for j in range(dimensions[1]):
if image[i][j] < value:
image[i] = black_row
for i in range(int(imglen), dimensions[0]):
for j in range(dimensions[1]):
image[i] = black_row
return image
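# Tiny sketch of expand_to_rows() on a toy mask (values are illustrative): the
# single bright pixel in the middle row pulls that entire row up to 255.
def _demo_expand_to_rows():
    mask = np.array([[0, 0, 0],
                     [0, 255, 0],
                     [0, 0, 0]], dtype=np.uint8)
    return expand_to_rows(mask.copy(), full=True)  # row 1 becomes [255, 255, 255]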
def google(query: str):
"""Googles a query. Opens result in browser window.
"""
link = "https://www.google.de/search?q="
query.replace(" ", "+")
webbrowser.open((link + query))
class PlutoObject:
def __init__(self, img: np.ndarray):
self.img = img
self.use_easyocr = False
def load_model(self, path, model, device: Literal["cuda", "cpu"]):
"""Loads the state dictionary and applies it to the model
Args:
path: The relative or absolute path to the state dict file
model: The corresponding PyTorch model
Returns:
The inputed PyTorch model with the loaded state.
"""
model.load_state_dict(torch.load(path))
model.to(device)
return model
def vis_model_prediction(self, img: np.ndarray, mask: np.ndarray, display=False): # --> np.ndarray
""" Shows the predicted mask of a segmentation model as an overlay on the input image.
All arrays must be np.uint8 and have a color range of 0 - 255.
Args:
img: Input image for the model. Shape: IMG_SIZE x IMG_SIZE x 3
mask: The result of the model. Shape: IMG_SIZE x IMG_SIZE
display: True if the return image should also be displayed.
Returns:
The visualized prediction as np.ndarray
"""
dim = img.shape
mask = cv2.resize(mask, (dim[1], dim[0]))
black_img = np.zeros([dim[0], dim[1]]).astype(np.uint8)
# print(mask.shape, black_img.shape, mask.dtype, black_img.dtype, img.shape, img.dtype)
overlay = cv2.merge((mask, black_img, black_img))
out = cv2.addWeighted(img, 0.5, overlay, 1.0, 0)
if display: show_image(out)
return out
def to_tensor(self, arr: np.ndarray, img_size, dtype, device: Literal["cuda", "cpu"], cc=3): # --> torch.Tensor
"""Converts an np.ndarray (which represents an image) to a PyTorch Tensor
Args:
arr: The image as a NumPy array.
img_size: The final image size (quadratic)
dtype: The Type for the Tensor. Recommendet is torch.float32
device: If the Tensor should be moved to the GPU, make this "cuda"
Returns:
The input array as torch.Tensor
"""
arr = cv2.resize(arr.copy(), (img_size, img_size)) / 255.0 # load, resize & normalize
# show_image(arr)
arr = arr.reshape(-1, cc, img_size, img_size)
tensor = torch.from_numpy(arr).to(dtype).to(device) # to tensor
return tensor
def from_tensor(self, tensor, img_size, dtype=np.uint8):
return tensor.cpu().numpy().reshape(img_size, img_size).astype(dtype)
def ocr(self, image=None, switch_to_tesseract=False): # -> str
"""Preforms OCR on a given image, using EasyOCR
Args:
image: np.ndarray of the to be treated image.
Returns:
String with the raw result of the OCR library.
"""
if image is None: image = self.img
try:
# reader = easyocr.Reader(['en'])
ocr_raw_result = reader.readtext(image, detail=0)
except Exception as e:
print("Pluto WARNING - Error while performing OCR: ", e)
ocr_raw_result = [""]
out = ""
for word in ocr_raw_result:
out += " " + word
return out
def expand_to_rows(self, image: np.ndarray, full=False, value=200): # -> np.ndarray
"""
Args:
            image: A grayscale image as np.ndarray, which represents a mask.
Returns:
A np.ndarray of the edited image.
"""
dimensions = image.shape
imglen = dimensions[0]
if not full: imglen = dimensions[0] / 2
for i in range(int(imglen)):
for j in range(dimensions[1]):
if image[i][j] > value:
image[i] = [255 for k in range(dimensions[1])]
for i in range(int(imglen), dimensions[0]):
for j in range(dimensions[1]):
image[i][j] = 0
return image
def ocr_cleanup(self, text: str): # -> str
"""Removes unwanted characters or symbols from a text
This includes \n, \x0c, and multiple ' '
Args:
text: The String for cleanup.
Returns:
The cleaned text as String.
"""
out = text.replace("\n", " ")
out = out.replace("\x0c", "")
out = " ".join(out.split())
splits = out.split(",")
clean_splits = []
for phrase in splits:
arr = list(phrase)
l = len(arr)
start = 0
end = l
for i in range(0, l):
if arr[i] == " ": start += 1
else: break
for i in range(l, 0, -1):
if arr[i-1] == " ": end -= 1
else: break
clean_splits.append(phrase[start:end])
out = ""
for phrase in clean_splits:
out += phrase
out += ", "
out = out[:-2]
return out
def to_json(self, data: dict):
import json
out = json.dumps(data)
return out
def load_model(self, model, path: str, device):
"""Loads the state of an model
Args:
model: PyTorch model
            path: path to the state dict file
Returns:
the input model with loaded state
"""
model.load_state_dict(torch.load(path))
return model.to(device)
def determine_device(self): # -> Literal["cuda", "cpu"]
return "cuda" if torch.cuda.is_available() else "cpu"
def run_model(self, model, tnsr):
"""Runs a model with a sigmoid activation function
"""
with torch.no_grad():
prediction = torch.sigmoid(model(tnsr))
return prediction * 255
def run_segmentation_model(self, state_path, img=None): # -> np.ndarray
"""Runs a UNET segmentation model given an image and the state of the model.
Args:
            state_path: path to the model's state_dict
img: the model's input image
Returns:
The model's prediction as np.ndarray
"""
if img is None: img = self.img
device = self.determine_device()
model = UNET(in_channels=3, out_channels=1)
model = self.load_model(model, state_path, device)
input_tensor = self.to_tensor(img, 256, torch.float32, device)
prediction = self.run_model(model, input_tensor)
output = self.from_tensor(prediction, 256)
return output
def extr_mask_img(self, mask: np.ndarray, img: np.ndarray, inverted=False):
"""Performs extend_to_rows() on the mask and returns the masked out parts of the original image.
Args:
mask: grayscale mask
img: Original image
inverted: if True, an inverted version of the output will also be returned
Returns:
A np.ndarray of the masked out part
"""
mask = cv2.resize(mask, (img.shape[1], img.shape[0]))
extr = self.expand_to_rows(mask)
# show_image(extr)
out = []
invout = []
for i in range(len(extr)):
if extr[i][0] > 200: out.append(img[i])
elif inverted: invout.append(img[i])
if inverted: return np.array(out), np.array(invout)
return np.array(out)
def extr_replace_mask(self, mask: np.ndarray, img: np.ndarray, replace_value: np.ndarray, invert_replace=False):
"""Performs extend_to_rows() on the mask and returns the masked out parts of the original image.
Args:
mask: grayscale mask
img: Original image
inverted: if True, an inverted version of the output will also be returned
Returns:
A np.ndarray of the masked out part
"""
mask = cv2.resize(mask, (img.shape[1], img.shape[0]))
extr = self.expand_to_rows(mask)
if invert_replace:
for i in range(len(extr)):
if extr[i][0] > 200:
for j in range(len(img[i])):
img[i][j] = replace_value
else:
for i in range(len(extr)):
if extr[i][0] < 200:
for j in range(len(img[i])):
img[i][j] = replace_value
return img
def characters_filter_strict(self, inpt: str, allow_digits=True): # -> str
numbers = list(range(128))
        # only digits, uppercase letters, lowercase letters and spaces are valid
allowed_ascii_values = numbers[65:91] + numbers[97:123] + [32]
if allow_digits: allowed_ascii_values += numbers[48:58]
out = ""
for i in inpt:
if ord(i) in allowed_ascii_values: out += i
out = " ".join(out.split())
return out
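# Sketch of characters_filter_strict(): only letters, digits and single spaces
# survive, so punctuation and currency symbols are stripped.
def _demo_characters_filter():
    obj = PlutoObject(np.zeros((1, 1, 3), dtype=np.uint8))  # dummy image, only the filter is used
    return obj.characters_filter_strict("Price: $4,200!!")  # -> "Price 4200"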
class FoxNews(PlutoObject):
def __init__(self, img: np.ndarray):
super().__init__(img)
def analyse(self, display=False):
"""Extracts key information from a screenshot of a Fox News Article.
Args:
display: True if in between steps should be shown
Returns:
The extracted information as JSON
"""
og_shape = self.img.shape
img = cv2.resize(self.img, (512, 512))
black = np.zeros((512, 512))
for i in range(len(black)):
for j in range(len(black[0])):
temp = img[i][j]
if (temp == [34, 34, 34]).all(): black[i][j] = 255
blured = cv2.blur(black, (20, 20))
for i in range(len(blured)):
for j in range(len(blured[0])):
if blured[i][j] < 40: blured[i][j] = 0
else: blured[i][j] = 255
msk = self.expand_to_rows(blured)
og_size_msk = cv2.resize(msk, (og_shape[1], og_shape[0]))
top = []
heading = []
bottom = []
top_part = True
bottom_part = False
for i in range(len(self.img)):
if og_size_msk[i][0] > 1:
heading.append(self.img[i])
if top_part:
top_part = False
bottom_part = True
elif top_part: top.append(self.img[i])
else: bottom.append(self.img[i])
heading = np.array(heading)
bottom = np.array(bottom)
top = np.array(top)
if display:
show_image(heading)
show_image(bottom)
show_image(top)
ocr_result = self.ocr(heading)
headline = self.ocr_cleanup(ocr_result)
cat_info_img = []
top_len = len(top)
for i in range(top_len, 0, -1):
if top[i-1][0][0] > 250: cat_info_img.insert(0, top[i-1])
else: break
cat_info_img = np.array(cat_info_img)
if display: show_image(cat_info_img)
ocr_result = self.ocr(cat_info_img)
clean_ocr = self.ocr_cleanup(ocr_result)
dotsplit = clean_ocr.split("-")[0][:-1].lstrip(" ")
pubsplit = clean_ocr.split("Published")[1].lstrip(" ")
subinfo_bottom = []
stoper = False
for row in bottom:
subinfo_bottom.append(row)
for pix in row:
if pix[0] > 200 and pix[0] < 240 and pix[2] < 50 and pix[1] < 50:
stoper = True
break
if stoper: break
subinfo_bottom = np.array(subinfo_bottom[:-3])
if display: show_image(subinfo_bottom)
subinfo = self.ocr_cleanup(self.ocr(subinfo_bottom))
subsplit = subinfo.split()
author_list = []
subtitle_list = []
subinfo_switcher = True
for w in reversed(subsplit):
if w == "By" and subinfo_switcher:
subinfo_switcher = False
continue
if w == "News" or w == "Fox" or w == "|": continue
if subinfo_switcher: author_list.insert(0, w)
else: subtitle_list.insert(0, w)
author = " ".join(author_list)
subtitle = " ".join(subtitle_list)
return pubsplit, headline, subtitle, author, dotsplit
def to_json(self, img=None, path=None):
if img is None: img = self.img.copy()
import json
        pubsplit, headline, subtitle, author, dotsplit = self.analyse()  # analyse() reads self.img; it takes no image argument
jasoon = { "source": "News Article",
"article": {
"created": "[Published] " + pubsplit,
"organisation": "Fox News",
"headline": headline,
"subtitle": subtitle,
"author": author,
"category": dotsplit
}
}
if path == None: return json.dumps(jasoon)
else:
out = open(path, "w")
json.dump(jasoon, out, indent=6)
out.close()
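# Hedged usage sketch for the Fox News parser. "article.png" is a placeholder
# path; OCR quality depends on the easyocr reader initialised at module level.
def _demo_foxnews(path: str = "article.png"):
    fox = FoxNews(read_image(path))
    published, headline, subtitle, author, category = fox.analyse()
    return {"published": published, "headline": headline, "subtitle": subtitle,
            "author": author, "category": category}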
class Facebook(PlutoObject):
def __init__(self, img: np.ndarray):
super().__init__(img)
self.header = None
self.text = None
self.insert = None
self.engagement = None
def analyse_legacy(self, img=None):
"""---
        Deprecated
---
"""
if img is None: img = self.img
slc, indx = self.slices(img.copy())
image, eng = self.img_eng(indx, img.copy())
inpts, imgs = self.part(img[:indx].copy(), slc)
header = []
text = []
for indx in range(len(inpts)):
sl = imgs[indx]
t = np.transpose((255 - to_grayscale(sl)), (1, 0))
t = expand_to_rows(t, True, 10)
# show_image(sl)
# show_image(t)
continue
clss = self.classify(inpts[indx])
prt = (imgs[indx]).tolist()
if clss == 0: header += prt
else: text += prt
header = np.array(header, np.uint8)
date = header[int(header.shape[0] / 2) :]
header = header[: int(header.shape[0] / 2)]
text = np.array(text, np.uint8)
# show_image(header)
# show_image(date)
# show_image(text)
header = self.ocr_cleanup(self.ocr(header))
date = self.ocr_cleanup(self.ocr(date))
text = self.ocr_cleanup(self.ocr(text))
engagement = self.ocr(eng)
return header, date, text, engagement
def analyse(self, img=None):
"""Main method for extraction information from the image
"""
if img is None: img = self.img
splits = self.split(img)
self.header, self.text = self.sliceing(splits[0])
self.insert, self.engagement = None, None
if len(splits) > 1 and splits[1] is not None: self.insert = splits[1]
if len(splits) > 2 and splits[2] is not None: self.engagement = splits[2]
date = self.header[int(self.header.shape[0] / 2) :]
header = self.header[: int(self.header.shape[0] / 2)]
engocr = None
headerocr = self.ocr_cleanup(self.ocr(header))
dateocr = self.ocr_cleanup(self.ocr(date))
textocr = self.ocr_cleanup(self.ocr(self.text))
if self.engagement is not None:
engocr = self.ocr_cleanup(self.ocr(self.engagement))
engocr = self.engagement_str(engocr)
return headerocr, dateocr, textocr, engocr
def split(self, img):
"""Splits the screenshot at an attached image or link
"""
og_img = img.copy()
img = to_grayscale(img)
dm = self.dark_mode(img)
if dm: img = 255 - img
exists1 = False
for i in range(len(img)):
if not dm:
if img[i][0] < 250:
exists1 = True
break
else:
if img[i][0] < 215:
exists1 = True
break
top, bottom = None, None
if exists1:
top = og_img[:i-1]
bottom_og = og_img[i:]
bottom = img[i:]
# else: return img
if bottom is None:
return og_img, None, None
exists2 = False
for j in range(len(bottom)-1, i, -1):
if not dm:
if bottom[j][0] < 250:
exists2 = True
break
else:
if bottom[j][0] < 215:
exists2 = True
break
insert, engagement = None, None
if exists2:
insert = bottom_og[:j]
engagement = bottom_og[j:]
if insert is not None and self.classify(insert) == 1:
ts, ins = top.shape, insert.shape
temp = np.zeros((ts[0] + ins[0], ts[1], ts[2]), np.uint8)
temp[:ts[0]] = top
temp[ts[0]:] = insert
top = temp
insert = None
# show_image(top)
# show_image(insert)
# show_image(engagement)
return top, insert, engagement
def sliceing(self, img):
"""Slices the Text & header
"""
exptr = None
img = to_grayscale(img)
dm = self.dark_mode(img)
if not dm: exptr = (255 - img.copy())
else: exptr = img.copy()
if not dm: exptr = expand_to_rows(exptr, True, 5)
else: exptr = expand_to_rows(exptr, True, 40)
slices = []
for i in range(1, len(exptr)):
if exptr[i][0] > 250 and exptr[i-1][0] < 50 or \
exptr[i][0] < 250 and exptr[i-1][0] > 50:
slices.append(i)
slc = []
for i in range(1, len(slices), 2):
if slices[i] - slices[i-1] < 5: continue
slc.append(img[slices[i-1]:slices[i]])
# return slc
return img[slices[0]:slices[1]], img[slices[1]:slices[len(slices)-1]]
def header(img):
slc = slice(img)
show_image(slc[0])
show_image(slc[1])
return
header = []
text = []
for indx, s in enumerate(slc):
print(type(s))
f = first(s)
if indx == 0:
header.append(s)
print(s.shape)
else:
text.append(s)
print(s.shape)
return np.array(header), np.array(text)
def first(self, img):
"""Gets the first block from a slice
Args:
img: Slice as np.ndarray
Returns:
The first block as np.ndarray
"""
img = np.transpose(img, (1, 0))
exptr = img.copy() #pl.to_grayscale(img.copy())
exptr = expand_to_rows(exptr, False, 245, False)
# show_image(exptr)
for i in range(1, len(exptr)):
if exptr[i-1][0] > 250 and exptr[i][0] < 100: break
for j in range(i+1, len(exptr)):
if exptr[j-1][0] < 100 and exptr[j][0] > 250: break
# show_image(img[i:j])
return img[i:j]
def slices(self, img=None):
"""Image to slices
"""
if img is None: img = self.img
test = (255 - to_grayscale(img))
for i in range(len(test)):
if test[i][0] > 10: break
# show_image(test)
test = test[:i,:]
j = i
# show_image(test)
test = expand_to_rows(test, True, 10)
# show_image(test)
slices = []
for i in range(2, len(test)):
if test[i][0] > 250 and test[i-1][0] < 10 or \
test[i][0] < 250 and test[i-1][0] > 10:
slices.append(i)
# slices.append(len(img) - 1)
return slices, j
def img_eng(self, indx, img=None):
img = img[indx:]
test = (255 - to_grayscale(img))
for i in range(len(test)-1, 0, -1):
if test[i][0] > 10: break
image = img[:i]
eng = img[i:]
return image, eng
def engagement_str(self, ocr: str):
"""Returns shares and views from engagements
"""
try:
comsplit = ocr.split("Comments")
sharesplit = comsplit[1].split("Shares")
viewsplit = sharesplit[1].split("Views")
return str(sharesplit[0].strip()) + " Shares", str(viewsplit[0].strip()) + " Views"
except Exception as e:
return ocr
def part(self, img, slices):
"""From slice arr to list of images
"""
if img is None: img = self.img
out = []
full = []
for i in range(1, len(slices), 2):
if slices[i] - slices[i-1] < 5: continue
temp = img[slices[i-1]:slices[i]]
og = temp
temp = temp[:,:int(temp.shape[1]/3)]
# show_image(temp)
out.append(temp)
full.append(og)
return out, full
def classify(self, img=None):
"""Image or still part of text?
0 == image, 1 == text
"""
if img is None: img = self.img
img = to_grayscale(img)
net = ConvNet(1, 6, 12, 100, 20, 2)
net.load_state_dict(torch.load("models/general_1.pt"))
device = self.determine_device()
net.to(device)
tnsr = self.to_tensor(img, 224, torch.float32, device, 1)
net.eval()
with torch.no_grad():
net_out = net(tnsr.to(device))[0]
predicted_class = torch.argmax(net_out)
result = predicted_class.cpu().numpy()
return result
def header(self, img=None):
if img is None: img = self.img
path = "FB Models/fb1.pt"
result = self.run_segmentation_model(path, img)
show_image(result)
result = expand_to_rows(result)
show_image(result)
result = cv2.resize(result, (img.shape[1], img.shape[0]))
print(result.shape, img.shape)
out = []
for i in range(len(result)):
if result[i][0] > 200: out.append(img[i])
show_image(np.array(out))
def to_json(self, img=None, path=None):
"""Extracts information from a screenshot and saves it as json
"""
if img is None: img = self.img.copy()
import json
name, date, body_text, engagement_text = self.analyse(img)
jasoon = { "source": "Facebook",
"category": "Social Media",
"post": {
"username": name,
"date": date,
"content": body_text,
"engagement": engagement_text
}
}
if path == None: return json.dumps(jasoon)
else:
out = open(path, "w")
json.dump(jasoon, out, indent=6)
out.close()
def dark_mode(self, img=None):
"""Checks if dark mode is enabled
Args:
img: input screenshot (optional)
Returns:
Dark Mode enabled? True / False
"""
if img is None: img = self.img
dim = img.shape
img = img[:int(dim[0]*0.1),:int(dim[0]*0.015)]
avg = np.average(img)
return avg < 220
def split_legacy(self, img=None, darkmode=None): # -> np.ndarray
"""---
DEPRECATED
---
Splits a Facebook Screenshot into a top part (that's where the header is), a 'body' part (main text) and a 'bottom' part ('engagement stats').
Args:
img: Alternative to the default self.img
Returns:
The top, body & bottom part, all as np.ndarray
"""
if img is None: img = self.img
og = img.copy()
if darkmode is None: darkmode = self.dark_mode(img)
# print(darkmode)
if darkmode: gry = iso_grayscale(img, False, 50, True)
else: gry = iso_grayscale(img, True, 250, True)
gry_extr = expand_to_rows(gry, False, 100)
top = []
middle = []
c = 0
for i in range(len(gry_extr)):
if gry_extr[i][0] > 100 and c < 2:
if c == 0: c += 1
top.append(og[i])
elif c == 1: c += 1
elif c == 2: middle.append(og[i])
top = np.array(top)
middle = np.array(middle)
non_color = []
for i in range(len(middle)):
pix = middle[i][5]
if pix[0] > 250 or pix[1] > 250 or pix[2] > 250:
non_color.append(middle[i])
non_color = np.array(non_color)
rgh = to_grayscale(non_color)
rgh = rgh[:, int(non_color.shape[1] / 2):]
rgh = iso_grayscale(rgh, True, 10, False, (25, 25))
rgh = expand_to_rows(rgh, True, 5)
body = []
engagement = []
for i in range(len(rgh)):
if rgh[i][0] > 200: body.append(non_color[i])
else: engagement.append(non_color[i])
body = np.array(body)
engagement = np.array(engagement)
self.top = top
self.body = body
self.engagement = engagement
return top, body, engagement
def old_topsplit(self, img=None, darkmode=None): # -> np.ndarray
"""---
DEPRECATED
---
        Splits a Facebook Screenshot into a top part (that's where the header is) and a 'bottom' part.
Args:
img: Alternative to the default self.img
Returns:
The top part and the bottom part, both as np.ndarray
"""
if img is None: img = self.img
og = img.copy()
if darkmode is None: darkmode = self.dark_mode(img)
# print(darkmode)
if darkmode: gry = iso_grayscale(img, False, 50, True)
else: gry = iso_grayscale(img, True, 250, True)
gry_extr = expand_to_rows(gry, False, 100)
top = []
middle = []
bottom = []
c = 0
for i in range(len(gry_extr)):
if gry_extr[i][0] > 100 and c < 2:
if c == 0: c += 1
top.append(og[i])
elif c == 1: c += 1
elif c == 2: break
c = 0
for j in range(len(gry_extr)-1, i, -1):
if gry_extr[j][0] > 100 and c < 2:
if c == 0: c += 1
bottom.insert(0, og[j])
elif c == 1: c += 1
elif c == 2: break
for l in range(i, j, 1):
middle.append(og[l])
# print(i, j, l)
top = np.array(top)
middle = np.array(middle)
bottom = np.array(bottom)
self.top = top
self.middle = middle
self.bottom = bottom
return top, middle, bottom
def search(self, query: str):
"""Searches a query with Facebook's search function. Opens result in browser window.
"""
link = "https://www.facebook.com/search/top/?q="
query.replace(" ", "+")
webbrowser.open((link + query))
def clean_top(self, img=None):
"""---
DEPRECATED
---
'Cleans' the top excerpt by removing the profile picture
"""
if img is None: img = self.top
og_img = img.copy()
img2 = img[:,int(img.shape[1]*0.5):,:]
img = img[:,:int(img.shape[1]*0.5),:]
dim = img.shape
img = cv2.resize(og_img[:,:int(og_img.shape[1]*0.5),:], (200, 200))
prediction = self.run_segmentation_model("Utility Models/imgd_2_net_1.pt", img)
# self.vis_model_prediction(img, prediction, True)
prediction = np.transpose(prediction, (1, 0))
prediction = cv2.resize(prediction, (dim[0], dim[1]))
img = og_img[:,:int(og_img.shape[1]*0.5),:]
# show_image(img)
# show_image(img2)
img = np.transpose(img, (1, 0, 2))
# show_image(prediction)
# show_image(img)
vis = self.vis_model_prediction(img, prediction)
out = self.extr_replace_mask(prediction, img, np.array([255, 255, 255]), True)
out = np.transpose(img, (1, 0, 2))
# show_image(out)
out = np.concatenate((out, img2), axis=1)
# show_image(out)
self.top = out
return out, vis
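# Hedged usage sketch for the Facebook parser. The screenshot path is a
# placeholder, and analyse() assumes the post layout that split()/sliceing() expect.
def _demo_facebook(path: str = "post.png"):
    fb = Facebook(read_image(path))
    name, date, text, engagement = fb.analyse()
    return {"username": name, "date": date, "content": text, "engagement": engagement}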
class Twitter(PlutoObject):
def __init__(self, img: np.ndarray):
super().__init__(img)
self.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
self.header, self.bottom = None, None
def split(self, img=None, display=False):
img_og = img
if img is None: img_og = self.img
img_size = 256
img = cv2.resize(img_og.copy(), (img_size, img_size))
img_tensor = self.to_tensor(img, img_size, torch.float32, self.DEVICE)
model = UNET(in_channels=3, out_channels=1)
model = self.load_model(model, "D:/Codeing/Twitter_Segmentation/pytorch version/own_2_net_2.pt", self.DEVICE)
with torch.no_grad():
model_out = torch.sigmoid(model(img_tensor)) * 255
mask = self.from_tensor(model_out, img_size)
if display: show_image(mask)
img = (img.reshape(img_size, img_size, 3) * 255).astype(np.uint8)
mask = cv2.merge((mask, mask, mask))
mask = trimm_and_blur(mask, False, 60, (30, 30), [0, 0, 0])[:,:,0]
if display: show_image(mask)
self.vis_model_prediction(img, mask, False)
mask = expand_to_rows(mask, value=30)
img = img_og
mask = cv2.resize(mask[:,:10], (10, img.shape[0]))
header = []
bottom = []
# split in header / bottom
for i in range(len(mask[:,0])):#range(len(mask[:,0])-1, 0, -1):
if mask[i][0] > 250: header.append(img[i,:])
else: bottom.append(img[i,:])
self.header = np.array(header)
self.bottom = np.array(bottom)
if display:
show_image(self.header)
show_image(self.bottom)
return self.header.copy(), self.bottom.copy()
def header_color_mode(self, img=None):
"""Determines whether the header is in dark mode
Args:
img: if the image should be different to self.header, pass it here
Returns:
True if the header is in dark mode.
"""
if img is not None: self.header = img
dim = self.header.shape
img = self.header[:,int(dim[1]/2):,:]
avg = np.average(img)
return avg < 150
def header_analyse(self, img=None, display=False):
if img is None: img = self.header.copy()
img = (img[::-1]).transpose(1, 0, 2)
img_og = img.copy()
if not self.dark_mode(img): img = trimm_and_blur(img, False, 30, (20, 20), [255, 255, 255], True, [0, 0, 0])
else: img = trimm_and_blur(img, True, 245, (20, 20), [255, 255, 255], True, [0, 0, 0])
if display: show_image(img)
img = expand_to_rows(img[:,:,0], True, 5)
out = []
c = 0
for i in range(len(img)-1, 0, -1):
if img[i][0] > 100:
out.append(img_og[i])
if c == 0: c += 1
elif c == 1: break
out = np.flip((np.array(out).transpose(1, 0, 2)), (0, 1))
if display:
show_image(img)
show_image(out)
ocr_result = self.ocr_cleanup(self.ocr(out))
usersplit = ocr_result.split("@")
return usersplit[0][:-1], usersplit[1]
def body_analyse(self, img=None, display=False): # --> str
if img is None: img = self.bottom.copy()
img_og = img.copy()
if not self.dark_mode(img): img = trimm_and_blur(img, False, 30, (20, 20), [255, 255, 255], True, [0, 0, 0])
else: img = trimm_and_blur(img, True, 245, (20, 20), [255, 255, 255], True, [0, 0, 0])
if display: show_image(img)
exptr = expand_to_rows(img[:,:,0], True, 20)[:,:10]
if display: show_image(exptr)
out = []
for i in range(len(exptr)):
if exptr[i][0] > 100: out.append(img_og[i])
out = np.array(out)
if display: show_image(out)
return self.ocr_cleanup(self.ocr(out))
def analyse_light(self, img=None):
if img is None: img = self.img
self.split()
head_info = self.header_analyse()
body_info = self.body_analyse()
jasoon = { "source": "Twitter",
"tweet": {
# "created_at": "[Published] " + pubsplit,
# "client": "Fox News",
"text": body_info,
"user": {
"name": head_info[0],
"handle": head_info[1]
}
}
}
return jasoon
def analyse(self, img=None):
if img is None: img = self.img
self.split()
head_info = self.header_analyse()
body_info = self.body_analyse()
return head_info[0], head_info[1], body_info
def dark_mode(self, img=None): # -> bool
"""Checks if the screenshot has dark mode enabled
Args:
img: if the checked image should be different from the self.img, pass it here
Returns:
Is the screenshot in dark mode? True / False
"""
testimg = self.img
if img is not None: testimg = img.copy()
top_row = avg_of_row(testimg, 0, True)
bottom_row = avg_of_row(testimg, -1, True)
left_collum = avg_of_collum(testimg, 0, True)
right_collum = avg_of_collum(testimg, -1, True)
final_value = sum([top_row, bottom_row, left_collum, right_collum]) / 4
return final_value < 125
def analyse_light(self):
result = None
if self.dark_mode(): result = self.dark()
else: result = self.std()
header, body = result
# show_image(header)
# show_image(body)
return self.ocr_cleanup(self.ocr(header)), self.ocr_cleanup(self.ocr(body))
def std(self, img=None, display=False):
input_img = None
if img is not None: input_img = img.copy()
else: input_img = self.img.copy()
if img is not None: self.img = img.copy()
blur = trimm_and_blur(input_img, True, 30, (20, 20), [255, 255, 255])
out = trimm_and_blur(blur, False, 250, (5, 5), [0, 0, 0])
msk_inv = (255 - out[:,:,0])
out_exptr = self.expand_to_rows(msk_inv, True)
header_info = []
continue_please = True
cnt = 0
for i in range(len(out_exptr)):
if continue_please:
if out_exptr[i][0] < 250: continue
else:
if out_exptr[i][0] < 250: break
header_info.append(self.img[i])
continue_please = False
# print("hey!")
cnt = i
bottom = []
lastone = False
for i in range(cnt+1, len(out_exptr), 1):
if out_exptr[i][0] < 250:
if lastone:
bottom.append(self.img[3])
lastone = False
continue
bottom.append(self.img[i])
lastone = True
header_info = np.array(header_info)
bottom = np.array(bottom)
if display:
show_image(header_info)
show_image(bottom)
return header_info, bottom
def dark(self, img=None, display=False):
"""Segmentates the input screenshot (dark mode enabled) into header and body.
Args:
img: if the screenshot should be different from self.img, pass it here.
display: True if output should be displayed before return
Returns:
The two segmentated areas.
"""
input_img = None
if img is not None: input_img = img.copy()
else: input_img = self.img.copy()
if img is not None: self.img = img.copy()
blur = trimm_and_blur(input_img, False, 230, (20, 20),[0, 0, 0])
out = trimm_and_blur(blur, True, 10, (5, 5),[255, 255, 255])
msk = out[:,:,0]
out_exptr = self.expand_to_rows(msk, True)
header_info = []
continue_please = True
cnt = 0
for i in range(len(out_exptr)):
if continue_please < 3:
if out_exptr[i][0] < 250:
if continue_please == 1: continue_please += 1
if continue_please == 2: header_info.append(self.img[i])
continue
else:
if out_exptr[i][0] < 250: break
if continue_please == 0: continue_please += 1
if continue_please == 2: break
header_info.append(self.img[i])
cnt = i
bottom = []
lastone = False
for i in range(cnt+1, len(out_exptr), 1):
if out_exptr[i][0] < 250:
if lastone:
bottom.append(self.img[3])
lastone = False
continue
bottom.append(self.img[i])
lastone = True
        header_info = np.array(header_info)
        bottom = np.array(bottom)
        if display:
            show_image(header_info)
            show_image(bottom)
        return header_info, bottom
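# Hedged usage sketch for the Twitter parser. Note that split() loads a UNET
# state dict from a hard-coded path, so this only runs where that file exists.
def _demo_twitter(path: str = "tweet.png"):
    tw = Twitter(read_image(path))
    name, handle, text = tw.analyse()
    return {"name": name, "handle": handle, "text": text}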
""" Utility functions for DAG analysis using adjacency matrices"""
# Copyright (C) 2016 by
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["<NAME> (<EMAIL>)"])
import numpy as np
def causet_adj_matrix(S, R):
""" Return causal set adjacency matrix A
S: separations
R: original coordinates"""
N = S.shape[0]
A = np.zeros((N, N))
for i in range(N):
for j in range(N):
# check time ordering - A[i,j] is 1 if i is in the future of j
if R[i,0] > R[j,0]:
if S[i,j] < 0:
A[i,j] = 1.
return A
def transitive_completion(A_):
""" Transitively complete adjacency matrix A"""
A = A_[:,:]
A_0 = A[:,:]
N, _ = A.shape
A_diff = True
i = 0
while A_diff:
A_old = A[:,:]
A = np.dot(A, A_0)
A += A_0
A[A>1.] = 1.
if np.array_equal(A_old, A):
A_diff = False
assert i < N, 'ERROR - Transitive Completion required more than N steps'
i += 1
return A
def transitive_reduction(A_, LP=None):
""" Transitively reduce adjacency matrix A
plan is to look at successive powers of A and if an element is 1 in both
then it represents an edge which is transitively implied
we need to do this |LP| times -
- could do it N times to be sure (start here)
- could compute |LP| but that might be slower
- could allow |LP| as optional input incase it is already calculated
"""
A = A_[:,:]
A_0 = A[:,:]
N, _ = A.shape
if LP:
max_path = LP
else:
max_path = N
i = 0
while i < max_path:
A = np.dot(A, A_0)
A = A_0 - A
A[A<1] = 0
A[A>1] = 1
i += 1
return A
def longest_path_matrix(A, dmax=None):
""" Calculate all longest paths and return them in a matrix
Arguments:
A -- adjacency matrix
dmax -- maximum path length to be returned
Result should be an NxN assymetric matrix of longest paths
Notes:
JC - I believe this scales like N**3
Finding one longest path can be done in linear time
And we need to find N**2 of them so this is reasonable
JC - The longest path is conjectured to approximate the geodesic in
Lorentzian spacetimes but this is not proven to my knowledge
"""
N = A.shape[0]
if dmax is None:
dmax = N
LP = np.zeros((N, N))
i = 1
B = A[:,:]
while np.sum(B) > 0.:
path_exist = np.sign(B)
path_length = i * path_exist
LP = np.maximum.reduce((LP, path_length))
B = np.dot(B, A)
i += 1
if i == dmax:
return LP
return LP
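# Worked toy example (illustrative values): a three-element chain 0 < 1 < 2.
# transitive_completion() adds the implied 2 -> 0 relation, transitive_reduction()
# strips it again, and longest_path_matrix() reports the 2-step path from 0 to 2.
def _demo_chain():
    A = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [0., 1., 0.]])       # A[i, j] = 1 iff i is directly above j
    A_full = transitive_completion(A)  # adds A_full[2, 0] = 1
    A_red = transitive_reduction(A_full)
    LP = longest_path_matrix(A_full)   # LP[2, 0] == 2
    return A_full, A_red, LP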
def naive_spacelike_matrix(LP, dmax=None, k=None):
""" Calculate all naive spacelike distances and return them in a matrix
Arguments:
LP -- longest path matrix
dmax -- maximum spacelike distance to be returned
k -- only determine distances to k 'landmark' points, and leave the rest
# this feature needs testing
Result should be an NxN symmetric matrix of negative longest paths
and positive naive spacelike separations
JC - this seems quite slow when calculated for all N - I think it is the
limiting factor on embedding large networks in spacetimes
"""
    if dmax is None:
dmax = np.max(LP)
ds = LP + LP.transpose()
ds2 = ds * ds * -1
N = LP.shape[0]
for i in range(N):
max_j = i
if k:
max_j = np.min([i, k])
for j in range(max_j):
# spacelike distance is symmetric so ds[i,j]==ds[j,i], and ds[i,i]==0
if ds2[i,j] == 0:
# then they are spacelike separated and need a new value here
i_past = np.flatnonzero(LP[:,i])
j_past = np.flatnonzero(LP[:,j])
                w_list = np.intersect1d(i_past, j_past)
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal, assert_raises, assert_warns
)
import textwrap
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
def test_self_containing(self):
arr0d = np.array(None)
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
first = np.array(None)
second = np.array(None)
first[()] = second
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
def test_containing_list(self):
        # printing square brackets directly would be ambiguous
arr1d = np.array([None, None])
arr1d[0] = [1, 2]
arr1d[1] = [3]
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
def test_void_scalar_recursion(self):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
class TestComplexArray(object):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
'[0.+infj]', '[0.+infj]', '[0.+infj]',
'[0.-infj]', '[0.-infj]', '[0.-infj]',
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
'[1.+infj]', '[1.+infj]', '[1.+infj]',
'[1.-infj]', '[1.-infj]', '[1.-infj]',
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
'[inf+infj]', '[inf+infj]', '[inf+infj]',
'[inf-infj]', '[inf-infj]', '[inf-infj]',
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
'[nan+infj]', '[nan+infj]', '[nan+infj]',
'[nan-infj]', '[nan-infj]', '[nan-infj]',
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
for res, val in zip(actual, wanted):
assert_equal(res, val)
class TestArray2String(object):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
if sys.version_info[0] >= 3:
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
else:
x_hex = "[0x0L 0x1L 0x2L]"
x_oct = "[0L 01L 02L]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
# check for backcompat that using FloatFormat works and emits warning
with assert_warns(DeprecationWarning):
fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
'[0. 1. 2.]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
np.set_printoptions(legacy='1.13')
try:
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
('NaT',) ('NaT',) ('NaT',)]""")
)
finally:
np.set_printoptions(legacy=False)
# same again, but with non-legacy behavior
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ( 'NaT',)
( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',)]""")
)
# and again, with timedeltas
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
def test_unstructured_void_repr(self):
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
assert_equal(repr(a),
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
assert_equal(eval(repr(a), vars(np)), a)
assert_equal(eval(repr(a[0]), vars(np)), a[0])
def test_edgeitems_kwarg(self):
# previously the global print options would be taken over the kwarg
arr = np.zeros(3, int)
assert_equal(
np.array2string(arr, edgeitems=1, threshold=0),
"[0 ... 0]"
)
def test_summarize_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ... 998 999 1000]'
assert_equal(str(A), strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_equal(repr(A), reprA)
def test_summarize_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_equal(repr(A), reprA)
def test_linewidth(self):
a = np.full(6, 1)
def make_str(a, width, **kw):
return np.array2string(a, separator="", max_line_width=width, **kw)
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
' 11]')
assert_equal(make_str(a, 8), '[111111]')
assert_equal(make_str(a, 7), '[11111\n'
' 1]')
assert_equal(make_str(a, 5), '[111\n'
' 111]')
b = a[None,None,:]
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
' 1]]]')
assert_equal(make_str(b, 12), '[[[111111]]]')
assert_equal(make_str(b, 9), '[[[111\n'
' 111]]]')
assert_equal(make_str(b, 8), '[[[11\n'
' 11\n'
' 11]]]')
def test_wide_element(self):
a = np.array(['xxxxx'])
assert_equal(
np.array2string(a, max_line_width=5),
"['xxxxx']"
)
assert_equal(
np.array2string(a, max_line_width=5, legacy='1.13'),
"[ 'xxxxx']"
)
class TestPrintOptions(object):
"""Test getting and setting global print options."""
def setup(self):
self.oldopts = np.get_printoptions()
def teardown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
unicode = type(u'')
assert_equal(unicode(np.array(u'café', np.unicode_)), u'café')
if sys.version_info[0] >= 3:
assert_equal(repr(np.array('café', np.unicode_)),
"array('café', dtype='<U4')")
else:
assert_equal(repr(np.array(u'café', np.unicode_)),
"array(u'caf\\xe9', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
assert_equal(str(a[0]), '([0, 0, 0],)')
assert_equal(repr(np.datetime64('2005-02-25')[...]),
"array('2005-02-25', dtype='datetime64[D]')")
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
"array(10, dtype='timedelta64[Y]')")
# repr of 0d arrays is affected by printoptions
x = np.array(1)
np.set_printoptions(formatter={'all':lambda x: "test"})
assert_equal(repr(x), "array(test)")
# str is unaffected
assert_equal(str(x), "1")
# check `style` arg raises
assert_warns(DeprecationWarning, np.array2string,
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
y = np.array([1., 2., -10.])
z = np.array([100., 2., -1.])
w = np.array([-100., 2., 1.])
assert_equal(repr(x), 'array([1., 2., 3.])')
assert_equal(repr(y), 'array([ 1., 2., -10.])')
assert_equal(repr(np.array(y[0])), 'array(1.)')
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
assert_equal(repr(z), 'array([100., 2., -1.])')
assert_equal(repr(w), 'array([-100., 2., 1.])')
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
x = np.array([np.inf, 100000, 1.1234])
y = np.array([np.inf, 100000, -1.1234])
z = np.array([np.inf, 1.1234, -1e120])
np.set_printoptions(precision=2)
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
def test_bool_spacing(self):
assert_equal(repr(np.array([True, True])),
'array([ True, True])')
assert_equal(repr(np.array([True, False])),
'array([ True, False])')
assert_equal(repr(np.array([True])),
'array([ True])')
assert_equal(repr(np.array(True)),
'array(True)')
assert_equal(repr(np.array(False)),
'array(False)')
def test_sign_spacing(self):
a = np.arange(4.)
b = np.array([1.234e9])
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array(1.)')
assert_equal(repr(b), 'array([1.234e+09])')
assert_equal(repr(np.array([0.])), 'array([0.])')
np.set_printoptions(sign=' ')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array( 1.)')
assert_equal(repr(b), 'array([ 1.234e+09])')
np.set_printoptions(sign='+')
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
assert_equal(repr(np.array(1.)), 'array(+1.)')
assert_equal(repr(b), 'array([+1.234e+09])')
np.set_printoptions(legacy='1.13')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(b), 'array([ 1.23400000e+09])')
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
assert_equal(repr(np.array(1.)), 'array(1.0)')
assert_equal(repr(np.array([0.])), 'array([ 0.])')
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
def test_float_overflow_nowarn(self):
# make sure internal computations in FloatingFormat don't
# warn about overflow
repr(np.array([1e4, 0.1], dtype='f2'))
def test_sign_spacing_structured(self):
a = np.ones(2, dtype='f,f')
assert_equal(repr(a),
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
assert_equal(repr(a[0]), "(1., 1.)")
def test_floatmode(self):
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
y = np.array([0.2918820979355541, 0.5064172631089138,
0.2848750619642916, 0.4342965294660567,
0.7326538397312751, 0.3459503329096204,
0.0862072768214508, 0.39112753029631175],
dtype=np.float64)
z = np.arange(6, dtype=np.float16)/10
# also make sure 1e23 is right (is between two fp numbers)
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
# note: we construct w from the strings `1eXX` instead of doing
# `10.**arange(24)` because it turns out the two are not equivalent in
# python. On some architectures `1e23 != 10.**23`.
wp = np.array([1.234e1, 1e2, 1e123])
# unique mode
np.set_printoptions(floatmode='unique')
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
" 0.0862072768214508 , 0.39112753029631175])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w),
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
" 1.e+24])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
# maxprec mode, precision=8
np.set_printoptions(floatmode='maxprec', precision=8)
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
# fixed mode, precision=4
np.set_printoptions(floatmode='fixed', precision=4)
assert_equal(repr(x),
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
" 0.2383, 0.4226], dtype=float16)")
assert_equal(repr(y),
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
assert_equal(repr(z),
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
# for larger precision, representation error becomes more apparent:
np.set_printoptions(floatmode='fixed', precision=8)
assert_equal(repr(z),
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
" 0.50000000], dtype=float16)")
# maxprec_equal mode, precision=8
np.set_printoptions(floatmode='maxprec_equal', precision=8)
assert_equal(repr(x),
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
assert_equal(repr(y),
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
def test_legacy_mode_scalars(self):
# in legacy mode, str of floats get truncated, and complex scalars
# use * for non-finite imaginary part
np.set_printoptions(legacy='1.13')
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
np.set_printoptions(legacy=False)
assert_equal(str(np.float64(1.123456789123456789)),
'1.1234567891234568')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
def test_legacy_stray_comma(self):
np.set_printoptions(legacy='1.13')
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
np.set_printoptions(legacy=False)
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
def test_dtype_linewidth_wrapping(self):
np.set_printoptions(linewidth=75)
assert_equal(repr(np.arange(10,20., dtype='f4')),
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
dtype=float32)"""))
styp = '<U4' if sys.version_info[0] >= 3 else '|S4'
assert_equal(repr(np.ones(3, dtype=styp)),
"array(['1', '1', '1'], dtype='{}')".format(styp))
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
dtype='{}')""".format(styp)))
def test_linewidth_repr(self):
a = np.full(7, fill_value=2)
np.set_printoptions(linewidth=17)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2])""")
)
np.set_printoptions(linewidth=17, legacy='1.13')
""" psychoacoustics exports classes for handling psychophysical procedures and
measures, like trial sequences and staircases."""
import io
import pathlib
import datetime
import json
import pickle
import zipfile
import collections
from contextlib import contextmanager
from abc import abstractmethod
import warnings
import matplotlib.cbook # necessary for matplotlib versions <3.5 to suppress a MatplotlibDeprecationWarning
try:
import curses
except ImportError:
curses = None
import numpy
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
import slab
results_folder = 'Results'
input_method = 'keyboard' #: sets the input for the Key context manager to 'keyboard', 'buttonbox', or 'figure'
class _Buttonbox:
"""
Adapter class to allow easy switching between input from the keyboard via curses and from the custom buttonbox
adapter (custom arduino device that sends a keystroke followed by a return keystroke when pressing a button on
the arduino).
"""
@staticmethod
def getch():
input_key = input() # buttonbox adapter has to return the keycode of intended keys!
if input_key:
return int(input_key)
class _FigChar:
"""
Adapter class to allow easy switching to input via the current_character attribute of stairs figure.
Set slab.psychoacoustics.input_method = 'figure' to use. A figure with the name 'stairs' will be opened if it is not
already present. If used together with the plot method of the Staircase class, input is acquired through the stairs
plot. Depending on the operating system, you may have to click once into the figure to give it focus.
"""
warnings.filterwarnings("ignore", category=matplotlib.cbook.MatplotlibDeprecationWarning)
@staticmethod
def getch():
global key
def _on_key(event):
global key
key = event.key
fig = plt.figure('stairs')
cid = fig.canvas.mpl_connect('key_press_event', _on_key)
key = None # reset
while not key:
plt.pause(0.01) # wait for 10ms, but keep figure event loop running
return ord(key)
@contextmanager
def key():
"""
Wrapper for curses module to simplify getting a single keypress from the terminal (default) or a buttonbox.
Set slab.psychoacoustics.input_method = 'buttonbox' to use a custom USB buttonbox.
Example::
with slab.Key() as key:
response = key.getch()
"""
if input_method == 'keyboard':
if curses is None:
raise ImportError(
'You need curses to use the keypress class (pip install curses (or windows-curses))')
curses.filter()
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
yield stdscr
curses.nocbreak()
curses.echo()
curses.endwin()
elif input_method == 'buttonbox':
yield _Buttonbox
elif input_method == 'figure':
yield _FigChar
else:
raise ValueError('Unknown input method!')
class LoadSaveMixin:
""" Mixin to provide loading and saving functions. Supports JSON the pickle format """
def save_pickle(self, file_name, clobber=False):
"""
Save the object as pickle file.
Arguments:
file_name (str | pathlib.Path): name of the file to create.
clobber (bool): overwrite existing file with the same name, defaults to False.
Returns:
(bool): True if writing was successful.
"""
if isinstance(file_name, pathlib.Path):
file_name = str(file_name)
if pathlib.Path(file_name).exists() and not clobber:
raise FileExistsError("Select clobber=True to overwrite.")
with open(file_name, 'wb') as fp:
pickle.dump(self.__dict__, fp, protocol=pickle.HIGHEST_PROTOCOL)
return True
def load_pickle(self, file_name):
"""
Read pickle file and deserialize the object into `self.__dict__`.
Arguments:
file_name (str | pathlib.Path): name of the file to read.
"""
if isinstance(file_name, pathlib.Path):
file_name = str(file_name)
with open(file_name, 'rb') as fp:
self.__dict__ = pickle.load(fp)
def save_json(self, file_name=None, clobber=False):
"""
Save the object as a JSON file. An existing file is only overwritten if `clobber` is True.
Arguments:
file_name (str | pathlib.Path): name of the file to create. If None or 'stdout', return a JSON object.
clobber (bool): overwrite existing file with the same name, defaults to False.
Returns:
(bool): True if writing was successful.
"""
def default(i): return int(i) if isinstance(i, numpy.int64) else i # helper for converting numpy arrays
if isinstance(file_name, pathlib.Path):
file_name = str(file_name)
if (file_name is None) or (file_name == 'stdout'):
return json.dumps(self.__dict__, indent=2, default=default)
if pathlib.Path(file_name).exists() and not clobber:
raise FileExistsError("Select clobber=True to overwrite.")
try:
with open(file_name, 'w') as f:
json.dump(self.__dict__, f, indent=2, default=default)
return True
except (TypeError, ValueError): # type error caused by json dump, value error by default function
print("Your sequence contains data which is not JSON serializable, use the save_pickle method instead.")
def load_json(self, file_name):
"""
Read JSON file and deserialize the object into `self.__dict__`.
Arguments:
file_name (str | pathlib.Path): name of the file to read.
"""
if isinstance(file_name, pathlib.Path):
file_name = str(file_name)
with open(file_name, 'r') as f:
self.__dict__ = json.load(f)
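# Illustrative sketch (not part of the library API): a typical save/load
# round-trip through the LoadSaveMixin, using a Trialsequence defined further
# below in this module. The file name is an arbitrary example.
def _example_save_load_roundtrip(file_name='sequence.json'):
    """Save a Trialsequence as JSON and restore it into a fresh object."""
    seq = Trialsequence(conditions=3, n_reps=2)
    seq.save_json(file_name, clobber=True)  # serializes seq.__dict__ to disk
    restored = Trialsequence(conditions=3)  # placeholder instance
    restored.load_json(file_name)           # its __dict__ is replaced from the file
    assert restored.trials == seq.trials
    return restored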
class TrialPresentationOptionsMixin:
"""
Mixin to provide alternative forced-choice (AFC) and Same-Different trial presentation methods and
response simulation to `Trialsequence` and `Staircase`.
"""
@abstractmethod
def add_response(self, response):
pass
@abstractmethod
def print_trial_info(self):
pass
def present_afc_trial(self, target, distractors, key_codes=(range(49, 58)), isi=0.25, print_info=True):
"""
Present the reference and distractor sounds in random order and acquire a response keypress.
The subject has to identify at which position the reference was played. The result (True if response was correct
or False if response was wrong) is stored in the sequence via the `add_response` method.
Arguments:
target (instance of slab.Sound): sound that ought to be identified in the trial
distractors (instance or list of slab.Sound): distractor sound(s)
key_codes (list of int): ascii codes for the response keys (get code for button '1': ord('1') --> 49)
pressing the second button in the list is equivalent to the response "the reference was the second sound
played in this trial". Defaults to the key codes for buttons '1' to '9'
isi (int or float): inter stimulus interval which is the pause between the end of one sound and the start
of the next one.
print_info (bool): If true, call the `print_trial_info` method afterwards
"""
if isinstance(distractors, list):
stims = [target] + distractors # assuming sound object and list of sounds
else:
stims = [target, distractors] # assuming two sound objects
order = numpy.random.permutation(len(stims))
for idx in order:
stim = stims[idx]
stim.play()
plt.pause(isi)
with key() as k:
response = k.getch()
interval = numpy.where(order == 0)[0][0]
interval_key = key_codes[interval]
response = response == interval_key
self.add_response(response)
if print_info:
self.print_trial_info()
def present_tone_trial(self, stimulus, correct_key_idx=0, key_codes=(range(49, 58)), print_info=True):
"""
Present the reference and distractor sounds in random order and acquire a response keypress.
The result (True if response was correct or False if response was wrong) is stored in the sequence via the
`add_response` method.
Arguments:
stimulus (slab.Sound): sound played in the trial.
correct_key_idx (int): index of the key in `key_codes` that represents a correct response.
Response is correct if `response == key_codes[correct_key_idx]`.
key_codes (list of int): ascii codes for the response keys (get code for button '1': ord('1') --> 49).
print_info (bool): If true, call the `print_trial_info` method afterwards.
"""
stimulus.play()
with slab.key() as k:
response = k.getch()
response = response == key_codes[correct_key_idx]
self.add_response(response)
if print_info:
self.print_trial_info()
def simulate_response(self, threshold=None, transition_width=2, intervals=1, hitrates=None):
"""
Return a simulated response to the current condition index value by calculating the hitrate from a
psychometric (logistic) function. This is only sensible if trials is numeric and an interval scale representing
a continuous stimulus value.
Arguments:
threshold(None | int | float): Midpoint of the psychometric function for adaptive testing. When the
intensity of the current trial is equal to the `threshold` the hitrate is 50 percent.
transition_width (int | float): range of stimulus intensities over which the hitrate increases
from 0.25 to 0.75.
intervals (int): use 1 (default) to indicate a yes/no trial, 2 or more to indicate an alternative forced
choice trial. The number of choices determines the probability for a correct response by chance.
hitrates (None | list | numpy.ndarray): list or numpy array of hitrates for the different conditions,
to allow custom rates instead of simulation. If given, `threshold` and `transition_width` are not used.
If a single value is given, this value is used.
"""
slope = 0.5 / transition_width
if isinstance(self, slab.psychoacoustics.Trialsequence): # check which class the mixin is in
current_condition = self.trials[self.this_n]
elif isinstance(self, slab.psychoacoustics.Staircase):
current_condition = self._next_intensity
else:
return None
if hitrates is None:
if threshold is None:
raise ValueError("threshold can't be None if hitrates is None!")
hitrate = 1 / (1 + numpy.exp(4 * slope * (threshold - current_condition))) # scale/4 = slope at midpoint
else:
if isinstance(hitrates, (list, numpy.ndarray)):
hitrate = hitrates[current_condition]
else:
hitrate = hitrates
hit = numpy.random.rand() < hitrate # True with probability hitrate
if hit or intervals == 1:
return hit
return numpy.random.rand() < 1/intervals # still 1/intervals chance to hit the right interval
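# Illustrative sketch (not part of the library): the logistic psychometric
# function that simulate_response evaluates, shown stand-alone. The threshold
# and transition_width defaults are arbitrary example values.
def _example_hitrate(stimulus_value, threshold=5.0, transition_width=2.0):
    """Return the simulated hitrate for a yes/no trial at `stimulus_value`."""
    slope = 0.5 / transition_width
    # hitrate is 0.5 when stimulus_value == threshold; the slope of the curve
    # at that midpoint is 0.5 / transition_width (scale/4 = slope at midpoint)
    return 1 / (1 + numpy.exp(4 * slope * (threshold - stimulus_value)))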
class Trialsequence(collections.abc.Iterator, LoadSaveMixin, TrialPresentationOptionsMixin):
"""
Randomized, non-adaptive trial sequences.
Arguments:
conditions (list | int | str): defines the different stimuli appearing the sequence. If given a list,
every element is one condition. The elements can be anything - strings, dictionaries, objects etc.
Note that, if the elements are not JSON serializable, the sequence can only be saved as a pickle file.
If conditions is an integer i, the list of conditions is given by range(i). A string is treated as the
filename of a previously saved trial sequence object, which is then loaded.
n_reps (int): number of repetitions for each condition. Number of trials is given by len(conditions)*n_reps).
trials (None | list | numpy.ndarray): The sequence of trials in the order in which they are appearing in
sequence. Defaults to None, because trials are usually generated by the class based on the other
parameters. However, it is possible to pass a list or one-dimensional array. In that case the parameters
for generating the sequence are ignored.
kind (str): The kind of randomization used to generate the trial sequence. Possible options are:
`non_repeating` (randomization without direct repetition of a condition, default if n_conditions > 2),
`random_permutation` (complete randomization, default if `n_conditions` <= 2) or
`infinite` (sequence that resets when reaching the end to generate an infinite number of trials.
The randomization method is `random_permutation` if `n_conditions` <= 2 and `non_repeating` otherwise).
deviant_freq (float): frequency with which deviants (encoded as 0) appear in the sequence. The minimum number
of trials between two deviants is 3 if deviant frequency is below 10%, 2 if it is below 20% and 1 if it
is below 30%. A deviant frequency greater than 30% is not supported
label (str): a text label for the sequence.
Attributes:
.trials: the order in which the conditions are repeated in the sequence. The elements are integers referring
to indices in `conditions`, starting from 1. 0 represents a deviant (only present if `deviant_freq` > 0)
.n_trials: the total number of trials in the sequence
.conditions: list of the different unique elements in the sequence
.n_conditions: number of conditions, is equal to len(conditions) or len(conditions)+1 if there are deviants
.n_remaining: the number of trials remaining, i.e. that have not been called when iterating through the sequence
.this_n: current trials index in the entire sequence, equals the number of trials completed so far
.this_trial: a dictionary giving the parameters of the current trial
.finished: boolean signaling if all trials have been called
.kind: randomization kind of sequence (`random_permutation`, `non_repeating`, `infinite`)
.data: list with the same length as the one in the `trials` attribute. On sequence generation, `data` is a
list of empty lists. Then, one can use the `add_response` method to append to the list belonging to the
current trial
"""
def __init__(self, conditions=2, n_reps=1, trials=None, kind=None, deviant_freq=None, label=''):
self.label = label
self.n_reps = int(n_reps)
if isinstance(conditions, pathlib.Path):
conditions = str(conditions)
if isinstance(conditions, str):
if not pathlib.Path(conditions).exists():
raise ValueError(f"could not load the file {conditions}")
try:
self.load_json(conditions) # import entire object from file
except (UnicodeDecodeError, json.JSONDecodeError) as _:
self.load_pickle(conditions)
else:
if isinstance(conditions, int):
self.conditions = list(range(1, conditions+1))
else:
self.conditions = conditions
self.n_conditions = len(self.conditions)
if trials is None: # generate stimulus sequence
if kind is None:
kind = 'random_permutation' if self.n_conditions <= 2 else 'non_repeating'
if kind == 'random_permutation':
self.trials = self._create_random_permutation(self.n_conditions, self.n_reps)
elif kind == 'non_repeating':
self.trials = self._create_simple_sequence(self.n_conditions, self.n_reps)
elif kind == 'infinite':
# implementation of the infinite sequence is a bit of a hack (number of completed trials needs
# to be calculated as: trials.this_rep_n * trials.n_conditions + trials.this_trial_n + 1)
# It's also not possible to make an infinite sequence with deviants.
if deviant_freq is not None:
raise ValueError("Deviants are not implemented for infinite sequences!")
if self.n_conditions <= 2:
self.trials = self._create_random_permutation(self.n_conditions, 5)
self.n_reps = 5
else:
self.trials = self._create_simple_sequence(self.n_conditions, 1)
self.n_reps = 1
else:
raise ValueError(f'Unknown kind parameter: {kind}!')
if deviant_freq is not None: # insert deviants
deviants = slab.Trialsequence._deviant_indices(n_standard=int(self.n_conditions * n_reps),
deviant_freq=deviant_freq)
self.trials = numpy.insert(arr=self.trials, obj=deviants, values=0)
self.n_conditions += 1 # add one condition for deviants
else: # make a sequence from a given list of trials
self.conditions = list(set(trials))
for i, condition in enumerate(
self.conditions): # encode conditions as integers 1 to n_conditions in trials
for t, trial in enumerate(trials):
if trial == condition:
trials[t] = i + 1
self.trials = trials
self.n_conditions = len(self.conditions)
if isinstance(self.trials, numpy.ndarray):
self.trials = self.trials.tolist() # convert trials to list
self.this_n = -1 # trial index in entire sequence
self.this_trial = [] # condition of current trial
self.finished = False
self.data = [] # holds responses if TrialPresentationOptions methods are called
self.n_trials = len(self.trials)
self.n_remaining = self.n_trials
self.kind = kind
self.data = [[] for _ in self.trials]
def __repr__(self):
return self.__dict__.__repr__()
def __str__(self):
return f'Trialsequence, trials {"inf" if self.kind=="infinite" else self.n_trials}, ' \
f'remaining {"inf" if self.kind=="infinite" else self.n_remaining}, current condition {self.this_trial}'
def __next__(self):
"""
Called when iterating through a sequence. Advances to the next trial and returns it. Updates the attributes
`this_trial` and `this_n`. If the trials have ended, this method raises a StopIteration.
Returns:
(int): current element of the list in `trials`
"""
self.this_n += 1
self.n_remaining -= 1
if self.n_remaining < 0: # all trials complete
if self.kind == 'infinite': # infinite sequence -> reset and start again
# new sequence, avoid start with previous condition
self.trials = self._create_simple_sequence(len(self.conditions), self.n_reps,
dont_start_with=self.trials[-1])
self.this_n = 0
self.n_remaining = self.n_trials - 1 # reset trial countdown to length of new trial
# sequence (subtract 1 because we return the 0th trial below)
else: # finite sequence -> finish
self.this_trial = []
self.finished = True
if self.finished:
raise StopIteration
if self.trials[self.this_n] == 0:
self.this_trial = 0
else:
self.this_trial = self.conditions[self.trials[self.this_n]-1] # fetch the trial info
return self.this_trial
def add_response(self, response):
"""
Append response to the list in the `data` attribute belonging to the current trial (see Trialsequence doc).
Arguments:
response (any): data to append to the list. Can be anything but save_json method won't be available if
the content of `response` is not JSON serializable (if it's an object for example).
"""
if self.this_n < 0:
print("Can't add response because trial hasn't started yet!")
else:
self.data[self.this_n].append(response)
def print_trial_info(self):
""" Convenience method for printing current trial information. """
print(f'{self.label} | trial # {self.this_n} of {"inf" if self.kind=="infinite" else self.n_trials} '
f'({"inf" if self.kind=="infinite" else self.n_remaining} remaining): condition {self.this_trial}, '
f'last response: {self.data[self.this_n-1]}')
@staticmethod
def _create_simple_sequence(n_conditions, n_reps, dont_start_with=None):
"""
Create a randomized sequence of integers without direct repetitions of any element.
Arguments:
n_conditions (int): the number of conditions in the list. The array returned contains integers from 1
to the value of `n_conditions`.
n_reps (int): number that each element is repeated. Length of the returned array is `n_conditions * n_reps`
dont_start_with (int): if not None, don't start the sequence with this integer. Can be useful if several
sequences are used and the final trial of the last sequence should not be the same as the first
element of the next sequence.
Returns:
(numpy.ndarray): randomized sequence of length n_conditions * n_reps without direct repetitions of any
element.
"""
permute = list(range(1, n_conditions+1))
if dont_start_with is not None:
trials = [dont_start_with]
else:
trials = []
for _ in range(n_reps):
numpy.random.shuffle(permute)
if len(trials) > 0:
while trials[-1] == permute[0]:
numpy.random.shuffle(permute)
trials += permute
if dont_start_with is not None: # delete first entry ('dont_start_with')
trials = trials[1:]
return numpy.array(trials)
@staticmethod
def _deviant_indices(n_standard, deviant_freq=.1):
"""
Create sequence for an oddball experiment which contains two conditions: standards (1) and deviants (0).
Arguments:
n_standard (int): number of standard trials, encoded as 1, in the sequence.
deviant_freq (float): frequency of deviants, encoded as 0, in the sequence. Also determines the minimum
number of standards between two deviants which is 3 if deviant_freq <= .1, 2 if deviant_freq <= .2 and
1 if deviant_freq <= .3. A deviant frequency > .3 is not supported.
Returns:
(numpy.ndarray): sequence of length n_standard+(n_standard*deviant_freq) with deviants.
"""
if deviant_freq <= .1:
min_dist = 3
elif deviant_freq <= .2:
min_dist = 2
elif deviant_freq <= .3:
min_dist = 1
else:
raise ValueError("Deviant frequency can't be greater than 0.3!")
# get the possible combinations of deviants and normal trials:
n_deviants = int(n_standard*deviant_freq)
indices = range(n_standard)
deviant_indices = numpy.random.choice(indices, n_deviants, replace=False)
deviant_indices.sort()
dist = numpy.diff(deviant_indices)
while numpy.min(dist) < min_dist: # reshuffle until minimum distance is satisfied
deviant_indices = numpy.random.choice(indices, n_deviants, replace=False)
deviant_indices.sort()
dist = numpy.diff(deviant_indices)
return deviant_indices
@staticmethod
def _create_random_permutation(n_conditions, n_reps):
"""
Create a completely random sequence of integers.
Arguments:
n_conditions (int): the number of conditions in the list. The array returned contains integers from 1
to the value of `n_conditions`.
n_reps (int): number that each element is repeated. Length of the returned array is n_conditions * n_reps.
Returns:
(numpy.ndarray): randomized sequence.
"""
return numpy.random.permutation(numpy.tile(list(range(1, n_conditions+1)), n_reps))
def get_future_trial(self, n=1):
"""
Returns the condition of a trial n iterations into the future or past, without advancing the trials.
Arguments:
n (int): number of iterations into the future or past (negative numbers).
Returns:
(any): element of the list stored in the `conditions` attribute belonging to the trial n
iterations into the past/future. Returns None if attempting to go beyond the first/last trial
"""
if n > self.n_remaining or self.this_n + n < 0:
return None
return self.conditions[self.trials[self.this_n + n]-1]
def transitions(self):
"""
Count the number of transitions between conditions.
Returns:
(numpy.ndarray): table of shape `n_conditions` x `n_conditions` where the rows represent the condition
transitioning from and the columns represent the condition transitioning to. For example [0, 2] shows the
number of transitions from condition 1 to condition 3. If the `kind` of the sequence is "non_repeating",
the diagonal is 0 because no condition transitions into itself.
"""
transitions = numpy.zeros((self.n_conditions, self.n_conditions))
for i, j in zip(self.trials, self.trials[1:]):
transitions[i-1, j-1] += 1
return transitions
def condition_probabilities(self):
"""
Return the frequency with which each condition appears in the sequence.
Returns:
(list): list of floats, where every element represents the frequency of one condition.
The first element is the frequency of the first condition and so on.
"""
probabilities = []
for i in range(self.n_conditions):
num = self.trials.count(i)
num /= self.n_trials
probabilities.append(num)
return probabilities
def response_summary(self):
"""
Generate a summary of the responses for each condition. The function counts how often a specific response
was given to a condition for all conditions and each possible response (including None).
Returns:
(list of lists | None): indices of the outer list represent the conditions in the sequence. Each inner
list contains the number of responses per response key, with the response keys sorted in ascending order,
the last element always represents None. If the sequence is not finished yet, None is returned.
Examples::
import slab
import random
sequence = slab.Trialsequence(conditions=3, n_reps=10) # a sequence with three conditions
# iterate trough the list and generate a random response. The response can be either yes (1), no (0) or
# there can be no response at all (None)
for trial in sequence:
response = random.choice([0, 1, None])
sequence.add_response(response)
sequence.response_summary()
# Out: [[1, 1, 7], [2, 5, 3], [4, 4, 2]]
# The first sublist shows that the subject responded to the first condition once with no (0),
# once with yes (1) and did not give a response seven times, the second and third list show
# prevalence of the same response keys for conditions two and three.
"""
if self.finished:
# list of used response key codes (add None in case it's not present):
response_keys = [item for sublist in self.data for item in sublist]
response_keys = list(set(response_keys + [None]))
response_keys = sorted(response_keys, key=lambda x: (x is None, x)) # sort, with 'None' at the end
responses = []
for condition in self.conditions:
idx = [i for i, cond in enumerate(self.trials) if cond == condition] # indices of condition in sequence
# count how often each type of key was given to this condition:
condition_data = [self.data[i] for i in idx]
count = collections.Counter([item for sublist in condition_data for item in sublist])
resp_1cond = []
for r in response_keys:
resp_1cond.append(count[r])
responses.append(resp_1cond)
return responses
else:
return None
def plot(self, axis=None, show=True):
"""
Plot the trial sequence as scatter plot.
Arguments:
axis (matplotlib.pyplot.Axes): plot axis to draw on, if none a new plot is generated
show (bool): show the plot immediately, defaults to True
"""
if plt is None:
raise ImportError('Plotting requires matplotlib!')
if axis is None:
axis = plt.subplot()
axis.scatter(range(self.n_trials), self.trials)
axis.set(title='Trial sequence', xlabel='Trials', ylabel='Condition index')
if show:
plt.show()
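# Illustrative sketch (not part of the library): a minimal experiment loop over
# a Trialsequence with 10% deviant trials. The conditions, repetitions and the
# dummy "response" are arbitrary example values.
def _example_trialsequence_loop():
    """Iterate through a sequence and record a dummy response per trial."""
    seq = Trialsequence(conditions=3, n_reps=20, deviant_freq=0.1, label='demo')
    for condition in seq:                 # condition is 0 for deviants, otherwise 1..3
        seq.add_response(condition != 0)  # dummy response: "was this a standard trial?"
    return seq.transitions(), seq.response_summary()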
class Staircase(collections.abc.Iterator, LoadSaveMixin, TrialPresentationOptionsMixin):
"""
Class to handle adaptive testing, i.e. smoothly selecting the next trial, reporting current values and so on.
The sequence terminates after a given number of reversals has been reached.
Arguments:
start_val (int | float): initial stimulus value for the staircase
n_reversals (int): number of reversals needed to terminate the staircase
step_sizes (int | list): Size of steps in the staircase. Given an integer, the step size is constant. Given
a list, the step size will progress to the next entry at each reversal. If the list is exceeded before the
sequence was finished, it will continue with the last entry of the list as constant step size.
step_up_factor: allows different sizes for up and down steps to implement a Kaernbach1991 weighted
up-down method. step_sizes sets down steps, which are multiplied by step_up_factor to obtain up step
sizes. The default is 1, i.e. same size for up and down steps.
n_pretrials (int): number of trials presented at the initial stimulus value before the start of the staircase
n_up (int): number of `incorrect` (or 0) responses before the staircase level increases. Is 1, regardless of
specified value until the first reversal. Lewitt (1971) gives the up-down values for different threshold
points on the psychometric function: 1-1 (0.5), 1-2 (0.707), 1-3 (0.794), 1-4 (0.841), 1-5 (0.891).
n_down (int): number of `correct` (or 1) responses before the staircase level decreases (see `n_up`).
step_type (str): defines the change of stimulus intensity at each step of the staircase. possible inputs are
'lin' (adds or subtract a certain amount), 'db', and 'log' (prevents the intensity from reaching zero).
min_val (int or float): smallest stimulus value permitted, or -Inf for staircase without lower limit
max_val (int or float): largest stimulus value permitted, or Inf for staircase without upper limit
label (str): text label for the sequence, defaults to an empty string
Attributes:
.this_trial_n: number of completed trials
.intensities: presented stimulus values
.current_direction: 'up' or 'down'
.data: list of responses
.reversal_points: indices of reversal trials
.reversal_intensities: stimulus values at the reversals (used to compute threshold)
.finished: True/False: have we finished yet?
Examples::
stairs = Staircase(start_val=50, n_reversals=10, step_type='lin',
step_sizes=[4,2], min_val=10, max_val=60, n_up=1, n_down=1)
print(stairs)
for trial in stairs:
response = stairs.simulate_response(30)
stairs.add_response(response)
print(f'reversals: {stairs.reversal_intensities}')
print(f'mean of final 6 reversals: {stairs.threshold()}')
"""
def __init__(self, start_val, n_reversals=None, step_sizes=1, step_up_factor=1, n_pretrials=0, n_up=1,
n_down=2, step_type='lin', min_val=-numpy.Inf, max_val=numpy.Inf, label=''):
self.label = label
self.start_val = start_val
self.n_up = n_up
self.n_down = n_down
self.step_type = step_type
try:
self.step_sizes = list(step_sizes)
except TypeError:
self.step_sizes = [step_sizes]
self._variable_step = True if len(self.step_sizes) > 1 else False
self.step_up_factor = step_up_factor
self.step_size_current = self.step_sizes[0]
if n_reversals is None:
if len(self.step_sizes) == 1:
self.n_reversals = 8 # if Staircase called without parameters, construct a short 8-reversal test
else:
self.n_reversals = len(self.step_sizes) + 1 # otherwise dependent on number of step sizes
elif len(self.step_sizes) > n_reversals:
print(
f'Increasing number of minimum required reversals to the number of step sizes, {len(self.step_sizes)}')
self.n_reversals = len(self.step_sizes)
else:
self.n_reversals = n_reversals
self.finished = False
self.n_pretrials = n_pretrials
self.this_trial_n = -n_pretrials
self.data = []
self.intensities = []
self.reversal_points = []
self.reversal_intensities = []
self.current_direction = 'down'
self.correct_counter = 0
self._next_intensity = self.start_val
self.min_val = min_val
self.max_val = max_val
self.pf_intensities = None # psychometric function, auto set when finished
self.pf_percent_correct = None # psychometric function, auto set when finished
self.pf_responses_per_intensity = None # psychometric function, auto set when finished
def __next__(self):
"""
Called when iterating through a sequence. Advances to the next trial and returns it. Updates the
`this_trial_n` and `intensities` attributes. If the trials have ended, this method raises a StopIteration.
Returns:
(int | float | StopIteration): the intensity for the next trial which is calculated by the
`_next_intensity` method. If the sequence is finished a StopIteration is returned instead.
"""
if not self.finished:
self.this_trial_n += 1 # update pointer for next trial
self.intensities.append(self._next_intensity)
return self._next_intensity
else:
self._psychometric_function() # tally responses to create a psychometric function
raise StopIteration
def __repr__(self):
return self.__dict__.__repr__()
def __str__(self):
return f'Staircase {self.n_up}up-{self.n_down}down, trial {self.this_trial_n},' \
f' {len(self.reversal_intensities)} reversals of {self.n_reversals}'
def add_response(self, result, intensity=None):
"""
Add a True or 1 to indicate a correct/detected trial
or False or 0 to indicate an incorrect/missed trial.
This is essential to advance the staircase to a new intensity level.
Supplying an `intensity` value indicates that you did not use
the recommended intensity in your last trial and the staircase will
replace its recorded value with the one supplied.
"""
if self._next_intensity <= self.min_val: # always record False if at min_val
result = False
else:
result = bool(result)
self.data.append(result)
if intensity is not None:
self.intensities.pop()
self.intensities.append(intensity)
if self.this_trial_n > 0: # we're out of the pretrials
if result: # correct response
if len(self.data) > 1 and self.data[-2] == result:
self.correct_counter += 1 # increment if on a run
else:
self.correct_counter = 1 # or reset
else: # incorrect response
if len(self.data) > 1 and self.data[-2] == result:
self.correct_counter -= 1 # decrement if on a run
else:
self.correct_counter = -1 # or reset
self.calculate_next_intensity()
def calculate_next_intensity(self):
""" Based on current intensity, counter of correct responses, and current direction. """
# TODO: description of how the current intensity is calculated
if not self.reversal_intensities: # no reversals yet
if self.data[-1] is True: # last answer correct
reversal = bool(self.current_direction == 'up') # got it right
self.current_direction = 'down'
else: # got it wrong
reversal = bool(self.current_direction == 'down')
self.current_direction = 'up'
elif self.correct_counter >= self.n_down: # n right, time to go down!
reversal = bool(self.current_direction != 'down')
self.current_direction = 'down'
elif self.correct_counter <= -self.n_up: # n wrong, time to go up!
reversal = bool(self.current_direction != 'up')
self.current_direction = 'up'
else: # same as previous trial
reversal = False
if reversal: # add reversal info
self.reversal_points.append(self.this_trial_n)
self.reversal_intensities.append(self.intensities[-1])
if len(self.reversal_intensities) >= self.n_reversals:
self.finished = True # we're done
# if reversal and self._variable_step: # new step size if necessary
# if beyond the list of step sizes, use the last one
if len(self.reversal_intensities) >= len(self.step_sizes):
self.step_size_current = self.step_sizes[-1]
else:
_sz = len(self.reversal_intensities)
self.step_size_current = self.step_sizes[_sz]
if self.current_direction == 'up':
self.step_size_current *= self.step_up_factor # apply factor for weighted up/down method
if not self.reversal_intensities:
if self.data[-1] == 1:
self._intensity_dec()
else:
self._intensity_inc()
elif self.correct_counter >= self.n_down:
self._intensity_dec() # n right, so going down
elif self.correct_counter <= -self.n_up:
self._intensity_inc() # n wrong, so going up
def _intensity_inc(self):
""" increment the current intensity and reset counter. """
if self.step_type == 'db':
self._next_intensity *= 10.0**(self.step_size_current/20.0)
elif self.step_type == 'log':
self._next_intensity *= 10.0**self.step_size_current
elif self.step_type == 'lin':
self._next_intensity += self.step_size_current
if (self.max_val is not None) and (self._next_intensity > self.max_val):
self._next_intensity = self.max_val # check we haven't gone out of the legal range
self.correct_counter = 0
def _intensity_dec(self):
""" decrement the current intensity and reset counter. """
if self.step_type == 'db':
self._next_intensity /= 10.0**(self.step_size_current/20.0)
elif self.step_type == 'log':
self._next_intensity /= 10.0**self.step_size_current
elif self.step_type == 'lin':
self._next_intensity -= self.step_size_current
self.correct_counter = 0
if (self.min_val is not None) and (self._next_intensity < self.min_val):
self._next_intensity = self.min_val # check we haven't gone out of the legal range
def threshold(self, n=0):
"""
Returns the average of the last n reversals.
Arguments:
n (int): number of reversals to average over; if 0 (default), the last `n_reversals` - 1 reversals are used.
Returns:
the arithmetic (if `step_type` == 'lin') or geometric mean of the `reversal_intensities`.
"""
if self.finished:
if n == 0 or n > self.n_reversals:
n = int(self.n_reversals) - 1
if self.step_type == 'lin':
return numpy.mean(self.reversal_intensities[-n:])
return numpy.exp(numpy.mean(numpy.log(self.reversal_intensities[-n:])))
return None # still running the staircase
def print_trial_info(self):
""" Convenience method for printing current trial information. s """
print(
f'{self.label} | trial # {self.this_trial_n}: reversals: {len(self.reversal_points)}/{self.n_reversals},'
f' intensity {round(self.intensities[-1],2) if self.intensities else round(self._next_intensity,2)},'
f' going {self.current_direction}, response {self.data[-1] if self.data else None}')
def save_csv(self, filename):
"""
Write a csv text file with the stimulus values in the 1st line and the corresponding responses in the 2nd.
Arguments:
filename (str): the name under which the csv file is saved.
Returns:
(bool): True if saving was successful, False if there are no trials to save.
"""
if self.this_trial_n < 1:
return False # no trials to save
with open(filename, 'w') as f:
raw_intensities = str(self.intensities)
raw_intensities = raw_intensities.replace('[', '').replace(']', '')
f.write(raw_intensities)
f.write('\n')
responses = str(numpy.multiply(self.data, 1)) # convert to 0 / 1
responses = responses.replace('[', '').replace(']', '')
responses = responses.replace(' ', ', ')
f.write(responses)
return True
def plot(self, axis=None, show=True):
"""
Plot the staircase. If called after each trial, one plot is created and updated.
Arguments:
axis (matplotlib.pyplot.Axes): plot axis to draw on, if none a new plot is generated
show (bool): whether to show the plot right after drawing.
"""
if plt is None:
raise ImportError('Plotting requires matplotlib!')
if self.intensities: # plotting only after first response
x = numpy.arange(-self.n_pretrials, len(self.intensities)-self.n_pretrials)
y = numpy.array(self.intensities) # all previously played intensities
responses = numpy.array(self.data)
if axis is None:
fig = plt.figure('stairs') # figure 'stairs' is created or made current
axis = fig.gca()
axis.clear()
axis.plot(x, y)
axis.set_xlim(-self.n_pretrials, max(20, (self.this_trial_n + 15)//10*10))
axis.set_ylim(min(0, min(y)) if self.min_val == -numpy.Inf else self.min_val,
max(y) if self.max_val == numpy.Inf else self.max_val)
# plot green dots at correct/yes responses
axis.scatter(x[responses], y[responses], color='green')
# plot red dots at incorrect/no responses
axis.scatter(x[~responses], y[~responses], color='red')
axis.scatter(len(self.intensities)-self.n_pretrials+1, self._next_intensity, color='grey') # current trial
axis.set_ylabel('Dependent variable')
axis.set_xlabel('Trial')
axis.set_title('Staircase')
if self.finished:
axis.hlines(self.threshold(), min(x), max(x), 'r')
plt.draw()
if show:
plt.pause(0.01)
@staticmethod
def close_plot():
""" Closes a staircase plot (if not drawn into a specified axis) - used for plotting after each trial. """
plt.close('stairs')
def _psychometric_function(self):
"""
Create a psychometric function by binning data from a staircase procedure.
Called automatically when staircase is finished. Sets attributes `pf_intensities` (array of intensity values
where each is the center of an intensity bin), `pf_percent_correct` (array of mean percent correct in each bin),
`pf_responses_per_intensity` (array of number of responses contributing to each mean).
"""
intensities = numpy.array(self.intensities)
responses = numpy.array(self.data)
binned_resp = []
binned_intensities = []
n_points = []
intensities = numpy.round(intensities, decimals=8)
unique_intensities = numpy.unique(intensities)
import faulthandler
import math
import os
import unittest
import numpy
faulthandler.enable() # to debug seg faults and timeouts
import cfunits
from cfunits import Units
class UnitsTest(unittest.TestCase):
"""Tests the `Units` class."""
def test_Units___eq__(self):
"""Tests the `___eq__` operator on `Units`."""
self.assertEqual(Units(""), Units(""))
self.assertEqual(Units("18"), Units("18"))
self.assertEqual(Units("1"), Units("1"))
self.assertEqual(Units("m"), Units("m"))
self.assertEqual(Units("m"), Units("metres"))
self.assertEqual(Units("m"), Units("meTRES"))
self.assertEqual(
Units("days since 2000-1-1"), Units("d since 2000-1-1 0:0")
)
self.assertNotEqual(
Units("days since 2000-1-1"), Units("h since 1234-1-1 0:0")
)
self.assertEqual(
Units("days since 2000-1-1"),
Units("d since 2000-1-1 0:0", calendar="gregorian"),
)
self.assertEqual(
Units("days since 2000-1-1"),
Units("d since 2000-1-1 0:0", calendar="standard"),
)
self.assertEqual(Units(calendar="noleap"), Units(calendar="noleap"))
self.assertEqual(Units(calendar="noleap"), Units(calendar="365_day"))
self.assertEqual(Units(calendar="nOLEAP"), Units(calendar="365_dAY"))
self.assertEqual(
Units("days since 2000-1-1", calendar="all_leap"),
Units("d since 2000-1-1 0:0", calendar="366_day"),
)
self.assertNotEqual(
Units("days since 2000-1-1", calendar="all_leap"),
Units("h since 2000-1-1 0:0", calendar="366_day"),
)
self.assertNotEqual(Units(1), Units(1))
self.assertNotEqual(Units(1), Units(2))
self.assertNotEqual(Units(1), Units())
self.assertNotEqual(Units(1), Units(""))
self.assertNotEqual(Units(1), Units(" "))
self.assertNotEqual(Units(1), Units("metre"))
def test_Units_equivalent(self):
"""Tests the `equivalent` method on `Units`."""
self.assertTrue(Units().equivalent(Units()))
self.assertTrue(Units(" ").equivalent(Units()))
self.assertTrue(Units("").equivalent(Units()))
self.assertTrue(Units().equivalent(Units("")))
self.assertTrue(Units().equivalent(Units(" ")))
self.assertTrue(Units("").equivalent(Units("")))
self.assertTrue(Units("").equivalent(Units(" ")))
self.assertTrue(Units("").equivalent(Units("1")))
self.assertTrue(Units("").equivalent(Units("18")))
self.assertTrue(Units("18").equivalent(Units("1")))
self.assertTrue(Units("18").equivalent(Units("18")))
self.assertTrue(Units("1)").equivalent(Units("1")))
self.assertTrue(Units("m").equivalent(Units("m")))
self.assertTrue(Units("meter").equivalent(Units("km")))
self.assertTrue(Units("metre").equivalent(Units("mile")))
self.assertTrue(Units("s").equivalent(Units("h")))
self.assertTrue(Units("s").equivalent(Units("day")))
self.assertTrue(Units("second").equivalent(Units("month")))
self.assertTrue(
Units(calendar="noleap").equivalent(Units(calendar="noleap"))
)
self.assertTrue(
Units(calendar="noleap").equivalent(Units(calendar="365_day"))
)
self.assertTrue(
Units(calendar="nOLEAP").equivalent(Units(calendar="365_dAY"))
)
self.assertTrue(
Units("days since 2000-1-1").equivalent(
Units("d since 2000-1-1 0:0")
)
)
self.assertTrue(
Units("days since 2000-1-1").equivalent(
Units("h since 1234-1-1 0:0")
)
)
self.assertTrue(
Units("days since 2000-1-1").equivalent(
Units("d since 2000-1-1 0:0", calendar="gregorian")
)
)
self.assertTrue(
Units("days since 2000-1-1").equivalent(
Units("h since 1234-1-1 0:0", calendar="standard")
)
)
self.assertTrue(
Units("days since 2000-1-1", calendar="all_leap").equivalent(
Units("d since 2000-1-1 0:0", calendar="366_day")
)
)
self.assertTrue(
Units("days since 2000-1-1", calendar="all_leap").equivalent(
Units("h since 1234-1-1 0:0", calendar="366_day")
)
)
u = Units("days since 2000-02-02", calendar="standard")
v = Units("months since 2000-02-02", calendar="standard")
self.assertNotEqual(u, v)
u = Units("days since 2000-02-02", calendar="standard")
v = Units("months since 2000-02-02", calendar="gregorian")
self.assertNotEqual(u, v)
self.assertFalse(Units(1).equivalent(Units(1)))
self.assertFalse(Units().equivalent(Units(1)))
self.assertFalse(Units(2).equivalent(Units(1)))
self.assertFalse(Units("").equivalent(Units(1)))
self.assertFalse(Units(" ").equivalent(Units(1)))
self.assertFalse(Units("1").equivalent(Units(1)))
def test_Units_conform(self):
"""Tests the `conform` class method on `Units`."""
self.assertEqual(Units.conform(0.5, Units("km"), Units("m")), 500)
self.assertEqual(
Units.conform(360, Units("second"), Units("minute")), 6
)
x = Units.conform([360], Units("second"), Units("minute"))
self.assertIsInstance(x, numpy.ndarray)
self.assertTrue(numpy.allclose(x, 6))
x = Units.conform((360, 720), Units("second"), Units("minute"))
self.assertIsInstance(x, numpy.ndarray)
self.assertTrue(numpy.allclose(x, [6, 12]))
x = Units.conform([360.0, 720.0], Units("second"), Units("minute"))
self.assertIsInstance(x, numpy.ndarray)
self.assertTrue(numpy.allclose(x, [6, 12]))
"""
Hidden Markov Tree model
"""
from abc import ABCMeta
from collections import namedtuple
import os
import scipy
from config import RES_DIR, CHROM_SIZES
from data_provider import SeqLoader
from hmm.HMMModel import _ContinuousEmission
from hmm.bwiter import bw_iter, IteratorCondition
__author__ = 'eranroz'
import numpy as np
class HMTModel(object):
"""
base model for HMT
see Crouse 1997 and Durand 2013
"""
__metaclass__ = ABCMeta
MIN_STD = 0.1
def __init__(self, state_transition, mean_vars, emission_density=scipy.stats.norm):
"""
Initializes a new HMT model.
@param state_transition: state transition matrix.
with rows - source state, cols - target state.
0 state assumed to be the begin state (pi - distribution for root of the tree)
@param mean_vars: matrix with rows=num of states and cols =2,
where the first column is mean and second is variance
"""
self.state_transition = state_transition
self.mean_vars = mean_vars
self.emission_density = emission_density
self.emission = _ContinuousEmission(mean_vars, emission_density)
self.min_alpha = None
def num_states(self):
"""
Get number of states in the model
"""
return self.state_transition.shape[0]
def level_emission(self, level):
"""
Emission for level. Override it to assign different emissions for different levels.
@param level: level where 0 is the root
@return: an emission matrix (indexable object) with rows as states and columns as values for emission
"""
return self.emission
def maximize(self, sequence_tree, ud_output):
"""
Maximization step for in Upward-Downward algorithm (EM)
@param sequence_tree: symbol sequence
@param ud_output results of upward downward (scaling version)
"""
self._maximize_emission(sequence_tree, ud_output.state_p)
self.state_transition[0, 1:] = ud_output.state_p[-1]
self.state_transition[1:, 1:] *= ud_output.transition_stat
#normalize
self.state_transition /= np.sum(self.state_transition, 1)[:, None]
if self.min_alpha is not None:
n_states = self.state_transition.shape[0]-1 # minus begin/root state
diagonal_selector = np.eye(n_states, dtype='bool')
self_transitions = self.state_transition[1:, 1:][diagonal_selector]
n_self_transitions = np.maximum(self.min_alpha, self_transitions)
# reduce the diff from the rest of transitions equally
self.state_transition[1:, 1:][~diagonal_selector] -= (n_self_transitions-self_transitions)/(n_states-1)
self.state_transition[1:, 1:][diagonal_selector] = n_self_transitions
print('State transition')
print(self.state_transition)
def _maximize_emission(self, sequence_tree, gammas):
n_states = self.num_states() - 1
n_levels = len(sequence_tree)
means_levels = np.zeros((n_levels, n_states))
vars_levels = np.zeros((n_levels, n_states))
state_norm_levels = np.zeros((n_levels, n_states))
scale_level = 0
for gamma, seq in zip(gammas, sequence_tree):
state_norm = np.sum(gamma, 0)
mu = np.sum(gamma * seq[:, None], 0) / state_norm
sym_min_mu = np.power(seq[:, None] - mu, 2)
std = np.sum(gamma * sym_min_mu, 0) / state_norm
state_norm_levels[scale_level, :] = state_norm
vars_levels[scale_level, :] = np.sqrt(std)
means_levels[scale_level, :] = mu
scale_level += 1
state_norm_levels = state_norm_levels / np.sum(state_norm_levels, 0)
state_means = np.sum(means_levels * state_norm_levels, 0)
state_vars = np.maximum(HMTModel.MIN_STD, np.sum(vars_levels * state_norm_levels, 0))
self.mean_vars = np.column_stack([state_means, state_vars])
self.emission = _ContinuousEmission(self.mean_vars)
print(self.emission)
def viterbi(self, sequence_tree):
"""
Viterbi algorithm based on Durand 2013 and in log space
@param sequence_tree: tree-like array, where sequence[0]=scale 1, sequence[1]=scale 2 etc...
@return: most probable state for each node
"""
# upward
n_states = self.state_transition.shape[0] - 1 # the begin is fake
transition = np.log(self.state_transition[1:, 1:])
p_u = []
wave_lvl_iterator = iter(sequence_tree)
# initialization
# leaves
leaves = next(wave_lvl_iterator)
scale_level = len(sequence_tree)
emission = self.level_emission(scale_level)
curr_b_u_tree = emission[:, leaves]
curr_b_u_tree = np.log(curr_b_u_tree)
p_u.append(curr_b_u_tree)
back_map = []
for lvl in wave_lvl_iterator:
scale_level -= 1
emission = self.level_emission(scale_level)
prev_up = np.array([np.max(transition[state, :]+p_u[-1], 1) for state in np.arange(0, n_states)]).T
back_map.append(np.array([np.argmax(transition[state, :]+p_u[-1], 1) for state in np.arange(0, n_states)]).T)
curr_b_u_tree = (prev_up[::2, :]+prev_up[1::2, :])+emission[:, lvl]
p_u.append(curr_b_u_tree)
p = np.max(p_u[-1][0]+np.log(self.state_transition[0, 1:]))
print('Log likelihood', p)
probable_tree = [np.argmax(p_u[-1][0] + np.log(self.state_transition[0, 1:]))]
# maximum "downward"
for lvl_max in back_map[::-1]:
likely_parent = probable_tree[-1]
likely_parent = np.array([likely_parent, likely_parent]).T.reshape(lvl_max.shape[0])
probable_tree.append(lvl_max[np.arange(0, lvl_max.shape[0]), likely_parent])
return probable_tree
def upward_downward(self, sequence_tree, iterations=3):
"""
upward-downward algorithm/EM
@param sequence_tree: tree-like array, where sequence[0]=scale 1, sequence[1]=scale 2 etc...
@param iterations
Remarks:
* implementation based on scaled version, see Durand & Goncalves
* you may use dwt(prepare_sequence(seq)) to get the wavelet coefficients
"""
res = None
for em_iteration in range(1, iterations):
res = self.forward_backward(sequence_tree)
self.maximize(sequence_tree, res)
print(em_iteration, 'P:', res.model_p)
print(self.state_transition)
print(self.mean_vars)
return res.state_p[0]
def forward_backward(self, sequence_tree):
"""
Actual implementation for upward downward - calculates the likelihood for each node in the tree
@param sequence_tree:
@return:
"""
n_states = self.state_transition.shape[0] - 1 # the begin is fake
transition = self.state_transition[1:, 1:]
# == initial distribution of hidden state (Algorithm 3 in Durand) ==
initial_dist = np.zeros((len(sequence_tree), n_states), order='F') # rows - tree levels, cols - state
init_iterator = np.nditer(initial_dist[::-1], op_flags=['writeonly'], flags=['external_loop'], order='C')
next(init_iterator)[...] = self.state_transition[0, 1:]
for _ in sequence_tree[:0:-1]:
next_lvl = np.dot(init_iterator, transition)
next(init_iterator)[...] = next_lvl
# end of algorithm 3
# conditional upward algorithm (Algorithm 4 in Durand)
b_u_tree = []
b_up_tree = []
init_iterator = np.nditer(initial_dist, op_flags=['readonly'], flags=['external_loop'], order='C')
wave_lvl_iterator = iter(sequence_tree)
# initialization
# leaves
leaves = next(wave_lvl_iterator)
emission = self.level_emission(len(sequence_tree))
#curr_b_u_tree = np.array([emission[:, w] for w in leaves]) * next(init_iterator)
curr_b_u_tree = emission[:, leaves] * next(init_iterator)
# normalize
curr_b_u_tree = curr_b_u_tree / np.sum(curr_b_u_tree, 1)[:, None]
b_u_tree.append(curr_b_u_tree)
curr_b_u_tree = np.dot(curr_b_u_tree / init_iterator, transition.T)
b_up_tree.append(curr_b_u_tree)
lop_u = [np.zeros(len(sequence_tree[0]))]
# induction
m_u = []
scale_level = len(sequence_tree)
for lvl, lvl_scale in zip(wave_lvl_iterator, init_iterator):
scale_level -= 1
emission = self.level_emission(scale_level)
prev_up = b_up_tree[-1]
prev_up = np.product(np.array([prev_up[::2], prev_up[1::2]]), 0)
lvl_emis = emission[:, lvl]
p_of_u = lvl_emis * prev_up * lvl_scale
curr_mu = np.sum(p_of_u, 1)
m_u.append(curr_mu)
prev_lu = np.sum(np.array([lop_u[-1][::2], lop_u[-1][1::2]]), 0)
lop_u.append(np.log(curr_mu) + prev_lu)
curr_b_u_tree = p_of_u / curr_mu[:, None]
b_u_tree.append(curr_b_u_tree)
if len(lvl) > 1: # except at root node
curr_b_u_tree = np.dot(curr_b_u_tree / lvl_scale, transition.T) # or lvl_scale outside
b_up_tree.append(curr_b_u_tree)
# end of upward
# downward (algorithm 5)
# initiation
alphas = [np.ones(n_states)]
prev_alpha = np.array([np.ones(n_states)])
b_u_tree_iterator = iter(b_u_tree[::-1])
b_up_tree_iterator = iter(b_up_tree[::-1])
init_iterator = np.nditer(initial_dist[::-1], op_flags=['readonly'], flags=['external_loop'], order='C')
next(init_iterator)
transition_statistic = np.zeros((n_states, n_states))
cur_stat = np.array([[0, 0]])
for bt, scale, b_up in zip(b_u_tree_iterator, init_iterator, b_up_tree_iterator):
transition_statistic += np.dot(bt.T, cur_stat) / scale
a_p = np.array([prev_alpha, prev_alpha]).T.reshape(n_states, len(prev_alpha) * 2).T
b_p = np.array([bt, bt]).T.reshape(n_states, len(bt) * 2).T
cur_stat = a_p * b_p / b_up
prev_alpha = np.dot(cur_stat, transition) / scale
alphas.append(prev_alpha)
# M step
#collecting statistics (expectations)
state_p = [] # likelihood for each level in the tree
for aa, bb, ww in zip(alphas, b_u_tree[::-1], sequence_tree[::-1]):
gamma = aa * bb
state_p.insert(0, gamma)
ud_result = namedtuple('UDResult', 'model_p state_p transition_stat')
return ud_result(lop_u[-1], state_p, transition_statistic)
def dwt(signal, h=np.array([1.0 / 2, -1.0 / 2]), g=np.array([1.0 / 2, 1.0 / 2])):
"""
Simple discrete wavelet transform.
for good reference: http://www.mathworks.com/help/wavelet/ref/dwt.html
@param signal: signal to create dwt for. the signal must be log2(signal)%1=0
@param h: high pass filter (for details space)
@param g: low pass filter (for approximation space)
@return: zip(scaling arrays, wavelet arrays)
"""
scaling_coefficients = []
wavelets_coefficients = []
approx = signal
while len(approx) != 1:
details = np.convolve(approx, h)[h.size - 1::2]
wavelets_coefficients.append(details)
approx = np.convolve(approx, g)[g.size - 1::2]
scaling_coefficients.append(approx)
return scaling_coefficients, wavelets_coefficients
def idwt(approx, wavelets, h=np.array([1.0 / np.sqrt(2), -1.0 / np.sqrt(2)]),
g=np.array([1.0 / np.sqrt(2), 1.0 / np.sqrt(2)])):
"""
Simple inverse discrete wavelet transform.
for good reference: http://www.mathworks.com/help/wavelet/ref/dwt.html
@param approx: approximation of signal at low resolution
@param h: high pass filter (for details space)
@param g: low pass filter (for approximation space)
@return: recovered signal
"""
wave_level = iter(wavelets[::-1])
h, g = g[::-1], h[::-1]
recovered = approx
for wave in wave_level:
#upsample
recovered = np.column_stack([recovered, np.zeros(recovered.size)]).flatten()
wave_up = np.column_stack([wave, np.zeros(wave.size)]).flatten()
recovered = np.convolve(recovered, h)[:-(h.size - 1)]
recovered = recovered + np.convolve(wave_up, g)[:-(g.size - 1)]
return recovered
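# Illustrative sketch (not part of the original module): a round-trip check for
# dwt/idwt. Perfect reconstruction is only expected when the orthonormal Haar
# filters are passed to dwt explicitly (its defaults are the unnormalized 1/2
# pair, while idwt defaults to the 1/sqrt(2) pair).
def _example_dwt_roundtrip():
    signal = np.array([4.0, 6.0, 10.0, 12.0, 8.0, 6.0, 5.0, 5.0])  # length must be a power of two
    h = np.array([1.0 / np.sqrt(2), -1.0 / np.sqrt(2)])
    g = np.array([1.0 / np.sqrt(2), 1.0 / np.sqrt(2)])
    approximations, details = dwt(signal, h=h, g=g)
    recovered = idwt(approximations[-1], details)
    assert np.allclose(recovered, signal)
    return recovered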
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = np.loadtxt("data.dat")
temp = np.zeros((34, 34))
df = pd.DataFrame(data, columns=["x", "y", "t"])
for index, row in df.iterrows():
print(row.x, row.y, row.t)
temp[int(row.x) - 1, int(row.y) - 1] = row.t
# temp[row.x - 1, row.y - 1] = row.t
sns.heatmap(temp.T, annot=False)
plt.xlabel("X", fontsize=18)
plt.ylabel("Y", fontsize=18)
plt.title("Temperature profile for the 2D lattice (Q4)", fontsize=20)
plt.xticks(np.arange(0.5, 34.5, 5), np.arange(1, 35, 5))
plt.yticks(np.arange(0.5, 34.5, 5), np.arange(1, 35, 5))
from __future__ import division, print_function, absolute_import
import numpy as np
from .monomer_aux import Xi, dXi, d2Xi
from .monomer_aux import dkHS, d2kHS, d3kHS
from .a1sB_monomer import da1B_eval, d2a1B_eval, d3a1B_eval
from .aHS_monomer import ahs, dahs_deta, d2ahs_deta
from .a2m_monomer import a2m, da2m_deta, d2a2m_deta
from .a2m_monomer import da2m_new_deta, d2a2m_new_deta, d3a2m_new_deta
from .a3m_monomer import a3m, da3m_deta, d2a3m_deta
from .gdHS_chain import gdHS, dgdHS_drho, d2gdHS_drho
from .g1sigma_chain import g1sigma, dg1sigma_drho, d2g1sigma_drho
from .g2sigma_chain import g2MCA, dg2MCA_drho, d2g2MCA_drho
from .g2sigma_chain import gammac, dgammac_deta, d2gammac_deta
from .lngmie_chain import lngmie, dlngmie_drho, d2lngmie_drho
from .association_aux import Xass_solver, Iab, dIab_drho
from .association_aux import d2Iab_drho, dXass_drho, d2Xass_drho
from .polarGV import Apol, dApol_drho, d2Apol_drho
def ares(self, rho, temp_aux, Xass0=None):
beta, beta2, beta3, dia, dia3, x0, x03, x0_a1, x0_a2 = temp_aux[:9]
x0_a12, x0_a22, I_lambdas, J_lambdas, beps, beps2, tetha = temp_aux[9:16]
x0_vector, cte_g1s, cte_g2s = temp_aux[16:19]
eta, deta = self.eta_bh(rho, dia3)
nsigma = self.eta_sigma(rho)
# Parameters needed for evaluating the Helmholtz contributions
a1sb_a1, a1sb_a2 = da1B_eval(eta, I_lambdas, J_lambdas, self.lambdas,
self.cctes, self.eps)
dkhs = dkHS(eta)
xi = Xi(x03, nsigma, self.f1, self.f2, self.f3)
cte_a2m = self.cte_a2m
eps3 = self.eps3
# Monomer contribution
ahs_eval = ahs(eta)
a1m_eval = self.c*np.matmul(a1sb_a1, x0_a1)
suma_a2 = np.matmul(a1sb_a2, x0_a2)
a2m_eval = a2m(suma_a2[0], dkhs[0], xi, cte_a2m)
a3m_eval = a3m(x03, nsigma, eps3, self.f4, self.f5, self.f6)
a_mono = ahs_eval + beta*a1m_eval[0] + beta2*a2m_eval + beta3*a3m_eval
a_mono *= self.ms
# chain contribution calculation
ghs = gdHS(x0_vector, eta)
# g1sigma
suma_g1 = self.c * np.dot(a1sb_a1[0], x0_a12)
g1s = g1sigma(rho, suma_g1, a1m_eval[1], deta, cte_g1s)
# g2sigma
gc = gammac(x0, nsigma, self.alpha, tetha)
da2m_new = da2m_new_deta(suma_a2, dkhs, cte_a2m)
suma_g2 = self.c2*np.dot(a1sb_a2[0], x0_a22)
g2m = g2MCA(rho, suma_g2, da2m_new, dkhs[0], self.eps, cte_g2s, deta)
g2s = (1+gc)*g2m
lng = lngmie(ghs, g1s, g2s, beps, beps2)
a_chain = - (self.ms - 1. + self.ring*eta)*lng
# Total Helmholtz
a = a_mono + a_chain
if self.assoc_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
Fab = temp_aux[19]
Kab = temp_aux[20]
iab = Iab(Kab, eta)
Dab = self.sigma3 * Fab * iab
Dabij = np.zeros([self.nsites, self.nsites])
Dabij[self.indexabij] = Dab
KIJ = rho * (self.DIJ*Dabij)
Xass = Xass_solver(self.nsites, KIJ, self.diagasso, Xass0)
a += np.dot(self.S, (np.log(Xass) - Xass/2 + 1/2))
else:
Xass = None
if self.polar_bool:
epsa = temp_aux[21]
apolar = Apol(rho, eta, epsa, self.anij, self.bnij,
self.cnijk, self.mupolad2, self.npol, self.sigma3)
a += apolar
return a, Xass
def dares_drho(self, rho, temp_aux, Xass0=None):
beta, beta2, beta3, dia, dia3, x0, x03, x0_a1, x0_a2 = temp_aux[:9]
x0_a12, x0_a22, I_lambdas, J_lambdas, beps, beps2, tetha = temp_aux[9:16]
x0_vector, cte_g1s, cte_g2s = temp_aux[16:19]
eta, deta = self.eta_bh(rho, dia3)
nsigma = self.eta_sigma(rho)
drho = np.array([1., deta, deta**2])
# Parameters needed for evaluating the Helmholtz contributions
a1sb_a1, a1sb_a2 = d2a1B_eval(eta, I_lambdas, J_lambdas, self.lambdas,
self.cctes, self.eps)
dkhs = d2kHS(eta)
dxi = dXi(x03, nsigma, self.f1, self.f2, self.f3)
cte_a2m = self.cte_a2m
eps3 = self.eps3
# monomer evaluation
ahs_eval = dahs_deta(eta)
a1m_eval = self.c*np.matmul(a1sb_a1, x0_a1)
suma_a2 = np.matmul(a1sb_a2, x0_a2)
a2m_eval = da2m_deta(suma_a2[:2], dkhs[:2], dxi, cte_a2m)
a3m_eval = da3m_deta(x03, nsigma, eps3, self.f4, self.f5, self.f6)
a_mono = ahs_eval + beta*a1m_eval[:2] + beta2*a2m_eval + beta3*a3m_eval
a_mono *= self.ms * drho[:2]
# chain contribution calculation
dghs = dgdHS_drho(x0_vector, eta, drho)
# g1sigma
suma_g1 = self.c * np.dot(a1sb_a1[:2], x0_a12)
suma_g1 *= drho[:2]
d2a1m_drho = a1m_eval[1:]*drho[1:]
dg1s = dg1sigma_drho(rho, suma_g1, d2a1m_drho, cte_g1s)
# g2sigma
dgc = dgammac_deta(x03, nsigma, self.alpha, tetha)
dgc *= drho[:2]
da2m_new = d2a2m_new_deta(suma_a2, dkhs, cte_a2m)
da2m_new_drho = da2m_new*drho[1:]
suma_g2 = self.c2*np.dot(a1sb_a2[:2], x0_a22)
suma_g2 *= drho[:2]
dkhs_drho = dkhs[:2]*drho[:2]
dg2m = dg2MCA_drho(rho, suma_g2, da2m_new_drho, dkhs_drho, self.eps,
cte_g2s)
dg2s = dg2m * (1. + dgc[0])
dg2s[1] += dgc[1] * dg2m[0]
dlng = dlngmie_drho(dghs, dg1s, dg2s, beps, beps2)
a_chain = - (self.ms - 1. + self.ring*eta)*dlng
# Total Helmholtz
a = a_mono + a_chain
if self.assoc_bool:
if Xass0 is None:
Xass = 0.2 * np.ones(self.nsites)
else:
Xass = 1. * Xass0
Fab = temp_aux[19]
Kab = temp_aux[20]
iab, diab = dIab_drho(Kab, eta, deta)
# Fab = np.exp(beta * self.eABij) - 1
Dab = self.sigma3 * Fab * iab
dDab = self.sigma3 * Fab * diab
Dabij = np.zeros([self.nsites, self.nsites])
dDabij_drho = np.zeros([self.nsites, self.nsites])
Dabij[self.indexabij] = Dab
dDabij_drho[self.indexabij] = dDab
KIJ = rho * (self.DIJ*Dabij)
Xass = Xass_solver(self.nsites, KIJ, self.diagasso, Xass0)
CIJ = rho * np.tile(Xass**2, (self.nsites, 1)).T * Dabij * self.DIJ
CIJ[self.diagasso] += 1.
dXass = dXass_drho(rho, Xass, self.DIJ, Dabij, dDabij_drho, CIJ)
a[0] += np.dot(self.S, (np.log(Xass) - Xass/2 + 1/2))
a[1] += np.dot(self.S, (1/Xass - 1/2) * dXass)
else:
Xass = None
if self.polar_bool:
epsa = temp_aux[21]
dapolar = dApol_drho(rho, eta, deta, epsa, self.anij,
self.bnij, self.cnijk, self.mupolad2,
self.npol, self.sigma3)
a += dapolar
return a, Xass
def d2ares_drho(self, rho, temp_aux, Xass0=None):
beta, beta2, beta3, dia, dia3, x0, x03, x0_a1, x0_a2 = temp_aux[:9]
x0_a12, x0_a22, I_lambdas, J_lambdas, beps, beps2, tetha = temp_aux[9:16]
x0_vector, cte_g1s, cte_g2s = temp_aux[16:19]
eta, deta = self.eta_bh(rho, dia3)
nsigma = self.eta_sigma(rho)
drho = np.array([1., deta, deta**2, deta**3])
import pandas as pd
import numpy as np
import os
from scipy.spatial import distance
import networkx as nx
import math
import scipy.sparse as sp
from glob import glob
import argparse
import time
parser = argparse.ArgumentParser(description='Main Entrance of MP_MIM_RESEPT')
parser.add_argument('--sampleName', type=str, default='151507')
parser.add_argument('--MP-k-num', type=int, default=90, help='number of k_num in KNN graph of message passing (default: 90)')
parser.add_argument('--MP-l-num', type=int, default=15, help='number of layer_num in message passing (default: 15)')
args = parser.parse_args()
####KNN
# knn_graph_edgelist
def calculateKNNgraphDistanceWeighted(featureMatrix, distanceType, k):
edgeListWeighted = []
for i in np.arange(featureMatrix.shape[0]):
tmp = featureMatrix[i, :].reshape(1, -1)
distMat = distance.cdist(tmp, featureMatrix, distanceType)
res = distMat.argsort()[:k + 1]
tmpdist = distMat[0, res[0][1:k + 1]]
boundary = np.mean(tmpdist) + np.std(tmpdist)
for j in np.arange(1, k + 1):
if distMat[0, res[0][j]] <= boundary and i != res[0][j] :
edgeListWeighted.append((i, res[0][j], 1))
return edgeListWeighted
# generate_adj_nx_matirx
def generate_adj_nx_weighted_adj(featureMatrix, distanceType, k):
edgeList = calculateKNNgraphDistanceWeighted(featureMatrix, distanceType, k)
nodes = range(0,featureMatrix.shape[0])
Gtmp = nx.Graph()
Gtmp.add_nodes_from(nodes)
Gtmp.add_weighted_edges_from(edgeList)
adj = nx.adjacency_matrix(Gtmp)
adj_knn_by_feature = np.array(adj.todense())
return adj_knn_by_feature
# generate_self_loop_adj
def preprocess_graph_self_loop(adj):
adj = sp.coo_matrix(adj)
adj_ = adj + sp.eye(adj.shape[0])
adj_ = adj_.A
return adj_
####MP
# attention_ave
def gat_forward_att_ave(adj, Wh):
attention_ave = adj
attention_ave_par = attention_ave.sum(axis=1, keepdims=True)
attention_ave_final = attention_ave/attention_ave_par
h_prime = np.dot(attention_ave_final, Wh)
return h_prime
# attention_dis
def softmax(X):
X_exp = np.exp(X)
partition = X_exp.sum(axis=1, keepdims=True)
return X_exp/partition
def _prepare_euclidean_attentional_mechanism_input(Wh):
distMat = distance.cdist(Wh, Wh, 'euclidean')
return distMat
def gat_forward_euclidean(adj, Wh):
e = _prepare_euclidean_attentional_mechanism_input(Wh)
zero_vec = -9e15*np.ones_like(e)
attention = np.where(adj > 0, e, zero_vec)
attention = softmax(attention)
h_prime = np.dot(attention, Wh)
return h_prime
# layer_loop_att_ave
def forward_basic_gcn_multi_layer(adj, Wh, layer_num):
hidden = Wh
for num in range(layer_num):
h = gat_forward_att_ave(adj , hidden)
hidden = h
#print(num)
return hidden
# layer_loop_att_euc
def forward_dis_gcn_multi_layer(adj, Wh, layer_num):
hidden = Wh
for num in range(layer_num):
h = gat_forward_euclidean(adj , hidden)
hidden = h
#print(num)
return hidden
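# Illustrative sketch (not part of the original script): build a weighted KNN
# graph from random embeddings, add self-loops, and run a few rounds of
# distance-based message passing using the helpers defined above. The sizes
# used here are arbitrary.
def _example_message_passing(n_points=50, n_dims=3, k=5, layers=3):
    rng = np.random.RandomState(0)
    features = rng.rand(n_points, n_dims)
    adj = generate_adj_nx_weighted_adj(features, distanceType='euclidean', k=k)
    adj_self_loop = preprocess_graph_self_loop(adj)
    smoothed = forward_dis_gcn_multi_layer(adj_self_loop, features, layers)
    return smoothed  # same shape as the input features, progressively smoothed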
####MI_GC
# MI
def Moran_I(multi_hop_weight_mat, feature, MI_type='normal'):
if MI_type == 'normal':
w = multi_hop_weight_mat
y = feature
n = len(y)
z = y - y.mean()
z2ss = (z * z).sum()
s0 = np.sum(w)
zl = np.dot(w , z)
inum = (z * zl).sum()
MI = n / s0 * inum / z2ss
if MI_type == 'row_normalizaiton':
WR_temp = multi_hop_weight_mat
WR = np.zeros((WR_temp.shape[0],WR_temp.shape[1]))
each_row_sum_list=[]
for i in range(WR_temp.shape[0]):
each_row_sum_list.append(np.sum(WR_temp[i,:]))
for i in range(WR_temp.shape[0]):
for j in range(WR_temp.shape[1]):
if WR_temp[i,j] != 0:
WR[i,j] = WR_temp[i,j]/each_row_sum_list[i]
w = WR
y = feature
n = len(y)
z = y - y.mean()
z2ss = (z * z).sum()
s0 = np.sum(w)
zl = np.dot(w , z)
inum = (z * zl).sum()
MI = n / s0 * inum / z2ss
return MI
# GC
def GC_related(multi_hop_weight_mat, feature, GC_type='normal'):
if GC_type == 'normal':
w = multi_hop_weight_mat
y = np.asarray(feature).flatten()
n = len(y)
s0 = np.sum(w)
yd = y - y.mean()
yss = sum(yd * yd)
den = yss * s0 * 2.0
_focal_ix, _neighbor_ix = w.nonzero()
_weights = sp.csr_matrix(w).data
num = (_weights * ((y[_focal_ix] - y[_neighbor_ix])**2)).sum()
a = (n - 1) * num
GC = a / den
if GC > 1:
GC_related = GC - 1
if GC < 1:
GC_related = 1 - GC
if GC == 1:
GC_related = 0
if GC_type == 'row_normalizaiton':
WR_temp = multi_hop_weight_mat
WR = np.zeros((WR_temp.shape[0],WR_temp.shape[1]))
each_row_sum_list=[]
for i in range(WR_temp.shape[0]):
each_row_sum_list.append(np.sum(WR_temp[i,:]))
for i in range(WR_temp.shape[0]):
for j in range(WR_temp.shape[1]):
if WR_temp[i,j] != 0:
WR[i,j] = WR_temp[i,j]/each_row_sum_list[i]
w = WR
y = np.asarray(feature).flatten()
n = len(y)
s0 = np.sum(w)
yd = y - y.mean()
yss = sum(yd * yd)
den = yss * s0 * 2.0
_focal_ix, _neighbor_ix = w.nonzero()
_weights = sp.csr_matrix(w).data
num = (_weights * ((y[_focal_ix] - y[_neighbor_ix])**2)).sum()
a = (n - 1) * num
GC = a / den
if GC > 1:
GC_related = GC - 1
if GC < 1:
GC_related = 1 - GC
if GC == 1:
GC_related = 0
return GC_related
# spatial_adj_knn
def calculateKNNDistanceWeighted_spatial_autocor(featureMatrix, distanceType, k):
edgeListWeighted = []
for i in np.arange(featureMatrix.shape[0]):
tmp = featureMatrix[i, :].reshape(1, -1)
distMat = distance.cdist(tmp, featureMatrix, distanceType)
res = distMat.argsort()[:k + 1]
for j in np.arange(1, k + 1):
edgeListWeighted.append((i, res[0][j], 1))
return edgeListWeighted
# generate_adj_nx_matirx
def generate_spatial_adj_nx_weighted_based_on_coordinate(featureMatrix, distanceType, k):
edgeList = calculateKNNDistanceWeighted_spatial_autocor(featureMatrix, distanceType, k)
nodes = range(0,featureMatrix.shape[0])
Gtmp = nx.Graph()
Gtmp.add_nodes_from(nodes)
Gtmp.add_weighted_edges_from(edgeList)
adj = nx.adjacency_matrix(Gtmp)
adj_knn_by_coordinate = np.array(adj.todense())
return adj_knn_by_coordinate
# spatial_adj_distance
def MI_spatial_adj_matrix(coordinateMatrix, hop_num=1, distanceType='cityblock'):
distMat = distance.cdist(coordinateMatrix, coordinateMatrix, distanceType)
multi_hop_weight_mat = np.zeros((distMat.shape[0] , distMat.shape[1]))
if distanceType == 'euclidean':
if hop_num == 1:
for i in range(distMat.shape[0]):
for j in range(distMat.shape[1]):
if distMat[i][j] <= math.sqrt(2) and distMat[i][j] > 0:
multi_hop_weight_mat[i][j] = 1
return multi_hop_weight_mat
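# Illustrative sketch (not part of the original pipeline): compute row-normalized
# Moran's I for a synthetic feature on a small regular grid, mirroring how the
# main block below combines MI_spatial_adj_matrix and Moran_I.
def _example_moran_i(grid_side=6):
    xs, ys = np.meshgrid(np.arange(grid_side), np.arange(grid_side))
    coords = np.column_stack([xs.ravel(), ys.ravel()]).astype(float)
    w = MI_spatial_adj_matrix(coords, hop_num=1, distanceType='euclidean')
    feature = coords[:, 0] + 0.1 * np.random.rand(coords.shape[0])  # spatially structured signal
    return Moran_I(w, feature, 'row_normalizaiton')  # close to 1 for smooth spatial patterns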
if __name__ == '__main__':
########RESEPT
####time_computing
start_time = time.time()
print("MP_MIM_RESEPT. Start Time: %s seconds" %
(start_time))
####parameter_set_initial
PEalphaList = ['0.1','0.2','0.3', '0.5', '1.0', '1.2', '1.5','2.0']
zdimList = ['3','10', '16','32', '64', '128', '256']
sample = args.sampleName
k_num_distance_att = args.MP_k_num
layer_num_distance_att = args.MP_l_num
####sample_list
sample_list = [ '151507','151508', '151509', '151510', '151669', '151670', '151671', '151672', '151673', '151674', '151675', '151676','18-64','2-5', '2-8', 'T4857']
letter_list = [ 'a','b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l','m', 'n', 'o', 'p']
count_init = sample_list.index(sample)
count = 56*count_init
letter = letter_list[count_init]
embedding_MIrow_max_list = []
embedding_name_list = []
####current_os
meta_folder_path = os.path.abspath('./meta_data_folder/metaData_brain_16_coords')
embedding_folder_path = os.path.abspath('./RESEPT_embedding_folder')
embedding_in_RESEPT_folder = "RESEPT_MP_embedding_"+sample+"/"
if not os.path.exists(embedding_in_RESEPT_folder):
os.makedirs(embedding_in_RESEPT_folder)
####MP_parameter_set
k_num_distance_att_list = [10,20,30,40,50,60,70,80,90]
layer_num_distance_att_list = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
####loop_part
for i in range(len(PEalphaList)):
for j in range((len(zdimList))):
####read_embedding
count = count + 1
embedding_root_path = '/'+sample+'_embedding_raw/'+letter+'_'+str(count)+'_outputdir-3S-'+sample+'_raw_EM1_resolution0.3_euclidean_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'/'+sample+'_raw_6_euclidean_NA_dummy_add_'+str(PEalphaList[i])+'_intersect_160_GridEx19_embedding.csv'
embedding_df = pd.read_csv(embedding_folder_path+embedding_root_path,index_col=0)
embedding_celllist = embedding_df.index.tolist()
graph_embedding_name = sample+'_raw_res0.3_euclidean_NA_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'_gat_self_loop_euc_graphK'+str(k_num_distance_att)+'_layer'+str(layer_num_distance_att)
embedding_name_list.append(graph_embedding_name)
native_embedding_whole = embedding_df[['embedding0','embedding1','embedding2']].values
####embedding_knn_graph
knn_graph_k_num = k_num_distance_att
l_num = layer_num_distance_att
adj = generate_adj_nx_weighted_adj(native_embedding_whole, distanceType='euclidean', k=knn_graph_k_num)
adj_self_loop = preprocess_graph_self_loop(adj)
graph_embedding_whole = forward_dis_gcn_multi_layer(adj_self_loop, native_embedding_whole, l_num)
graph_embedding_add_barcode_df = pd.DataFrame(graph_embedding_whole, index=embedding_celllist, columns=['embedding0','embedding1','embedding2'])
graph_embedding_add_barcode_df.to_csv(embedding_in_RESEPT_folder+sample+'_'+str(count)+'_raw_res0.3_euclidean_NA_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'_gat_self_loop_euc_graphK'+str(knn_graph_k_num)+'_layer'+str(l_num)+'_graph_embedding.csv')
graph_embedding_remove_zero_df = graph_embedding_add_barcode_df.loc[~(graph_embedding_add_barcode_df==0).all(axis=1)]
#print(graph_embedding_remove_zero_df)
graph_embedding_remove_zero_whole = graph_embedding_remove_zero_df[['embedding0','embedding1','embedding2']].values
coordinate_graph_embedding_whole_df = pd.read_csv(meta_folder_path+'/'+sample+'_humanBrain_metaData.csv',index_col=0)
coordinate_graph_embedding_remove_zero_df = coordinate_graph_embedding_whole_df.loc[graph_embedding_remove_zero_df.index]
coordinate_graph_embedding_remove_zero_np = coordinate_graph_embedding_remove_zero_df[['array_row','array_col']].values
####MI_spatial_adj
MI_graph_embedding_spatial_adj = MI_spatial_adj_matrix(coordinate_graph_embedding_remove_zero_np, hop_num=1, distanceType='euclidean')
####MI_max
embedding_MIrow_list = []
for dim_num in range(graph_embedding_remove_zero_whole.shape[1]):
embedding_current_MIrow = Moran_I(MI_graph_embedding_spatial_adj, graph_embedding_remove_zero_whole[:,dim_num], 'row_normalizaiton')
embedding_MIrow_list.append(embedding_current_MIrow)
embedding_MIrow_list_np = np.array(embedding_MIrow_list)
import glob
from matplotlib.patches import Circle
import os.path
from scipy.io import loadmat
import matplotlib.pyplot as plt
import matplotlib
import math
import numpy as np
from . import preprocessing as LP
from scipy.ndimage import rotate
matplotlib.interactive(False)
def show_minutiae(img, minutiae, ROI=None, fname=None, block=True):
# for the latent or the low quality rolled print
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
R = 10
arrow_len = 15
if ROI is not None:
h, w = ROI.shape
for i in range(h):
for j in range(w):
if ROI[i, j] == 0:
img[i, j] = 255
ax.imshow(img, cmap='gray')
minu_num = len(minutiae)
for i in range(0, minu_num):
xx = minutiae[i][0]
yy = minutiae[i][1]
circ = Circle((xx, yy), R, color='r', fill=False)
ax.add_patch(circ)
ori = -minutiae[i][2]
dx = math.cos(ori) * arrow_len
dy = math.sin(ori) * arrow_len
ax.arrow(xx, yy, dx, dy, head_width=0.05, head_length=0.1, fc='r', ec='r')
plt.show(block=block)
if fname is not None:
fig.savefig(fname, dpi=600)
plt.close()
def show_minutiae_sets(img, minutiae_sets, ROI=None, fname=None, block=True):
# for the latent or the low quality rolled print
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
arrow_len = 15
if ROI is not None:
h, w = ROI.shape
for i in range(h):
for j in range(w):
if ROI[i, j] == 0:
img[i, j] = 255
ax.imshow(img, cmap='gray')
color = ['r', 'b']
R = [10, 8, 6]
for k in range(len(minutiae_sets)):
minutiae = minutiae_sets[k]
minu_num = len(minutiae)
for i in range(0, minu_num):
xx = minutiae[i, 0]
yy = minutiae[i, 1]
circ = Circle((xx, yy), R[k], color=color[k], fill=False)
ax.add_patch(circ)
ori = -minutiae[i, 2]
dx = math.cos(ori) * arrow_len
dy = math.sin(ori) * arrow_len
ax.arrow(xx, yy, dx, dy, head_width=0.05, head_length=0.1, fc=color[k], ec=color[k])
plt.show(block=block)
if fname is not None:
fig.savefig(fname, dpi=600)
plt.close()
def modify_minutiae_cylinder(input_file, output_file, angle=None, processing=None):
cylinder = np.load(input_file)
img = cylinder[:, :, 0]
if processing == 'STFT':
img = LP.STFT(img)
cylinder[:, :, 0] = img
np.save(output_file, cylinder)
def extract_minutiae_cylinder(img_input, minutiae_input, ROI=None, num_ori=12, angle=None, processing=None):
# for the latent or the low quality rolled print
minutiae = minutiae_input.copy()
img = img_input.copy()
if processing == 'STFT':
img = LP.STFT(img)
elif processing == 'contrast':
img = LP.local_constrast_enhancement(img)
elif processing == 'texture':
img = LP.FastCartoonTexture(img)
sigma = 5**2
if ROI is not None:
h, w = ROI.shape
for i in range(h):
for j in range(w):
if ROI[i, j] == 0:
img[i, j] = 255
h, w = ROI.shape
col_sum = np.sum(ROI, axis=0)
ind = [x for x in range(len(col_sum)) if col_sum[x] > 0]
min_x = np.max([np.min(ind) - 32, 0])
max_x = np.min([np.max(ind) + 32, w])
row_sum = np.sum(ROI, axis=1)
ind = [x for x in range(len(row_sum)) if row_sum[x] > 0]
min_y = np.max([np.min(ind) - 32, 0])
#!/usr/bin/python
# Author: <NAME>
# Email: <EMAIL>
# Paper: <NAME>, <NAME>, <NAME>, "Improved document image segmentation algorithm using multiresolution morphology," Proc. SPIE 7874, Document Recognition and Retrieval XVIII, 78740D (24 January 2011);
# "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, “anyOCR: An Open-Source OCR System for Historical Archives”, The 14th IAPR International Conference on Document Analysis and Recognition (ICDAR 2017), Kyoto, Japan, 2017.
# URL - https://www.dfki.de/fileadmin/user_upload/import/9512_ICDAR2017_anyOCR.pdf
import copy
import json
import os
from pathlib import Path
import sys
import math
import click
from PIL import Image
from scipy import ndimage
import numpy as np
import shapely
import ocrolib
from tensorflow.keras.models import load_model
#from keras_segmentation.models.unet import resnet50_unet
from ocrd import Processor
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import to_xml, AlternativeImageType
from ocrd_utils import (
getLogger,
concat_padded,
MIMETYPE_PAGE,
coordinates_for_segment,
points_from_polygon,
make_file_id,
assert_file_grp_cardinality,
)
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
from ..constants import OCRD_TOOL
TOOL = 'ocrd-anybaseocr-tiseg'
class OcrdAnybaseocrTiseg(Processor):
def __init__(self, *args, **kwargs):
kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
kwargs['version'] = OCRD_TOOL['version']
super(OcrdAnybaseocrTiseg, self).__init__(*args, **kwargs)
if hasattr(self, 'output_file_grp') and hasattr(self, 'parameter'):
# processing context
self.setup()
def setup(self):
LOG = getLogger('OcrdAnybaseocrTiseg')
self.model = None
if self.parameter['use_deeplr']:
model_weights = self.resolve_resource(self.parameter['seg_weights'])
#model = resnet50_unet(n_classes=self.parameter['classes'], input_height=self.parameter['height'], input_width=self.parameter['width'])
#model.load_weights(model_weights)
self.model = load_model(model_weights)
LOG.info('Loaded segmentation model')
def process(self):
LOG = getLogger('OcrdAnybaseocrTiseg')
assert_file_grp_cardinality(self.input_file_grp, 1)
assert_file_grp_cardinality(self.output_file_grp, 1)
for input_file in self.input_files:
page_id = input_file.pageId or input_file.ID
pcgts = page_from_file(self.workspace.download_file(input_file))
self.add_metadata(pcgts)
page = pcgts.get_Page()
LOG.info("INPUT FILE %s", input_file.pageId or input_file.ID)
if self.parameter['use_deeplr']:
kwargs = {'feature_filter': 'binarized,deskewed,cropped'}
else:
# _should_ also be deskewed and cropped, but no need to enforce that here
kwargs = {'feature_selector': 'binarized'}
page_image, page_coords, page_image_info = self.workspace.image_from_page(
page, page_id, **kwargs)
self._process_segment(page, page_image, page_coords, page_id, input_file)
file_id = make_file_id(input_file, self.output_file_grp)
pcgts.set_pcGtsId(file_id)
self.workspace.add_file(
ID=file_id,
file_grp=self.output_file_grp,
pageId=input_file.pageId,
mimetype=MIMETYPE_PAGE,
local_filename=os.path.join(self.output_file_grp, file_id + '.xml'),
content=to_xml(pcgts).encode('utf-8'),
)
def _process_segment(self, page, page_image, page_coords, page_id, input_file):
LOG = getLogger('OcrdAnybaseocrTiseg')
if self.model:
I = ocrolib.pil2array(page_image.resize((800, 1024), Image.ANTIALIAS))
I = np.array(I)[np.newaxis, :, :, :]
LOG.info('I shape %s', I.shape)
if len(I.shape)<3:
print('Wrong input shape. Image should have 3 channel')
# get prediction
#out = self.model.predict_segmentation(
# inp=I,
# out_fname="/tmp/out.png"
#)
out = self.model.predict(I)
out = out.reshape((2048, 1600, 3)).argmax(axis=2)
text_part = 255 * np.ones(out.shape, 'B')
text_part[np.where(out==1)] = 0
LOG.info('text: %d percent', 100 * (1 - np.count_nonzero(text_part) / np.prod(out.shape)))
image_part = 255 * np.ones(out.shape, 'B')
image_part[np.where(out==2)] = 0
LOG.info('image: %d percent', 100 * (1 - np.count_nonzero(image_part) / np.prod(out.shape)))
image_part = ocrolib.array2pil(image_part)
text_part = ocrolib.array2pil(text_part)
image_part = image_part.resize(page_image.size, Image.BICUBIC)
text_part = text_part.resize(page_image.size, Image.BICUBIC)
else:
I = ocrolib.pil2array(page_image)
if len(I.shape) > 2:
I = np.mean(I, 2)
I = 1-I/I.max()
rows, cols = I.shape
# Generate Mask and Seed Images
Imask, Iseed = self.pixMorphSequence_mask_seed_fill_holes(I)
# Iseedfill: Union of Mask and Seed Images
Iseedfill = self.pixSeedfillBinary(Imask, Iseed)
# Dilation of Iseedfill
mask = np.ones((3, 3))
Iseedfill = ndimage.binary_dilation(Iseedfill, mask)
# Expansion of Iseedfill to become equal in size of I
Iseedfill = self.expansion(Iseedfill, (rows, cols))
# Write Text and Non-Text images
image_part = np.array(255*(1-I*Iseedfill), dtype='B')
text_part = np.array(255*(1-I*(1-Iseedfill)), dtype='B')
LOG.info('text: %d percent', 100 * (1 - np.count_nonzero(text_part) / np.prod(I.shape)))
LOG.info('image: %d percent', 100 * (1 - np.count_nonzero(image_part) / np.prod(I.shape)))
import shutil
import util.util as util_
import os
import cv2
import open3d as o3d
import pickle
import numpy as np
from scipy.optimize import linear_sum_assignment
import trimesh
from skimage import measure
import scipy
from sklearn.neighbors import KDTree
from scipy.ndimage.measurements import label
import data_augmentation
from genre.voxelization import voxel
import traceback
from genre.util import util_sph
from scipy import stats
from dm_control.mujoco.engine import Camera
from trajopt.mujoco_utils import add_object_to_mujoco, remove_objects_from_mujoco, get_mesh_list, compute_mujoco_int_transform
mesh_level=0.5
def chamfer_distance(pcd_1, pcd_2):
pcd_tree = KDTree(pcd_2)
nearest_distances_1, _=pcd_tree.query(pcd_1)
pcd_tree = KDTree(pcd_1)
nearest_distances_2, _=pcd_tree.query(pcd_2)
return np.sum(nearest_distances_1)/pcd_1.shape[0]+np.sum(nearest_distances_2)/pcd_2.shape[0]
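# Illustrative sketch (not part of the original module): chamfer distance between
# two random point clouds. Identical clouds give 0, and the value grows as the
# clouds drift apart.
def _example_chamfer_distance():
    rng = np.random.RandomState(0)
    pcd_a = rng.rand(200, 3)
    pcd_b = pcd_a + 0.01 * rng.randn(200, 3)
    return chamfer_distance(pcd_a, pcd_b)  # small positive value for nearly identical clouds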
#return outher shell of voxel shape
def hollow_dense_pointcloud(ptcld):
conv=scipy.ndimage.convolve(ptcld, np.ones((3,3,3)))
ptcld=np.where(conv<27, ptcld, 0)
return ptcld
def compute_xyz(depth_img, camera_params):
""" Compute ordered point cloud from depth image and camera parameters
@param depth_img: a [H x W] numpy array of depth values in meters
@param camera_params: a dictionary with parameters of the camera used
"""
# Compute focal length from camera parameters
if 'fx' in camera_params and 'fy' in camera_params:
fx = camera_params['fx']
fy = camera_params['fy']
else: # simulated data
aspect_ratio = camera_params['img_width'] / camera_params['img_height']
e = 1 / (np.tan(np.radians(camera_params['fov']/2.)))
t = camera_params['near'] / e; b = -t
r = t * aspect_ratio; l = -r
alpha = camera_params['img_width'] / (r-l) # pixels per meter
focal_length = camera_params['near'] * alpha # focal length of virtual camera (frustum camera)
fx = focal_length; fy = focal_length
if 'x_offset' in camera_params and 'y_offset' in camera_params:
x_offset = camera_params['x_offset']
y_offset = camera_params['y_offset']
else: # simulated data
x_offset = camera_params['img_width']/2
y_offset = camera_params['img_height']/2
indices = util_.build_matrix_of_indices(camera_params['img_height'], camera_params['img_width'])
indices[..., 0] = np.flipud(indices[..., 0]) # pixel indices start at top-left corner. for these equations, it starts at bottom-left
z_e = depth_img
x_e = (indices[..., 1] - x_offset) * z_e / fx
y_e = (indices[..., 0] - y_offset) * z_e / fy
xyz_img = np.stack([x_e, y_e, z_e], axis=-1) # Shape: [H x W x 3]
return xyz_img
upsample=1
xmap = np.array([[j for i in range(int(upsample*640))] for j in range(int(upsample*480))])
ymap = np.array([[i for i in range(int(upsample*640))] for j in range(int(upsample*480))])
#make pointcloud from depth image
def make_pointcloud_all_points(depth_image):
cam_scale = 1.0
cam_cx = 320.0
cam_cy = 240.0
camera_params={'fx':579.411255, 'fy':579.411255, 'img_width':640, 'img_height': 480}
depth_masked = depth_image.flatten()[:, np.newaxis].astype(np.float32)
xmap_masked = xmap.flatten()[:, np.newaxis].astype(np.float32)
ymap_masked = ymap.flatten()[:, np.newaxis].astype(np.float32)
pt2 = depth_masked / cam_scale
pt0 = (ymap_masked/upsample - cam_cx) * pt2 / (camera_params['fx'])
pt1 = (xmap_masked/upsample - cam_cy) * pt2 / (camera_params['fy'])
cloud = np.concatenate((pt0, -pt1, -pt2), axis=1)
return cloud
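# Illustrative sketch (not part of the original module): back-project a synthetic
# constant-depth image into a point cloud with the hard-coded camera intrinsics
# above. The depth value of 1.0 m is arbitrary.
def _example_pointcloud_from_depth():
    depth_image = np.full((480, 640), 1.0)  # flat plane one metre from the camera
    cloud = make_pointcloud_all_points(depth_image)
    return cloud  # shape (480 * 640, 3)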
def color_code_objects(frame, state_id_to_model_pixels, display=False):
#generate object color mapping
labels=np.unique(frame)
exec_dir=os.path.dirname(os.path.realpath(__file__))
color_map_file_name=exec_dir+'/data/object_color_maps/object_color_map_size_'+str(labels.shape[0])+'.p'
if os.path.isfile(color_map_file_name):
object_color_map=pickle.load(open(color_map_file_name, "rb" ))
else:
object_color_map=glasbey.get_colors(len(state_id_to_model_pixels))
pickle.dump(object_color_map, open(color_map_file_name, "wb" ))
#create labelled image
labelled_frame=np.zeros((frame.shape[0], frame.shape[1], 3))
for label in range(labels.shape[0]):
object_pixel_positions_exact=np.argwhere(frame==label)
object_pixel_positions_exact_in_bounds=object_pixel_positions_exact.astype(int)
if len(object_pixel_positions_exact_in_bounds.shape)==2 and object_pixel_positions_exact_in_bounds.shape[0]>0 and object_pixel_positions_exact_in_bounds.shape[1]==2:
object_color=object_color_map[label]
labelled_frame[object_pixel_positions_exact_in_bounds[:, 0], object_pixel_positions_exact_in_bounds[:, 1]]=object_color
if display:
cv2.imshow('object labels', labelled_frame)
cv2.waitKey(20)
return labelled_frame
border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
def get_bbox(bbx):
if bbx[0] < 0:
bbx[0] = 0
if bbx[1] >= 480:
bbx[1] = 479
if bbx[2] < 0:
bbx[2] = 0
if bbx[3] >= 640:
bbx[3] = 639
rmin, rmax, cmin, cmax = bbx[0], bbx[1], bbx[2], bbx[3]
r_b = rmax - rmin
for tt in range(len(border_list)):
if r_b > border_list[tt] and r_b < border_list[tt + 1]:
r_b = border_list[tt + 1]
break
c_b = cmax - cmin
for tt in range(len(border_list)):
if c_b > border_list[tt] and c_b < border_list[tt + 1]:
c_b = border_list[tt + 1]
break
center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)]
rmin = center[0] - int(r_b / 2)
rmax = center[0] + int(r_b / 2)
cmin = center[1] - int(c_b / 2)
cmax = center[1] + int(c_b / 2)
if rmin < 0:
delt = -rmin
rmin = 0
rmax += delt
if cmin < 0:
delt = -cmin
cmin = 0
cmax += delt
if rmax > 480:
delt = rmax - 480
rmax = 480
rmin -= delt
if cmax > 640:
delt = cmax - 640
cmax = 640
cmin -= delt
return rmin, rmax, cmin, cmax
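# Illustrative sketch (not part of the original module): get_bbox snaps a tight
# bounding box to the next size in border_list and clamps it to the 480x640 image.
def _example_get_bbox():
    rmin, rmax, cmin, cmax = get_bbox([100, 170, 200, 290])
    # the 70x90 box is widened to 80x120 around the same centre
    return rmin, rmax, cmin, cmax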
#transform robot meshes into current position
def make_known_meshes(known_meshes, physics, geom_names):
transformed_known_meshes=[]
for known_mesh_ind in range(len(known_meshes)):
transformed_known_mesh=known_meshes[known_mesh_ind].copy()
transform=np.eye(4)
transform[0:3,0:3]=np.reshape(physics.named.data.geom_xmat[geom_names[known_mesh_ind]],(3,3))
transformed_known_mesh.apply_transform(transform)
transform=np.eye(4)
transform[0:3,3]=physics.named.data.geom_xpos[geom_names[known_mesh_ind]]
transformed_known_mesh.apply_transform(transform)
transformed_known_meshes.append(transformed_known_mesh)
return transformed_known_meshes
#select voxel points in cube around target object, also compute table surface height
def select_points_in_cube_voxelize_sphr_proj(self, all_points, i, grid_size=128, estimate_table=False, sub_vox=0, min_z=None, unocc=None):
low=np.array([-0.5,-0.5,-0.5])
hi=np.array([0.5,0.5,0.5])
points=all_points[np.argwhere(np.all(np.logical_and(all_points>=low, all_points<=hi), axis=1))][:,0,:]
voxels=np.zeros((grid_size,grid_size,grid_size))
inds=np.floor((points + 0.5) * grid_size).astype(int)
if sub_vox!=0:
inds[:,2]=inds[:,2]-sub_vox/(128/grid_size)
az_inds=np.argwhere(inds[:,2]>=0)
inds=inds[az_inds[:,0]]
inds=np.clip(inds, 0, grid_size-1)
if inds.shape[0]==0:
if estimate_table:
return np.zeros((128,128,128)), np.zeros((160,160)), None, 0
else:
return np.zeros((128,128,128)), np.zeros((160,160)), None
voxels[inds[:, 0], inds[:, 1], inds[:, 2]] = 1.0
if unocc is not None:
voxels=np.clip(voxels-unocc, 0, 1)
if estimate_table:
more_points=all_points[np.argwhere(np.all(np.logical_and(all_points>=np.array([-3,-3,-1]), all_points<=np.array([2,2,min_z+0.01])), axis=1))][:,0,:]
more_inds=np.floor((more_points + 0.5) * grid_size).astype(int)
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# OS Libraries
import os
import os.path
import datetime
# Data Structure Libraries
from collections import deque
# ROS Libraries
import rospy
# ROS Messages
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from gmapping.msg import doubleMap, mapModel
# Math Libraries
import numpy as np
import numpy.ma as ma
from cv_bridge import CvBridge
import matplotlib
import matplotlib.pyplot as plt
# Project Libraries
from fmp_slam_eval.map_colorizer import MapColorizer
from fmp_slam_eval.enums import DiscreteStates as DiSt
from map_simulator.utils import map_msg_to_numpy, map_msg_extent, mkdir_p
# Use non-interactive plotting back-end due to issues with rospy.spin()
matplotlib.use('SVG')
class FMPPlotter:
"""
Class for plotting/coloring different statistics from the Full Map Posterior distribution
and publishing them as images or saving them in files.
"""
def __init__(self):
"""
Constructor
"""
rospy.init_node('fmp_plot')
# Object for pseudo-coloring and plotting the maps
self._map_colorizer = MapColorizer()
self._sub_topic_map_model = "map_model"
self._sub_topic_fmp_alpha = "fmp_alpha"
self._sub_topic_fmp_beta = "fmp_beta"
self._map_model = None
# TODO: this two guys:
# do_img_raw = rospy.get_param("~img_raw" , False)
# do_img_fmp = rospy.get_param("~img_fmp" , False)
do_img_stat = rospy.get_param("~img_stat", False)
do_img_mlm = rospy.get_param("~img_mlm", False)
do_img_para = rospy.get_param("~img_para", False)
self._pub_img = rospy.get_param("~pub_img", False)
self._topic_prefix = rospy.get_param("~pub_topic_prefix", "/fmp_img/")
self._save_img = rospy.get_param("~save_img", False)
self._resolution = rospy.get_param("~resolution", 300)
timestamp = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
path_prefix = rospy.get_param("~path_prefix", "exp")
default_path = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')
default_path = os.path.join(default_path, 'FMP_img')
default_path = os.path.join(default_path, path_prefix + "_" + timestamp)
save_dir = rospy.get_param("~save_dir", default_path)
save_dir = os.path.expanduser(save_dir)
save_dir = os.path.expandvars(save_dir)
save_dir = os.path.normpath(save_dir)
self._save_dir = save_dir
# Image config dictionary
sub_img_stat_mean_cfg = {"key": "mean", "dir": os.path.join("stats", "mean"), "file_prefix": "mean",
"topic": "stats/mean", "calc_f": self._calc_mean}
sub_img_stat_var_cfg = {"key": "var", "dir": os.path.join("stats", "var"), "file_prefix": "var",
"topic": "stats/var", "calc_f": self._calc_var}
img_stat_cfg = {"do": do_img_stat, "img": [sub_img_stat_mean_cfg, sub_img_stat_var_cfg]}
sub_img_mlm_cfg = {"key": "mlm", "dir": "mlm", "file_prefix": "mlm",
"topic": "mlm", "calc_f": self._calc_mlm}
img_mlm_cfg = {"do": do_img_mlm, "img": [sub_img_mlm_cfg]}
sub_img_par_alpha_cfg = {"key": "alpha", "dir": os.path.join("param", "alpha"), "file_prefix": "alpha",
"topic": "param/alpha", "calc_f": self._calc_para_alpha}
sub_img_par_beta_cfg = {"key": "beta", "dir": os.path.join("param", "beta"), "file_prefix": "beta",
"topic": "param/beta", "calc_f": self._calc_para_beta}
img_par_cfg = {"do": do_img_para, "img": [sub_img_par_alpha_cfg, sub_img_par_beta_cfg]}
self._img_cfg = {
"stat": img_stat_cfg,
"mlm": img_mlm_cfg,
"par": img_par_cfg
}
fmp_param_sub_required = False
# Queues for storing messages
self._alpha_beta_dict = {}
self._alpha_beta_queue = deque()
# Max and Min dictionaries for stabilizing the color scales for continuous values
self._max_values = {}
self._min_values = {}
# Create Publishers
self._publishers = {}
for img_set_key, img_set_cfg in self._img_cfg.items():
fmp_param_sub_required = fmp_param_sub_required or img_set_cfg['do']
if self._pub_img and img_set_cfg['do']:
for img_cfg in img_set_cfg['img']:
key = img_cfg['key']
topic = self._topic_prefix + img_cfg['topic']
self._publishers[key] = rospy.Publisher(topic, Image, latch=True, queue_size=1)
something_to_do = (self._pub_img or self._save_img) and fmp_param_sub_required
# Don't start the node if not needed...
if not something_to_do:
rospy.logerr("Nothing to do here! Why though?!?")
rospy.logdebug("Setting values:")
rospy.logdebug("\tpub_img: {}, save_img: {}".format(self._pub_img, self._save_img))
rospy.logdebug("\tdo_img_stat: {}, do_img_mlm: {}, do_img_para: {}".format(do_img_stat,
do_img_mlm, do_img_para))
rospy.logdebug("\tsomething_to_do: {}".format(something_to_do))
rospy.signal_shutdown('Nothing to do')
return
# Create Subscribers
# To map model
rospy.Subscriber(self._sub_topic_map_model, mapModel, self._map_model_callback)
# To alpha and beta parameters (if publishing or saving images, and at least one image is generated)
if (self._pub_img or self._save_img) and fmp_param_sub_required:
rospy.Subscriber(self._sub_topic_fmp_alpha, doubleMap, self._map2d_alpha_callback, queue_size=1)
rospy.Subscriber(self._sub_topic_fmp_beta, doubleMap, self._map2d_beta_callback, queue_size=1)
# Create save path if not exists
if self._save_img and fmp_param_sub_required:
if not os.path.exists(self._save_dir):
mkdir_p(self._save_dir)
self._busy = False # Thread lock flag for plot_from_queue
rospy.Timer(rospy.Duration(1), self._plot_from_queue)
rospy.spin()
def _plot_from_queue(self, event):
"""
Function called periodically to check if there are any maps in the queue to be plotted.
While there are still alpha and beta maps stored in the queue, it will plot the configured images.
:param event: Caller event. Unused except for logging.
:return: None
"""
if self._busy:
rospy.loginfo("Another thread is already plotting. Caller: {}".format(event))
else:
self._busy = True
while self._alpha_beta_queue:
seq = self._alpha_beta_queue.popleft()
self._plot(seq, self._alpha_beta_dict[seq])
del self._alpha_beta_dict[seq]
self._busy = False
def _plot(self, seq, dic):
"""
Generates the desired images and plots for a given sequence of alpha and beta maps.
:param seq: (int) Sequence number of the received maps
:params dic: (dict) Dictionary containing the alpha and beta maps, as well as their prior values.
It should be formatted as:
dic = {'alpha': {'prior': (int), 'map': (2D np.ndarray)},
'beta' : {'prior': (int), 'map': (2D np.ndarray)}}
:return: None
"""
if not self._pub_img and not self._save_img:
return
extent_a = dic['alpha']['extent']
extent_b = dic['beta']['extent']
if extent_a != extent_b:
raise ValueError("Map extent of alpha {} differs from beta {}!".format(extent_a, extent_b))
self._map_colorizer.set_wm_extent(extent_a)
alpha = dic['alpha']['map'] + dic['alpha']['prior']
beta = dic['beta']['map'] + dic['beta']['prior']
for img_set_key, img_set_cfg in self._img_cfg.items():
if img_set_cfg['do']:
rospy.loginfo('Plotting %s', img_set_key)
for img_cfg in img_set_cfg['img']:
img_key = img_cfg['key']
img_calc = img_cfg['calc_f']
rospy.loginfo("\tComputing continuous and discrete images for %s.", img_key)
# Compute the images to plot using the configured calculation_function ('calc_f')
img_cont, img_disc, ds_list, v_min, v_max, occ, log_scale = img_calc(alpha, beta)
self._map_colorizer.set_disc_state_list(ds_list)
self._map_colorizer.set_cont_bounds(img_cont, v_min=v_min, v_max=v_max, occupancy_map=occ,
log_scale=log_scale)
rgba_img = self._map_colorizer.colorize(img_cont, img_disc)
del img_cont
del img_disc
if self._save_img:
path = os.path.join(self._save_dir, img_cfg['dir'])
if not os.path.exists(path):
mkdir_p(path)
filename = img_cfg['file_prefix'] + '_s' + str(seq)
raw_filename = 'raw_' + filename + '.png'
filename = filename + '.svg'
mlp_path = os.path.join(path, filename)
raw_path = os.path.join(path, raw_filename)
fig, ax = plt.subplots(figsize=[20, 20])
ax.imshow(rgba_img, extent=extent_a)
self._map_colorizer.draw_cb_cont(fig)
if ds_list:
self._map_colorizer.draw_cb_disc(fig)
rospy.loginfo("\t\tSaving image %s to %s.", img_key, mlp_path)
plt.savefig(mlp_path, bbox_inches='tight', dpi=self._resolution)
plt.close()
del fig
del ax
rospy.loginfo("\t\tSaving image %s to %s.", img_key, raw_path)
plt.imsave(raw_path, rgba_img, vmin=0, vmax=1)
plt.close()
rospy.loginfo("\t\tImages saved.")
if self._pub_img:
publisher = self._publishers[img_key]
rospy.loginfo("\t\tGenerating image message to %s.", img_key)
rgba_img = 255 * rgba_img
rgba_img = rgba_img.astype(np.uint8)
image_msg_head = Header()
image_msg_head.seq = seq
image_msg_head.stamp = rospy.Time.now()
image_msg_head.frame_id = 'map'
br = CvBridge()
image_msg = br.cv2_to_imgmsg(rgba_img, encoding="rgba8")
del rgba_img
image_msg.header = image_msg_head
publisher.publish(image_msg)
del image_msg
rospy.loginfo("\t\tImage published.")
def _map_model_callback(self, msg):
"""
Method called when receiving a map model type. It just sets the local field with the message's value.
:param msg: (gmapping.mapModel) An integer stating the type of map model used by the SLAM algorithm and some
constants for comparisons.
:return: None
"""
mm = msg.map_model
mm_str = ''
if mm == mapModel.REFLECTION_MODEL:
mm_str = 'Reflection Model'
elif mm == mapModel.DECAY_MODEL:
mm_str = 'Exponential Decay Model'
else:
rospy.logerr('No idea what kind of model %d is! Going with Reflection Model.', mm)
mm = mapModel.REFLECTION_MODEL
rospy.loginfo("Received Map Model: (%d, %s)", mm, mm_str)
self._map_model = mm
def _add_to_dict(self, a_b, msg):
"""
Adds the received map and prior to the object's buffer dictionary.
:param a_b: (string) Indicates which of the parameters has been received: "alpha"|"beta"
:param msg: (gmapping.doubleMap) Double Map message containing the prior and map parameters.
:return: None
"""
seq = msg.header.seq
map_dict = {
a_b: {
'map': map_msg_to_numpy(msg),
'extent': map_msg_extent(msg),
'prior': msg.param
}
}
if a_b == 'alpha':
b_a = 'beta'
else:
b_a = 'alpha'
rospy.loginfo('Received msg for {} with seq {}'.format(a_b, seq))
if seq in self._alpha_beta_dict:
self._alpha_beta_dict[seq][a_b] = map_dict[a_b]
if b_a in self._alpha_beta_dict[seq]:
rospy.loginfo('Collected alpha/beta info for seq {}'.format(seq))
self._alpha_beta_queue.append(seq)
else:
self._alpha_beta_dict[seq] = map_dict
def _map2d_alpha_callback(self, msg):
"""
Method called when receiving a map with the alpha parameters of the full posterior map distribution.
It adds the received map to the buffer dictionary until both parameter maps have been received.
:param msg: (gmapping.doubleMap) A floating point gmapping map message.
:return: None
"""
self._add_to_dict('alpha', msg)
def _map2d_beta_callback(self, msg):
"""
Method called when receiving a map with the beta parameters of the full posterior map distribution.
It adds the received map to the buffer dictionary until both parameter maps have been received.
:param msg: (gmapping.doubleMap) A floating point gmapping map message.
:return: None
"""
self._add_to_dict('beta', msg)
def _calc_mean(self, alpha, beta):
"""
Takes the alpha and beta parameter maps and computes the mean depending on the mapping model used.
:param alpha: (nd.array) A 2D array containing the alpha parameters of the PDF of the map posterior.
:param beta: (nd.array) A 2D array containing the beta parameters of the PDF of the map posterior.
:return: (tuple) A tuple consisting of:
* means (ma.array),
* special-case discrete-valued means (ma.array),
* list of special discrete states (list)
* minimum continuous value (float) for color map scaling
* maximum continuous value (float) for color map scaling
* whether the map represents occupancy (bool)
* whether the color scale should be logarithmic (bool)
"""
shape = alpha.shape
v_min = 0
occ = True
if self._map_model == mapModel.DECAY_MODEL:
numerator = alpha
denominator = beta
undef_mask = (denominator == 0)
zero_mask = (numerator == 0)
all_mask = np.logical_or(undef_mask, zero_mask)
numerator = ma.masked_array(numerator)
numerator[all_mask] = ma.masked
means = ma.divide(numerator, denominator)
means_ds = ma.zeros(shape, dtype=np.int8)
means_ds[undef_mask] = DiSt.UNDEFINED.value
means_ds[zero_mask] = DiSt.ZERO.value
means_ds[~all_mask] = ma.masked
ds_list = [DiSt.UNDEFINED, DiSt.ZERO]
v_max = None
log_scale = True
elif self._map_model == mapModel.REFLECTION_MODEL:
denominator = alpha + beta
undef_mask = (denominator == 0)
numerator = ma.masked_array(alpha)
numerator[undef_mask] = ma.masked
means = ma.divide(numerator, denominator)
# functions for all methods
import inspect
import os
import sys
from torch import nn
import torch
from nflows.distributions.normal import StandardNormal
from nflows.distributions.normal import ConditionalDiagonalNormal
from nflows.distributions.uniform import BoxUniform
from nflows.flows.base import Flow
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.base import (
CompositeTransform,
)
from nflows.transforms.permutations import ReversePermutation
from nflows.transforms.standard import PointwiseAffineTransform
from nflows.transforms.nonlinearities import Tanh
from nflows.transforms.normalization import BatchNorm
import numpy as np
# load from util (from https://stackoverflow.com/questions/714063/importing-modules-from-parent-folder)
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from util import InvSigmoid
from util import UniformContext
# Return min ess of the samples in x, code adapted from https://github.com/gpapamak/snl/blob/master/util/math.py
def ess_mcmc(x):
N, dim = x.shape
x = x - x.mean(0)
acors = torch.zeros(x.shape)
for i in range(dim):
for lag in range(N):
acor = torch.dot(x[:N-lag, i], x[lag:, i]) / (N - lag)
if acor < 0:
break
acors[lag, i] = acor
act = 1 + 2 * acors[1:].sum(0) / acors[0]  # integrated autocorrelation time: 1 + 2 * sum of normalized autocorrelations
ess = N/act
return min(ess).item()
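# Illustrative sketch (not part of the original module): for independent draws the
# effective sample size should be close to the number of samples, while strongly
# autocorrelated chains give much smaller values.
def _example_ess():
    x = torch.randn(1000, 2)  # i.i.d. samples, ESS close to 1000
    return ess_mcmc(x)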
def load_summary_stats_mean_and_std():
m_s_of_prior = np.loadtxt('data/m_s_of_prior.csv', delimiter=",")
s_s_of_prior = np.loadtxt('data/s_s_of_prior.csv', delimiter=",")
return torch.from_numpy(m_s_of_prior).to(dtype=torch.float32), \
torch.from_numpy(s_s_of_prior).to(dtype=torch.float32)
# For whiten, calc_whitening_transform, and de_whiten: code adapted from: https://github.com/gpapamak/snl/blob/master/util/math.py
def whiten(xs, params):
"""
Whitens a given dataset using the whitening transform provided.
"""
means, U, istds = params
ys = xs.copy()
ys -= means
ys = np.dot(ys, U)
ys *= istds
return ys
def calc_whitening_transform(xs):
"""
Calculates the parameters that whiten a dataset.
"""
N = xs.shape[0]
means = np.mean(xs, axis=0)
ys = xs - means
cov = np.dot(ys.T, ys) / N
# eigendecomposition of the covariance gives the rotation U and per-component
# variances; the inverse standard deviations complete the (means, U, istds)
# tuple expected by whiten() above
eigvals, U = np.linalg.eig(cov)
istds = np.sqrt(1.0 / eigvals)
return means, U, istds
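# Illustrative sketch (not part of the original module): whiten correlated Gaussian
# samples and check that the result has roughly identity covariance. The data here
# are synthetic and only exercise calc_whitening_transform/whiten.
def _example_whitening():
    rng = np.random.RandomState(0)
    xs = rng.randn(5000, 2)
    xs[:, 1] = 0.8 * xs[:, 0] + 0.2 * xs[:, 1]  # introduce correlation
    params = calc_whitening_transform(xs)
    ys = whiten(xs, params)
    return np.cov(ys.T)  # approximately the 2x2 identity matrix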