# -*- coding: utf-8 -*-
"""
@author: VHOEYS
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.transforms import offset_copy
from .sensitivity_base import *
from .extrafunctions import *
from .plot_functions_rev import plotbar, scatterwithtext
# from .latextablegenerator import *
class MorrisScreening(SensitivityAnalysis):
'''
Morris screening method, with the improved sampling strategy,
selecting a subset of the trajectories to improve the sampled space.
Working with groups is possible.
Parameters
-----------
parsin : list
either a list of (min,max,'name') values,
[(min,max,'name'),(min,max,'name'),...(min,max,'name')]
or a list of ModPar instances
ModelType : pyFUSE | PCRaster | external
Give the type of model to work with
Attributes
------------
_ndim : int
number of factors examined. When groups are chosen, the number of factors is stored in NumFact and sizea becomes the number of created groups (k)
NumFact : int
number of factors examined in the case when groups are chosen
intervals(p) : int
number of intervals considered in (0, 1)
UB : ndarray
Upper Bound for each factor in list or array, (sizea,1)
LB : ndarray
Lower Bound for each factor in list or array, (sizea,1)
GroupNumber : int
Number of groups (eventually 0)
GroupMat : ndarray
Array which describes the chosen groups. Each column represents
a group and its elements are set to 1 in correspondence of the
factors that belong to the fixed group. All the other elements
are zero, (NumFact,GroupNumber)
Delta : float
jump value to calculate screening
intervals : int
number of intervals used in the sampling
noptimized : int
r-value, the number of base runs done in the optimized sampling
OutMatrix : ndarray
not-optimized sample matrix
OutFact : ndarray
not-optimized matrix of changing factors
Groupnumber : int
number of groups used
sizeb : int
when using groups, sizeb is determined by the number of groups,
otherwise the number of factors
OptMatrix_b : ndarray
the not-adapted version of the OptMatrix, with all sampled values
between 0 and 1
parset2run : ndarray
every row is a parameter set to run the model for. All sensitivity
methods have this attribute to interact with base-class running
Notes
---------
Original Matlab code from:
http://sensitivity-analysis.jrc.it/software/index.htm
Original method described in [M1]_, improved by the optimization of [M2]_.
The option to work with groups is added, as described in [M2]_.
Examples
------------
>>> Xi = [(0.0,5.0,r'$X_1$'),(4.0,7.0,r'$X_2$'),(0.0,1.0,r'$X_3$'),
(0.0,1.0,r'$X_4$'), (0.0,1.0,r'$X_5$'),(0.5,0.9,r'$X_6$')]
>>> # Set up the morris class instance with uncertain factors Xi
>>> sm = MorrisScreening(Xi,ModelType = 'external')
>>> # calculate an optimized set of parameter sets to run model
>>> OptMatrix, OptOutVec = sm.Optimized_Groups(nbaseruns=100,
intervals = 4, noptimized=4,
Delta = 0.4)
>>> # Check the quality of the selected trajectories
>>> sm.Optimized_diagnostic(width=0.15)
>>> #RUN A MODEL AND GET OUTPUT (EXTERNAL) -> get output
>>> #Calculate the Morris screening diagnostics
>>> sm.Morris_Measure_Groups(output)
>>> #plot a barplot of mu, mustar and sigma (edgecolor and facecolor grey)
>>> sm.plotmu(ec='grey',fc='grey')
>>> sm.plotmustar(outputid = 1,ec='grey',fc='grey')
>>> sm.plotsigma(ec='grey',fc='grey')
>>> #plot the mu* sigma plain
>>> sm.plotmustarsigma(zoomperc = 0.05, outputid = 1, loc = 2)
>>> #export the results in txt file
>>> sm.txtresults(name='MorrisTestOut.txt')
>>> #export the results in tex-table
>>> sm.latexresults(name='MorrisTestOut.tex')
References
------------
.. [M1] Morris, M. D. Factorial Sampling Plans for Preliminary Computational
Experiments. Technometrics 33, no. 2 (1991): 161–174.
.. [M2] Campolongo, Francesca, Jessica Cariboni, and Andrea Saltelli.
An Effective Screening Design for Sensitivity Analysis of Large Models.
Environmental Modelling & Software 22, no. 10 (October 2007): 1509–1518.
http://linkinghub.elsevier.com/retrieve/pii/S1364815206002805.
.. [M3] Saltelli, Andrea, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, and <NAME>.
Global Sensitivity Analysis, The Primer. John Wiley & Sons Ltd, 2008.
'''
def __init__(self, parsin, ModelType = 'external'):
SensitivityAnalysis.__init__(self, parsin)
self._methodname = 'MorrisScreening'
if ModelType == 'pyFUSE':
self.modeltype = 'pyFUSE'
print('The analysed model is built up by the pyFUSE environment')
elif ModelType == 'external':
self.modeltype = 'external'
print('The analysed model is externally run')
elif ModelType == 'PCRaster':
self.modeltype = 'PCRasterPython'
print('The analysed model is a PCRasterPython Framework instance')
elif ModelType == 'testmodel':
self.modeltype = 'testmodel'
print('The analysed model is a testmodel')
else:
raise Exception('Not supported model type')
self.LB = np.array([el[0] for el in self._parsin])
self.UB = np.array([el[1] for el in self._parsin])
def Sampling_Function_2(self, nbaseruns, LB, UB):
'''
Python version of the Morris sampling function
Parameters
-----------
nbaseruns : int
sample size
Returns
---------
OutMatrix(sizeb*r, sizea) :
the In(i,j) matrices computed for the entire sample size; the
parameter values to run the model for
OutFact(sizea*r,1) :
the Fact(i,1) vectors computed for the entire sample size; indicates
the factor changing at a specific line
Notes
-------
B0 is constructed as in Morris design when groups are not considered.
When groups are considered, the routine follows these steps:
1. Creation of P0 and DD0 matrices defined in Morris for the groups.
This means that the dimensions of these 2 matrices are
(GroupNumber,GroupNumber).
2. Creation of AuxMat matrix with (GroupNumber+1,GroupNumber)
elements.
3. Definition of GroupB0 starting from AuxMat, GroupMat
and P0.
4. The final B0 for groups is obtained as [ones(sizeb,1)*x0' + GroupB0].
The P0 permutation is present in GroupB0 and it's not necessary to
permute the matrix (ones(sizeb,1)*x0') because it's already randomly
created.
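For illustration, a hypothetical grouping (not taken from the code) of
four factors into two groups, with factors 1 and 3 in the first group
and factors 2 and 4 in the second, would be encoded as
>>> GroupMat = np.array([[1, 0], [0, 1], [1, 0], [0, 1]]) # (NumFact, GroupNumber) = (4, 2)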
Adapted from the matlab version of 15 November 2005 by J.Cariboni
References
-------------
.. [M4] <NAME>, <NAME>, <NAME>, Sensitivity Analysis
on page 68 ss
.. [M5] <NAME>, <NAME>, JRC - IPSC Ispra, Varese, IT
'''
# The integration in the class version is not optimal, therefore this mapping
k = self._ndim
self.nbaseruns = nbaseruns
r = nbaseruns
p = self.intervals
GroupMat = self.GroupMat
# Parameters and initialisation of the output matrix
sizea = k
Delta = self.Delta
NumFact = sizea
if GroupMat.shape[0]==GroupMat.size:
Groupnumber=0
else:
Groupnumber = GroupMat.shape[1] #size(GroupMat,2)
sizea = GroupMat.shape[1]
sizeb = sizea + 1
# sizec = 1
Outmatrix = np.zeros(((sizea+1)*r,NumFact))
OutFact = np.zeros(((sizea+1)*r,1))
# For each i generate a trajectory
for i in range(r):
Fact=np.zeros(sizea+1)
# Construct DD0
DD0 = np.matrix(np.diagflat(np.sign(np.random.random(k)*2-1)))
# Construct B (lower triangular)
B = np.matrix(np.tri((sizeb), sizea,k=-1, dtype=int))
# Construct A0, A
A0 = np.ones((sizeb,1))
A = np.ones((sizeb,NumFact))
# Construct the permutation matrix P0. In each column of P0 one randomly chosen element equals 1
# while all the others equal zero.
# P0 tells the order in which factors are changed in each trajectory.
# Note that P0 is then used reading it by rows.
I = np.matrix(np.eye(sizea))
P0 = I[:,np.random.permutation(sizea)]
# When groups are present the random permutation is done only on B. The effect is the same since
# the added part (A0*x0') is completely random.
if Groupnumber != 0:
B = B * (np.matrix(GroupMat)*P0.transpose()).transpose()
# Compute AuxMat both for single factors and groups analysis. For Single factors analysis
# AuxMat is added to (A0*X0) and then permutated through P0. When groups are active AuxMat is
# used to build GroupB0. AuxMat is created considering DD0. If the element on DD0 diagonal
# is 1 then AuxMat will start with zero and add Delta. If the element on DD0 diagonal is -1
# then AuxMat will start at Delta and go to zero.
AuxMat = Delta* 0.5 *((2*B - A) * DD0 + A)
#----------------------------------------------------------------------
# a --> Define the random vector x0 for the factors. Note that x0 takes value in the hypercube
# [0,...,1-Delta]*[0,...,1-Delta]*[0,...,1-Delta]*[0,...,1-Delta]
xset=np.arange(0.0,1.0-Delta,1.0/(p-1))
try:
x0 = np.matrix(xset.take(list(np.ceil(np.random.random(k)*np.floor(p/2))-1))) #.transpose()
except:
raise Exception('invalid p (intervals) and Delta combination, please adapt')
#----------------------------------------------------------------------
# b --> Compute the matrix B*, here indicated as B0. Each row in B0 is a
# trajectory for Morris Calculations. The dimension of B0 is (Numfactors+1,Numfactors)
if Groupnumber != 0:
B0 = (A0*x0 + AuxMat)
else:
B0 = (A0*x0 + AuxMat)*P0
#----------------------------------------------------------------------
# c --> Compute values in the original intervals
# B0 has values x(i,j) in [0, 1/(p -1), 2/(p -1), ... , 1].
# To obtain values in the original intervals [LB, UB] we compute
# LB(j) + x(i,j)*(UB(j)-LB(j))
In=np.tile(LB, (sizeb,1)) + np.array(B0)*np.tile((UB-LB), (sizeb,1)) # convert B0 to an array so the product with the tiled range is elementwise
# Create the Factor vector. Each component of this vector indicates which factor or group of factors
# has been changed in each step of the trajectory.
for j in range(sizea):
Fact[j] = np.where(P0[j,:])[1]
Fact[sizea] = int(-1) # only to keep the shape consistent; alternatively Fact could be made smaller
# append the created trajectory to the others
Outmatrix[i*(sizea+1):i*(sizea+1)+(sizea+1),:]=np.array(In)
OutFact[i*(sizea+1):i*(sizea+1)+(sizea+1)]=np.array(Fact).reshape((sizea+1,1))
return Outmatrix, OutFact
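# Worked example of the shapes above (no groups): with k = 2 factors and
# r = nbaseruns = 10 trajectories, Outmatrix has ((2+1)*10, 2) = (30, 2)
# entries and OutFact has shape (30, 1); each block of sizea+1 = 3
# consecutive rows is one trajectory, and the last Fact entry per block is -1.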
def Optimized_Groups(self, nbaseruns=500, intervals = 4, noptimized=10,
GroupMat=np.array([]), Delta = 'default'):
'''
Optimization in the choice of trajectories for the Morris experiment.
Starting from an initial set of nbaseruns, a set of noptimized runs
is selected to use for the screening technique
Groups can be used to evaluate parameters together
Parameters
------------
nbaseruns : int (default 500)
Total number of trajectories
intervals : int (default 4)
Number of levels
noptimized : int (default 10)
Final number of optimal trajectories
GroupMat : [NumFact,NumGroups]
Matrix describing the groups. Each column represents a group and
its elements are set to 1 in correspondence of the factors that
belong to the fixed group. All the other elements are zero.
Delta : 'default'|float (0-1)
When default, the value is calculated from the p value (intervals),
otherwise the given number is taken
Returns
--------
OptMatrix/ self.OptOutMatrix : ndarray
Optimized sampled values giving the matrix to run the model for
OptOutVec/ self.OptOutFact : ndarray
Optimized sampled values giving the matrix indicating the factor
changed at a specific line
Notes
-----
The combination of Delta and intervals is important to get a
good overview. The user is referred to [M3]_.
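As a worked example of the default choice implemented below:
with intervals = 4, Delta = 4 / (2 * (4 - 1)) = 2/3.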
'''
# number of trajectories (r)
N = nbaseruns
#check the p and Delta value workaround
if not intervals%2==0:
print('It is advised to use an even number for the p-value, number \
of intervals, since currently not all levels are explored')
if Delta == 'default':
self.Delta = intervals/(2.*(intervals-1.))
else:
if Delta > 0.0 and Delta < 1.0:
self.Delta = Delta
else:
raise Exception('Invalid Delta value, please use default or float number')
self.intervals = intervals
# p = intervals
self.noptimized = noptimized
r = noptimized
self.GroupMat = GroupMat
NumFact = self._ndim
LBt = np.zeros(NumFact)
UBt = np.ones(NumFact)
OutMatrix, OutFact = self.Sampling_Function_2(nbaseruns, LBt, UBt) #Version with Groups
#again mapping (not optimal)
self.OutMatrix = OutMatrix
self.OutFact = OutFact
try:
Groupnumber = GroupMat.shape[1]
except:
Groupnumber = 0
self.Groupnumber = Groupnumber
if Groupnumber != 0:
sizeb = Groupnumber +1
else:
sizeb = NumFact +1
self.sizeb = sizeb
Dist = np.zeros((N,N))
Diff_Traj = np.arange(0.0,N,1.0)
# Compute the distance between all pair of trajectories (sum of the distances between points)
# The distance matrix is a matrix N*N
# The distance is defined as the sum of the distances between all pairs of points
# if the two trajectories differ, 0 otherwise
for j in range(N): #combine all trajectories: eg N=3: 0&1; 0&2; 1&2 (does not depend on the sequence)
for z in range(j+1,N):
MyDist = np.zeros((sizeb,sizeb))
for i in range(sizeb):
for k in range(sizeb):
MyDist[i,k] = (np.sum((OutMatrix[sizeb*(j)+i,:]-OutMatrix[sizeb*(z)+k,:])**2))**0.5 # indices to be adapted
if np.where(MyDist==0)[0].size == sizeb:
# Same trajectory. If the number of zeros in Dist matrix is equal to
# (NumFact+1) then the trajectory is a replica. In fact (NumFact+1) is the maximum number of
# points that two trajectories can have in common
Dist[j,z] = 0.
Dist[z,j] = 0.
# Memorise the replicated trajectory
Diff_Traj[z] = -1. #the z value identifies the duplicate
else:
# Define the distance between two trajectories as
# the sum of the distances between all pairs of their points
Dist[j,z] = np.sum(MyDist)
Dist[z,j] = np.sum(MyDist)
#prepare array with excluded duplicates (alternative would be deleting rows)
dupli=np.where(Diff_Traj == -1)[0].size
New_OutMatrix = np.zeros(((sizeb)*(N-dupli),NumFact))
New_OutFact = np.zeros(((sizeb)*(N-dupli),1))
# Eliminate replicated trajectories in the sampled matrix
ID=0
for i in range(N):
if Diff_Traj[i]!= -1.:
New_OutMatrix[ID*sizeb:ID*sizeb+sizeb,:] = OutMatrix[i*(sizeb) : i*(sizeb) + sizeb,:]
New_OutFact[ID*sizeb:ID*sizeb+sizeb,:] = OutFact[i*(sizeb) : i*(sizeb) + sizeb,:]
ID+=1
# Select in the distance matrix only the rows and columns of different trajectories
Dist_Diff = Dist[np.where(Diff_Traj != -1)[0],:] # must stay a 2D matrix; delete rows instead of keeping track
Dist_Diff = Dist_Diff[:,np.where(Diff_Traj != -1)[0]] # must stay a 2D matrix; delete columns instead of keeping track
# Dist_Diff = np.delete(Dist_Diff,np.where(Diff_Traj==-1.)[0])
New_N = np.size(np.where(Diff_Traj != -1)[0])
# Select the optimal set of trajectories
Traj_Vec = np.zeros((New_N, r))
OptDist = np.zeros((New_N, r))
for m in range(New_N): #each row in Traj_Vec
Traj_Vec[m,0]=m
for z in range(1,r): #elements in columns after first
Max_New_Dist_Diff = 0.0
for j in range(New_N):
# Check that trajectory j is not already in
Is_done = False
for h in range(z):
if j == Traj_Vec[m,h]:
Is_done=True
if Is_done == False:
New_Dist_Diff = 0.0
#compute distance
for k in range(z):
New_Dist_Diff = New_Dist_Diff + (Dist_Diff[int(Traj_Vec[m, k]),j])**2
# Check if the distance is greater than the old one
if New_Dist_Diff**0.5 > Max_New_Dist_Diff:
Max_New_Dist_Diff = New_Dist_Diff**0.5
Pippo = j
# Set the new trajectory
Traj_Vec[m,z] = Pippo
OptDist[m,z] = Max_New_Dist_Diff
# Construct optimal matrix
SumOptDist = np.sum(OptDist, axis=1)
# Find the maximum distance
Pluto = np.where(SumOptDist == np.max(SumOptDist))[0]
Opt_Traj_Vec = Traj_Vec[Pluto[0],:]
OptMatrix = np.zeros(((sizeb)*r,NumFact))
OptOutVec = np.zeros(((sizeb)*r,1))
for k in range(r):
OptMatrix[k*(sizeb):k*(sizeb)+(sizeb),:]= New_OutMatrix[(sizeb)*(int(Opt_Traj_Vec[k])):(sizeb)*(int(Opt_Traj_Vec[k])) + sizeb,:]
OptOutVec[k*(sizeb):k*(sizeb)+(sizeb)]= New_OutFact[(sizeb)*(int(Opt_Traj_Vec[k])):(sizeb)*(int(Opt_Traj_Vec[k]))+ sizeb,:]
#----------------------------------------------------------------------
# Compute values in the original intervals
# Optmatrix has values x(i,j) in [0, 1/(p -1), 2/(p -1), ... , 1].
# To obtain values in the original intervals [LB, UB] we compute
# LB(j) + x(i,j)*(UB(j)-LB(j))
self.OptMatrix_b = OptMatrix.copy()
OptMatrix=np.tile(self.LB, (sizeb*r,1)) + OptMatrix*np.tile((self.UB-self.LB), (sizeb*r,1))
self.OptOutMatrix = OptMatrix
self.OptOutFact = OptOutVec
self.parset2run = OptMatrix
self.totalnumberruns = self.parset2run.shape[0]
return OptMatrix, OptOutVec
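# Worked example of the selection above: with noptimized = r = 4, each row
# m of Traj_Vec holds 4 candidate trajectory indices; the row with the
# largest SumOptDist defines Opt_Traj_Vec, and OptMatrix stacks those 4
# trajectories, i.e. 4*sizeb rows of parameter sets.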
def Optimized_diagnostic(self, width = 0.1):
'''
Evaluate the optimized trajectories in their space distribution;
the evaluation is done based on the [0-1] boundaries of the sampling.
Returns a quality measure and 2 figures to compare the optimized version
Parameters
-----------
width : float
width of the bars in the plot (default 0.1)
Examples
---------
>>> sm.Optimized_diagnostic()
The quality of the sampling strategy changed from 0.76 with the old
strategy to 0.88 for the optimized strategy
'''
NumFact = self._ndim
sizeb = self.sizeb
p = self.intervals
r = self.noptimized
# Clean the trajectories from repetitions and plot the histograms
hplot=np.zeros((2*r,NumFact))
for i in range(NumFact):
for j in range(r):
# select the first value of the factor
hplot[j*2,i] = self.OptMatrix_b[j*sizeb,i]
# search the second value
for ii in range(1,sizeb):
if self.OptMatrix_b[j*sizeb+ii,i] != self.OptMatrix_b[j*sizeb,i]:
kk = 1
hplot[j*2+kk,i] = self.OptMatrix_b[j*sizeb+ii,i]
fig=plt.figure()
fig.subplots_adjust(hspace=0.3,wspace = 0.1)
fig.suptitle('Optimized sampling')
# DimPlots = np.round(NumFact/2)
DimPlots = int(np.ceil(NumFact/2.))
# print hplot.shape
for i in range(NumFact):
ax=fig.add_subplot(DimPlots,2,i+1)
# n, bins, patches = ax.hist(hplot[:,i], p, color='k',ec='white')
n, bin_edges = np.histogram(hplot[:,i], bins = p,)
bwidth = width
xlocations = np.linspace(0.,1.,self.intervals)-bwidth/2.
ax.bar(xlocations, n, width = bwidth, color='k')
majloc1 = MaxNLocator(nbins=4, prune='lower', integer=True)
ax.yaxis.set_major_locator(majloc1)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(10)
ax.set_xlim([-0.25,1.25])
ax.set_ylim([0,n.max()+n.max()*0.1])
ax.xaxis.set_ticks([0.0,1.0])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(10)
ax.set_xlabel(self._namelist[i],fontsize=10)
ax.xaxis.set_label_coords(0.5, -0.08)
# Plot the histogram for the original sampling strategy
# Select the matrix
OrigSample = self.OutMatrix[:r*(sizeb),:]
Orihplot = np.zeros((2*r,NumFact))
for i in range(NumFact):
for j in range(r):
# select the first value of the factor
Orihplot[j*2,i] = OrigSample[j*sizeb,i]
# search the second value
for ii in range(1,sizeb):
if OrigSample[j*sizeb+ii,i] != OrigSample[j*sizeb,i]:
kk = 1
Orihplot[j*2+kk,i] = OrigSample[j*sizeb+ii,i]
fig=plt.figure()
fig.subplots_adjust(hspace=0.25,wspace=0.1)
fig.suptitle('Original sampling')
# DimPlots = np.round(NumFact/2)
DimPlots = int(np.ceil(NumFact/2.))
for i in range(NumFact):
ax=fig.add_subplot(DimPlots,2,i+1)
n, bin_edges = np.histogram(Orihplot[:,i], bins = p,)
bwidth = width
xlocations = np.linspace(0.,1.,self.intervals)-bwidth/2.
ax.bar(xlocations, n, width = bwidth, color='k')
majloc1 = MaxNLocator(nbins=4, prune='lower', integer=True)
ax.yaxis.set_major_locator(majloc1)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(10)
ax.set_xlim([-0.25,1.25])
ax.set_ylim([0,n.max()+n.max()*0.1])
ax.xaxis.set_ticks([0.0,1.0])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(10)
ax.set_xlabel(self._namelist[i],fontsize=10)
ax.xaxis.set_label_coords(0.5, -0.08)
# Measure the quality of the sampling strategy
levels = np.arange(0.0, 1.1, 1.0/(p-1))
NumSPoint = np.zeros((NumFact,p))
NumSOrigPoint = np.zeros((NumFact,p))
""" Class definition for an Implicit Component. """
import numpy as np
# pylint: disable=E0611,F0401
from openmdao.main.array_helpers import flattened_value
from openmdao.main.component import Component
from openmdao.main.datatypes.api import Bool
from openmdao.main.interfaces import IImplicitComponent, IVariableTree, \
implements
from openmdao.main.mp_support import has_interface
from openmdao.main.rbac import rbac
class ImplicitComponent(Component):
"""This is the base class for a component that represents an implicit
function
"""
implements(IImplicitComponent)
eval_only = Bool(True, iotype='in', framework_var=True,
desc='Set to False if you define your own solver. '
'Otherwise, OpenMDAO must solve the implicit '
'equations for this component.')
def __init__(self):
super(ImplicitComponent, self).__init__()
self._state_names = None
self._resid_names = None
self._shape_cache = {}
# register callbacks for all of our 'state' traits
for name, trait in self.class_traits().items():
if trait.iotype == 'state':
self._set_input_callback(name)
# This flag is for testing. Set to True to run this as an explicit
# component so you can test derivatives.
self._run_explicit = False
@rbac(('owner', 'user'))
def list_states(self):
"""Return a list of names of state variables in alphabetical order.
This specifies the ordering of the state vector, so if you use a different
ordering internally, override this function to return the states in
the desired order.
"""
if self._run_explicit == True:
return []
if self._state_names is None:
self._state_names = \
sorted([k for k, _ in self.items(iotype='state')])
return self._state_names
@rbac(('owner', 'user'))
def list_residuals(self):
"""Return a list of names of residual variables in alphabetical
order. This specifies the ordering of the residual vector, so if you use
a different order internally, override this function to return the
residuals in the desired order.
"""
if self._run_explicit == True:
return []
if self._resid_names is None:
self._resid_names = \
sorted([k for k, _ in self.items(iotype='residual')])
return self._resid_names
def evaluate(self):
"""run a single step to calculate the residual
values for the given state var values.
This must be overridden in derived classes.
"""
raise NotImplementedError('%s.evaluate' % self.get_pathname())
@rbac(('owner', 'user'))
def config_changed(self, update_parent=True):
"""Reset internally cached values."""
super(ImplicitComponent, self).config_changed(update_parent)
self._state_names = None
self._resid_names = None
self._shape_cache = {}
def check_config(self, strict=False):
"""
Override this function to perform configuration checks specific to
your class. Bad configurations should raise an exception.
"""
super(ImplicitComponent, self).check_config(strict=strict)
# TODO: add check that total width of states == total width of
# residuals
def execute(self):
""" Performs either an internal solver or a single evaluation.
Do not override this function.
"""
if self.eval_only:
self.evaluate()
else:
self.solve()
def get_residuals(self):
"""Return a vector of residual values."""
resids = []
if self._run_explicit == False:
for name in self.list_residuals():
resids.extend(flattened_value(name, getattr(self, name)))
return np.array(resids)
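# --- Illustrative sketch (not part of the framework) ---------------------
# A minimal concrete subclass, assuming a `Float` trait is available from
# openmdao.main.datatypes.api alongside the `Bool` imported above. It
# declares one state and one residual and fills the residual in evaluate().
from openmdao.main.datatypes.api import Float

class SquareRootOfTwo(ImplicitComponent):
    """Implicit relation x**2 - 2 = 0, left to an external solver."""

    x = Float(1.0, iotype='state', desc='state variable')
    r = Float(0.0, iotype='residual', desc='residual of x**2 - 2')

    def evaluate(self):
        # compute the residual for the current state value
        self.r = self.x ** 2 - 2.0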
from itertools import product
from random import randint
from io import BytesIO
import pathlib
import ui
import numpy as np
from PIL import Image as ImageP
from cpu import CPU
from cartridge import Rom
from bus import Bus
PATH = '../'
ROM = 'snake'
NES_PATH = pathlib.Path(PATH + ROM + '.nes')
init_img = ImageP.new('RGB', (32, 32))
base_array = np.asarray(init_img)
diff_array = np.zeros((32, 32, 3), dtype=np.uint8)
BLACK = '#000000'
WHITE = '#ffffff'
GREY = '#808080'
RED = '#ff0000'
GREEN = '#008000'
BLUE = '#0000ff'
MAGENTA = '#ff00ff'
YELLOW = '#ffff00'
CYAN = '#00ffff'
def palette(c_byt: 'u8'):
if c_byt == 0: # 0 => BLACK
return BLACK
elif c_byt == 1: # 1 => WHITE
return WHITE
elif c_byt in (2, 9): # 2 | 9 => GREY
return GREY
elif c_byt in (3, 10): # 3 | 10 => RED
return RED
elif c_byt in (4, 11): # 4 | 11 => GREEN
return GREEN
elif c_byt in (5, 12): # 5 | 12 => BLUE
return BLUE
elif c_byt in (6, 13): # 6 | 13 => MAGENTA
return MAGENTA
elif c_byt in (7, 14): # 7 | 14 => YELLOW
return YELLOW
else: # _ => CYAN
return CYAN
def color(byt: str) -> list:
head = '0x'
r = int(head + byt[1:3], 16)
g = int(head + byt[3:5], 16)
b = int(head + byt[5:7], 16)
num_rgb = np.array([r, g, b], dtype=np.uint8)
return num_rgb
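# Quick sanity check of the byte-to-RGB pipeline (hypothetical REPL session):
# >>> palette(3)
# '#ff0000'
# >>> color(palette(3))
# array([255,   0,   0], dtype=uint8)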
def show_canvas(_cpu):
#canvas = _cpu.memory[0x200:0x600]
#canvas = [_cpu.mem_read(i) for i in range(0x200, 0x600)]
canvas = _cpu.bus.cpu_vram[0x200:0x600]
count = 0
for x, y in product(range(32), range(32)):
byt = canvas[count]
diff_array[x][y] = color(palette(byt))
count += 1
#del canvas
out_array = base_array + diff_array
out_img = ImageP.fromarray(out_array)
re_out = out_img.resize((320, 320))
with BytesIO() as bIO:
re_out.save(bIO, 'png')
re_img = ui.Image.from_data(bIO.getvalue())
del bIO
return re_img
class Key(ui.View):
def __init__(self, call: 'CPU.mem_write', byte_key: int):
self.call = call
self.byte_key = byte_key
self.bg_color = GREY
self.height = 64
self.width = 64
self.alpha = 1
def touch_began(self, touch):
self.alpha = .25
self.call(0xff, self.byte_key)
def touch_ended(self, touch):
self.alpha = 1
class View(ui.View):
def __init__(self):
self.name = 'View'
self.bg_color = .128
self.update_interval = 1 / (2**14)
# --- load the game
# xxx: list or tuple ?
nes_bytes = pathlib.Path.read_bytes(NES_PATH)
rom = Rom(nes_bytes)
bus = Bus(rom)
self.cpu = CPU(bus)
self.cpu.reset()
self.screen_state = np.array([0] * (32 * 32), dtype=np.uint8)
self.im_view = ui.ImageView()
self.im_view.bg_color = 0
self.im_view.height = 320
self.im_view.width = 320
self.im_view.image = show_canvas(self.cpu)
self.add_subview(self.im_view)
self.key_W = Key(self.cpu.mem_write, 0x77)
self.key_S = Key(self.cpu.mem_write, 0x73)
self.key_A = Key(self.cpu.mem_write, 0x61)
self.key_D = Key(self.cpu.mem_write, 0x64)
self.add_subview(self.key_W)
self.add_subview(self.key_S)
self.add_subview(self.key_A)
self.add_subview(self.key_D)
def read_screen_state(self, _cpu: '&CPU') -> bool:
update = False
pre_array = self.screen_state
#memory = [_cpu.mem_read(i) for i in range(0x200, 0x600)]
memory = _cpu.bus.cpu_vram[0x200:0x600]
mem_array = np.array(memory, dtype=np.uint8)
from typing import Tuple, Dict, Any, Union, Callable
import numpy as np
import scipy.ndimage as ndi
from common.exceptionmanager import catch_error_exception
from common.functionutil import ImagesUtil
from preprocessing.imagegenerator import ImageGenerator
_epsilon = 1e-6
class TransformRigidImages(ImageGenerator):
def __init__(self,
size_image: Union[Tuple[int, int, int], Tuple[int, int]],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
is_inverse_transform: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
super(TransformRigidImages, self).__init__(size_image, num_images=1)
if is_normalize_data:
if type_normalize_data == 'featurewise':
self._featurewise_center = True
self._featurewise_std_normalization = True
self._samplewise_center = False
self._samplewise_std_normalization = False
else:
# type_normalize_data == 'samplewise'
self._featurewise_center = False
self._featurewise_std_normalization = False
self._samplewise_center = True
self._samplewise_std_normalization = True
else:
self._featurewise_center = False
self._featurewise_std_normalization = False
self._samplewise_center = False
self._samplewise_std_normalization = False
self._is_zca_whitening = is_zca_whitening
self._zca_epsilon = 1e-6
self._rescale_factor = rescale_factor
self._preprocessing_function = preprocessing_function
self._mean = None
self._std = None
self._principal_components = None
self._is_inverse_transform = is_inverse_transform
self._initialize_gendata()
def update_image_data(self, in_shape_image: Tuple[int, ...]) -> None:
# self._num_images = in_shape_image[0]
pass
def _initialize_gendata(self) -> None:
self._transform_matrix = None
self._transform_params = None
self._count_trans_in_images = 0
def _update_gendata(self, **kwargs) -> None:
seed = kwargs['seed']
(self._transform_matrix, self._transform_params) = self._calc_gendata_random_transform(seed)
self._count_trans_in_images = 0
def _get_image(self, in_image: np.ndarray) -> np.ndarray:
is_type_input_image = (self._count_trans_in_images == 0)
self._count_trans_in_images += 1
return self._get_transformed_image(in_image, is_type_input_image=is_type_input_image)
def _get_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
if ImagesUtil.is_without_channels(self._size_image, in_image.shape):
in_image = np.expand_dims(in_image, axis=-1)
is_reshape_input_image = True
else:
is_reshape_input_image = False
in_image = self._calc_transformed_image(in_image, is_type_input_image=is_type_input_image)
if is_type_input_image:
in_image = self._standardize(in_image)
if is_reshape_input_image:
in_image = np.squeeze(in_image, axis=-1)
return in_image
def _get_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
if ImagesUtil.is_without_channels(self._size_image, in_image.shape):
in_image = np.expand_dims(in_image, axis=-1)
is_reshape_input_image = True
else:
is_reshape_input_image = False
if is_type_input_image:
in_image = self._standardize_inverse(in_image)
in_image = self._calc_inverse_transformed_image(in_image, is_type_input_image=is_type_input_image)
if is_reshape_input_image:
in_image = np.squeeze(in_image, axis=-1)
return in_image
def _calc_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
raise NotImplementedError
def _calc_inverse_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
raise NotImplementedError
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
raise NotImplementedError
def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
raise NotImplementedError
def _standardize(self, in_image: np.ndarray) -> np.ndarray:
if self._preprocessing_function:
in_image = self._preprocessing_function(in_image)
if self._rescale_factor:
in_image *= self._rescale_factor
if self._samplewise_center:
in_image -= np.mean(in_image, keepdims=True)
if self._samplewise_std_normalization:
in_image /= (np.std(in_image, keepdims=True) + _epsilon)
template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \
'training data. Fit it first by calling \'fit(numpy_data)\'.'
if self._featurewise_center:
if self._mean is not None:
in_image -= self._mean
else:
message = template_message_error % ('featurewise_center')
catch_error_exception(message)
if self._featurewise_std_normalization:
if self._std is not None:
in_image /= (self._std + _epsilon)
else:
message = template_message_error % ('featurewise_std_normalization')
catch_error_exception(message)
if self._is_zca_whitening:
if self._principal_components is not None:
flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:])))
whitex = np.dot(flatx, self._principal_components)
in_image = np.reshape(whitex, in_image.shape)
else:
message = template_message_error % ('zca_whitening')
catch_error_exception(message)
return in_image
def _standardize_inverse(self, in_image: np.ndarray) -> np.ndarray:
template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \
'training data. Fit it first by calling \'fit(numpy_data)\'.'
if self._is_zca_whitening:
if self._principal_components is not None:
flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:])))
inverse_principal_componens = np.divide(1.0, self._principal_components)
whitex = np.dot(flatx, inverse_principal_componens)
in_image = np.reshape(whitex, in_image.shape)
else:
message = template_message_error % ('zca_whitening')
catch_error_exception(message)
if self._featurewise_std_normalization:
if self._std is not None:
in_image *= self._std
else:
message = template_message_error % ('featurewise_std_normalization')
catch_error_exception(message)
if self._featurewise_center:
if self._mean is not None:
in_image += self._mean
else:
message = template_message_error % ('featurewise_center')
catch_error_exception(message)
if self._samplewise_std_normalization:
in_image *= np.std(in_image, keepdims=True)
if self._samplewise_center:
in_image += np.mean(in_image, keepdims=True)
if self._rescale_factor:
in_image /= self._rescale_factor
if self._preprocessing_function:
catch_error_exception('Not implemented inverse preprocessing function')
return in_image
@staticmethod
def _flip_axis(in_image: np.ndarray, axis: int) -> np.ndarray:
in_image = np.asarray(in_image).swapaxes(axis, 0)
in_image = in_image[::-1, ...]
in_image = in_image.swapaxes(0, axis)
return in_image
@staticmethod
def _apply_channel_shift(in_image: np.ndarray, intensity: int, channel_axis: int = 0) -> np.ndarray:
in_image = np.rollaxis(in_image, channel_axis, 0)
min_x, max_x = np.min(in_image), np.max(in_image)
channel_images = [np.clip(x_channel + intensity, min_x, max_x) for x_channel in in_image]
in_image = np.stack(channel_images, axis=0)
in_image = np.rollaxis(in_image, 0, channel_axis + 1)
return in_image
def _apply_brightness_shift(self, in_image: np.ndarray, brightness: int) -> np.ndarray:
catch_error_exception('Not implemented brightness shifting option...')
# in_image = array_to_img(in_image)
# in_image = imgenhancer_Brightness = ImageEnhance.Brightness(in_image)
# in_image = imgenhancer_Brightness.enhance(brightness)
# in_image = img_to_array(in_image)
def get_text_description(self) -> str:
raise NotImplementedError
class TransformRigidImages2D(TransformRigidImages):
_img_row_axis = 0
_img_col_axis = 1
_img_channel_axis = 2
def __init__(self,
size_image: Tuple[int, int],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
rotation_range: float = 0.0,
width_shift_range: float = 0.0,
height_shift_range: float = 0.0,
brightness_range: Tuple[float, float] = None,
shear_range: float = 0.0,
zoom_range: Union[float, Tuple[float, float]] = 0.0,
channel_shift_range: float = 0.0,
fill_mode: str = 'nearest',
cval: float = 0.0,
horizontal_flip: bool = False,
vertical_flip: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
self._rotation_range = rotation_range
self._width_shift_range = width_shift_range
self._height_shift_range = height_shift_range
self._brightness_range = brightness_range
self._shear_range = shear_range
self._channel_shift_range = channel_shift_range
self._fill_mode = fill_mode
self._cval = cval
self._horizontal_flip = horizontal_flip
self._vertical_flip = vertical_flip
if np.isscalar(zoom_range):
self._zoom_range = (1 - zoom_range, 1 + zoom_range)
elif len(zoom_range) == 2:
self._zoom_range = (zoom_range[0], zoom_range[1])
else:
message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range))
catch_error_exception(message)
if self._brightness_range is not None:
if len(self._brightness_range) != 2:
message = '\'brightness_range\' should be a tuple of two floats. Received %s' % (str(brightness_range))
catch_error_exception(message)
super(TransformRigidImages2D, self).__init__(size_image,
is_normalize_data=is_normalize_data,
type_normalize_data=type_normalize_data,
is_zca_whitening=is_zca_whitening,
rescale_factor=rescale_factor,
preprocessing_function=preprocessing_function)
def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: rigid transformations
# 2nd: channel shift intensity / flipping
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
return in_image
def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: channel shift intensity / flipping
# 2nd: rigid transformations
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
return in_image
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_range:
theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
else:
theta = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if np.max(self._height_shift_range) < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if np.max(self._width_shift_range) < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._shear_range:
shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
else:
shear = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
# ****************************************************
return (transform_matrix, transform_parameters)
def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of inverse homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_range:
theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
else:
theta = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if self._height_shift_range < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if self._width_shift_range < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._shear_range:
shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
else:
shear = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), np.sin(theta), 0],
[-np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, -tx],
[0, 1, -ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, np.tan(shear), 0],
[0, 1.0 / np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[1.0 / zx, 0, 0],
[0, 1.0 / zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
# ****************************************************
return (transform_matrix, transform_parameters)
@staticmethod
def _transform_matrix_offset_center(matrix: np.ndarray, x: int, y: int) -> np.ndarray:
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
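# Worked example: for a 256 x 256 image, o_x = o_y = 256/2 + 0.5 = 128.5,
# so the composition T(+o) . M . T(-o) applies the transform M about the
# image centre rather than the array origin.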
@staticmethod
def _apply_transform(in_image: np.ndarray, transform_matrix: np.ndarray,
channel_axis: int = 0, fill_mode: str = 'nearest', cval: float = 0.0) -> np.ndarray:
in_image = np.rollaxis(in_image, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix, final_offset, order=1,
mode=fill_mode, cval=cval) for x_channel in in_image]
in_image = np.stack(channel_images, axis=0)
in_image = np.rollaxis(in_image, 0, channel_axis + 1)
return in_image
def get_text_description(self) -> str:
message = 'Rigid 2D transformations of images, with parameters...\n'
message += 'rotation (plane_XY) range: \'%s\'...\n' % (self._rotation_range)
message += 'shift (width, height) range: \'(%s, %s)\'...\n' \
% (self._width_shift_range, self._height_shift_range)
message += 'flip (horizontal, vertical): \'(%s, %s)\'...\n' \
% (self._horizontal_flip, self._vertical_flip)
message += 'zoom (min, max) range: \'(%s, %s)\'...\n' % (self._zoom_range[0], self._zoom_range[1])
message += 'shear (plane_XY) range: \'%s\'...\n' % (self._shear_range)
message += 'fill mode, when applied transformation: \'%s\'...\n' % (self._fill_mode)
return message
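# Minimal usage sketch for TransformRigidImages2D (hypothetical values; in a
# pipeline the generator is normally driven through _update_gendata / _get_image):
# >>> transform = TransformRigidImages2D((256, 256), rotation_range=10.0,
# ...                                     horizontal_flip=True)
# >>> transform._update_gendata(seed=2017)
# >>> out_image = transform._get_image(in_image)  # in_image: a (256, 256) array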
class TransformRigidImages3D(TransformRigidImages):
_img_dep_axis = 0
_img_row_axis = 1
_img_col_axis = 2
_img_channel_axis = 3
def __init__(self,
size_image: Tuple[int, int, int],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
rotation_xy_range: float = 0.0,
rotation_xz_range: float = 0.0,
rotation_yz_range: float = 0.0,
width_shift_range: float = 0.0,
height_shift_range: float = 0.0,
depth_shift_range: float = 0.0,
brightness_range: Tuple[float, float] = None,
shear_xy_range: float = 0.0,
shear_xz_range: float = 0.0,
shear_yz_range: float = 0.0,
zoom_range: Union[float, Tuple[float, float]] = 0.0,
channel_shift_range: float = 0.0,
fill_mode: str = 'nearest',
cval: float = 0.0,
horizontal_flip: bool = False,
vertical_flip: bool = False,
axialdir_flip: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
self._rotation_xy_range = rotation_xy_range
self._rotation_xz_range = rotation_xz_range
self._rotation_yz_range = rotation_yz_range
self._width_shift_range = width_shift_range
self._height_shift_range = height_shift_range
self._depth_shift_range = depth_shift_range
self._brightness_range = brightness_range
self._shear_xy_range = shear_xy_range
self._shear_xz_range = shear_xz_range
self._shear_yz_range = shear_yz_range
self._channel_shift_range = channel_shift_range
self._fill_mode = fill_mode
self._cval = cval
self._horizontal_flip = horizontal_flip
self._vertical_flip = vertical_flip
self._axialdir_flip = axialdir_flip
if np.isscalar(zoom_range):
self._zoom_range = (1 - zoom_range, 1 + zoom_range)
elif len(zoom_range) == 2:
self._zoom_range = (zoom_range[0], zoom_range[1])
else:
message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range))
catch_error_exception(message)
if self._brightness_range is not None:
if len(self._brightness_range) != 2:
message = '\'brightness_range\' should be a tuple of two floats. Received %s' % (str(brightness_range))
catch_error_exception(message)
super(TransformRigidImages3D, self).__init__(size_image,
is_normalize_data=is_normalize_data,
type_normalize_data=type_normalize_data,
is_zca_whitening=is_zca_whitening,
rescale_factor=rescale_factor,
preprocessing_function=preprocessing_function)
def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: rigid transformations
# 2nd: channel shift intensity / flipping
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_axialdir', False):
in_image = self._flip_axis(in_image, axis=self._img_dep_axis)
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
return in_image
def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: channel shift intensity / flipping
# 2nd: rigid transformations
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
if self._transform_params.get('flip_axialdir', False):
in_image = self._flip_axis(in_image, axis=self._img_dep_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
return in_image
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_xy_range:
angle_xy = np.deg2rad(np.random.uniform(-self._rotation_xy_range, self._rotation_xy_range))
else:
angle_xy = 0
if self._rotation_xz_range:
angle_xz = np.deg2rad(np.random.uniform(-self._rotation_xz_range, self._rotation_xz_range))
else:
angle_xz = 0
if self._rotation_yz_range:
angle_yz = np.deg2rad(np.random.uniform(-self._rotation_yz_range, self._rotation_yz_range))
else:
angle_yz = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if self._height_shift_range < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if self._width_shift_range < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._depth_shift_range:
tz = np.random.uniform(-self._depth_shift_range, self._depth_shift_range)
if self._depth_shift_range < 1:
tz *= self._size_image[self._img_dep_axis]
else:
tz = 0
if self._shear_xy_range:
shear_xy = np.deg2rad(np.random.uniform(-self._shear_xy_range, self._shear_xy_range))
else:
shear_xy = 0
if self._shear_xz_range:
shear_xz = np.deg2rad(np.random.uniform(-self._shear_xz_range, self._shear_xz_range))
else:
shear_xz = 0
if self._shear_yz_range:
shear_yz = np.deg2rad(np.random.uniform(-self._shear_yz_range, self._shear_yz_range))
else:
shear_yz = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
(zx, zy, zz) = (1, 1, 1)
else:
(zx, zy, zz) = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 3)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
flip_axialdir = (np.random.random() < 0.5) * self._axialdir_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'flip_axialdir': flip_axialdir,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if angle_xy != 0:
rotation_matrix = np.array([[1, 0, 0, 0],
[0, np.cos(angle_xy), -np.sin(angle_xy), 0],
[0, np.sin(angle_xy), np.cos(angle_xy), 0],
[0, 0, 0, 1]])
transform_matrix = rotation_matrix
if angle_xz != 0:
rotation_matrix = np.array([[np.cos(angle_xz), np.sin(angle_xz), 0, 0],
[-np.sin(angle_xz), np.cos(angle_xz), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
transform_matrix = \
rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)
if angle_yz != 0:
rotation_matrix = np.array([[np.cos(angle_yz), 0, np.sin(angle_yz), 0],
[0, 1, 0, 0],
[-np.sin(angle_yz), 0, np.cos(angle_yz), 0],
[0, 0, 0, 1]])
transform_matrix = \
rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)
if tx != 0 or ty != 0 or tz != 0:
shift_matrix = np.array([[1, 0, 0, tz],
[0, 1, 0, tx],
[0, 0, 1, ty],
[0, 0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
"""
Library of standardized plotting functions for basic plot formats
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import xarray as xr
from scipy.interpolate import interp1d
from scipy.signal import welch
# Standard field labels
# - default: e.g., "Km/s"
# - all superscript: e.g., "K m s^{-1}"
fieldlabels_default_units = {
'wspd': r'Wind speed [m/s]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m/s]',
'v': r'v [m/s]',
'w': r'Vertical wind speed [m/s]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{Km/s}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2/s^2}]$',
}
fieldlabels_superscript_units = {
'wspd': r'Wind speed [m s$^{-1}$]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m s$^{-1}$]',
'v': r'v [m s$^{-1}$]',
'w': r'Vertical wind speed [m s$^{-1}$]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{K m s^{-1}}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2 s^{-2}}]$',
}
# Standard field labels for frequency spectra
spectrumlabels_default_units = {
'u': r'$E_{uu}\;[\mathrm{m^2/s}]$',
'v': r'$E_{vv}\;[\mathrm{m^2/s}]$',
'w': r'$E_{ww}\;[\mathrm{m^2/s}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2/s}]$',
}
spectrumlabels_superscript_units = {
'u': r'$E_{uu}\;[\mathrm{m^2\;s^{-1}}]$',
'v': r'$E_{vv}\;[\mathrm{m^2\;s^{-1}}]$',
'w': r'$E_{ww}\;[\mathrm{m^2\;s^{-1}}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2\;s^{-1}}]$',
}
# Default settings
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
standard_fieldlabels = fieldlabels_default_units
standard_spectrumlabels = spectrumlabels_default_units
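# To render every label with superscript units instead, the aliases above can
# be overridden (an illustrative option, not done by default):
# standard_fieldlabels = fieldlabels_superscript_units
# standard_spectrumlabels = spectrumlabels_superscript_units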
# Supported dimensions and associated names
dimension_names = {
'time': ['datetime','time','Time'],
'height': ['height','heights','z'],
'frequency': ['frequency','f',]
}
# Show debug information
debug = False
def plot_timeheight(datasets,
fields=None,
fig=None,ax=None,
colorschemes={},
fieldlimits=None,
heightlimits=None,
timelimits=None,
fieldlabels={},
labelsubplots=False,
showcolorbars=True,
fieldorder='C',
ncols=1,
subfigsize=(12,4),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time-height contours for different datasets and fields
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are MultiIndex Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Custom axes handle(s).
Size of ax should equal ndatasets*nfields
colorschemes : str or dict
Name of colorschemes. If only one field is plotted, colorschemes
can be a string. Otherwise, it should be a dictionary with
entries <fieldname>: name_of_colorschemes
Missing colorschemes are set to 'viridis'
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showcolorbars : bool
Show colorbar per subplot
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets to axes grid
(row by row). Fields is considered the first axis, so 'C' means
fields change slowest, 'F' means fields change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets and
fields and can not be used to set dataset or field specific
limits, colorschemes, norms, etc.
Example uses include setting shading, rasterized, etc.
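Example
=======
A minimal sketch; 'df' is a hypothetical DataFrame indexed by
(datetime, height) and containing a 'wspd' column:
>>> fig, ax, cbars = plot_timeheight({'WRF': df}, fields='wspd',
...                                  colorschemes='viridis',
...                                  heightlimits=(0,2000))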
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
colorschemes=colorschemes,
fieldorder=fieldorder
)
args.set_missing_fieldlimits()
nfields = len(args.fields)
ndatasets = len(args.datasets)
ntotal = nfields * ndatasets
# Concatenate custom and standard field labels
# (custom field labels overwrite standard field labels if they exist)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Initialise list of colorbars
cbars = []
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height')
timevalues = _get_dim_values(df,'time')
assert(heightvalues is not None), 'timeheight plot needs a height axis'
assert(timevalues is not None), 'timeheight plot needs a time axis'
if isinstance(timevalues, pd.DatetimeIndex):
# If plot local time, shift timevalues
if plot_local_time is not False:
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Convert to days since 0001-01-01 00:00 UTC, plus one
numerical_timevalues = mdates.date2num(timevalues.values)
else:
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# Timevalues is already a numerical array
numerical_timevalues = timevalues
# Create time-height mesh grid
tst = _get_staggered_grid(numerical_timevalues)
zst = _get_staggered_grid(heightvalues)
Ts,Zs = np.meshgrid(tst,zst,indexing='xy')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
# Store plotting options in dictionary
plotting_properties = {
'vmin': args.fieldlimits[field][0],
'vmax': args.fieldlimits[field][1],
'cmap': args.cmap[field]
}
# Index of axis corresponding to dataset i and field j
if args.fieldorder=='C':
axi = i*nfields + j
else:
axi = j*ndatasets + i
# Extract data from dataframe
fieldvalues = _get_pivoted_field(df_pivot,field)
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
im = axv[axi].pcolormesh(Ts,Zs,fieldvalues.T,**plotting_properties)
# Colorbar mark up
if showcolorbars:
cbar = fig.colorbar(im,ax=axv[axi],shrink=1.0)
# Set field label if known
try:
cbar.set_label(args.fieldlabels[field])
except KeyError:
pass
# Save colorbar
cbars.append(cbar)
# Set title if more than one dataset
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
if not heightlimits is None:
axv[-1].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align time, height and color labels
_align_labels(fig,axv,nrows,ncols)
if showcolorbars:
_align_labels(fig,[cb.ax for cb in cbars],nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, 1.0
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Return cbar instead of array if ntotal==1
if len(cbars)==1:
cbars=cbars[0]
if (plot_local_time is not False) and ax2 is not None:
return fig, ax, ax2, cbars
else:
return fig, ax, cbars
def plot_timehistory_at_height(datasets,
fields=None,
heights=None,
fig=None,ax=None,
fieldlimits=None,
timelimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
ncols=1,
subfigsize=(12,3),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time history at specified height(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple heights are
stacked in a single subplot. When multiple datasets and multiple
heights are specified together, heights are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if the inputs are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
heights : float, list, 'all' (or None)
Height(s) for which time history is plotted. heights can be
None if all datasets combined have no more than one height
value. 'all' means the time history for all heights in the
datasets will be plotted (in this case all datasets should
have the same heights)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or nheights)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking heights
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by heights. If
None, stack_by_datasets will be set based on the number of heights
and datasets.
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and heights, and they can not be used to set dataset,
field or height specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
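Example
=======
A minimal sketch; 'df' is a hypothetical DataFrame indexed by
(datetime, height) and containing 'wspd' and 'wdir' columns:
>>> fig, ax = plot_timehistory_at_height({'WRF': df},
...                                      fields=['wspd','wdir'],
...                                      heights=[80.0])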
"""
# Avoid FutureWarning concerning the use of an implicitly registered
# datetime converter for a matplotlib plotting method. The converter
# was registered by pandas on import. Future versions of pandas will
# require explicit registration of matplotlib converters, as done here.
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
args = PlottingInput(
datasets=datasets,
fields=fields,
heights=heights,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
nheights = len(args.heights)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard field labels if they exist)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if nheights>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields*nheights
else:
ntotal = nfields*ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and nheights>1):
showlegend = True
else:
showlegend = False
# Loop over datasets and fields
for i,dfname in enumerate(args.datasets):
df = args.datasets[dfname]
timevalues = _get_dim_values(df,'time',default_idx=True)
assert(timevalues is not None), 'timehistory plot needs a time axis'
heightvalues = _get_dim_values(df,'height')
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# If plot local time, shift timevalues
if (plot_local_time is not False) and \
isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# If any of the requested heights is not available,
# pivot the dataframe to allow interpolation.
# Pivot all fields in a dataset at once to reduce computation time
if (not heightvalues is None) and (not all([h in heightvalues for h in args.heights])):
df_pivot = _get_pivot_table(df,'height',available_fields)
pivoted = True
if debug: print('Pivoting '+dfname)
else:
pivoted = False
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, height in enumerate(args.heights):
# Store plotting options in dictionary
# Set default linestyle to '-' and no markers
plotting_properties = {
'linestyle':'-',
'marker':None,
}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and height k
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple heights are compared
if nheights>1:
axv[axi].set_title('z = {:.1f} m'.format(height),fontsize=16)
# Set colors
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
axi = i*nfields + j
# Use height as label
if showlegend:
plotting_properties['label'] = 'z = {:.1f} m'.format(height)
# Set title if multiple datasets are compared
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Set colors
if cmap is not None:
cmap = mpl.cm.get_cmap(cmap)
plotting_properties['color'] = cmap(k/(nheights-1))
else:
plotting_properties['color'] = default_colors[k % len(default_colors)]
# Extract data from dataframe
if pivoted:
signal = interp1d(heightvalues,_get_pivoted_field(df_pivot,field).values,axis=-1,fill_value="extrapolate")(height)
else:
slice_z = _get_slice(df,height,'height')
signal = _get_field(slice_z,field).values
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
axv[axi].plot(timevalues,signal,**plotting_properties)
# Set field label if known
try:
axv[axi].set_ylabel(args.fieldlabels[field])
except KeyError:
pass
# Set field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set axis grid
for axi in axv:
axi.xaxis.grid(True,which='both')
axi.yaxis.grid(True)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, 1.0
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
# Align labels
_align_labels(fig,axv,nrows,ncols)
if (plot_local_time is not False) and ax2 is not None:
return fig, ax, ax2
else:
return fig, ax
def plot_profile(datasets,
fields=None,
times=None,
timerange=None,
fig=None,ax=None,
fieldlimits=None,
heightlimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
fieldorder='C',
ncols=None,
subfigsize=(4,5),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot vertical profile at specified time(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple times are
stacked in a single subplot. When multiple datasets and multiple
times are specified together, times are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if the inputs are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
times : str, int, float, list (or None)
Time(s) for which vertical profiles are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value, or if timerange is specified.
timerange : tuple or list
Start and end times (inclusive) between which all times are
plotted. If cmap is None, then it will automatically be set to
viridis by default. This overrides times when specified.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or ntimes)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking times
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by times. If
None, stack_by_datasets will be set based on the number of times
and datasets.
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets/times (depending
on stack_by_datasets) to axes grid (row by row). Fields is considered the
first axis, so 'C' means fields change slowest, 'F' means fields
change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
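Example
=======
A minimal sketch; 'df' is a hypothetical DataFrame indexed by
(datetime, height) and containing a 'theta' column:
>>> fig, ax = plot_profile({'WRF': df}, fields='theta',
...                        times='2020-01-01 12:00')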
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
timerange=timerange,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
fieldorder=fieldorder,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard field labels if they exist)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if ntimes>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields * ntimes
else:
ntotal = nfields * ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=int(ntotal/nfields),
fieldorder=args.fieldorder,
avoid_single_column=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.4,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and ntimes>1):
showlegend = True
else:
showlegend = False
# Set default sequential colormap if timerange was specified
if (timerange is not None) and (cmap is None):
cmap = 'viridis'
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height',default_idx=True)
assert(heightvalues is not None), 'profile plot needs a height axis'
timevalues = _get_dim_values(df,'time')
# If plot local time, shift timevalues
timedelta_to_local = None
if plot_local_time is not False:
timedelta_to_local = pd.to_timedelta(local_time_offset,'h')
timevalues = timevalues + timedelta_to_local
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
if timevalues is not None:
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and time k
if args.fieldorder == 'C':
axi = j*ntimes + k
else:
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple times are compared
if ntimes>1:
if isinstance(time, (int,float,np.number)):
tstr = '{:g} s'.format(time)
else:
if plot_local_time is False:
tstr = pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC')
elif plot_local_time is True:
tstr = pd.to_datetime(time).strftime('%Y-%m-%d %H:%M')
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
tstr = pd.to_datetime(time).strftime(plot_local_time)
axv[axi].set_title(tstr, fontsize=16)
# Set color
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
if args.fieldorder == 'C':
axi = j*ndatasets + i
else:
axi = i*nfields + j
# Use time as label
if showlegend:
if isinstance(time, (int,float,np.number)):
plotting_properties['label'] = '{:g} s'.format(time)
else:
if plot_local_time is False:
plotting_properties['label'] = pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC')
elif plot_local_time is True:
plotting_properties['label'] = pd.to_datetime(time).strftime('%Y-%m-%d %H:%M')
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
plotting_properties['label'] = pd.to_datetime(time).strftime(plot_local_time)
# Set title if multiple datasets are compared
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Set colors
if cmap is not None:
cmap = mpl.cm.get_cmap(cmap)
plotting_properties['color'] = cmap(k/(ntimes-1))
else:
plotting_properties['color'] = default_colors[k % len(default_colors)]
# Extract data from dataframe
if timevalues is None:
# Dataset will not be pivoted
fieldvalues = _get_field(df,field).values
else:
if plot_local_time is not False:
# specified times are in local time, convert back to UTC
slice_t = _get_slice(df_pivot,time-timedelta_to_local,'time')
else:
slice_t = _get_slice(df_pivot,time,'time')
fieldvalues = _get_pivoted_field(slice_t,field).values.squeeze()
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
try:
axv[axi].plot(fieldvalues,heightvalues,**plotting_properties)
except ValueError as e:
print(e,'--', time, 'not found in index?')
# Set field label if known
try:
axv[axi].set_xlabel(args.fieldlabels[field])
except KeyError:
pass
# Set field limits if specified
try:
axv[axi].set_xlim(args.fieldlimits[field])
except KeyError:
pass
for axi in axv:
axi.grid(True,which='both')
# Set height limits if specified
if not heightlimits is None:
axv[0].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align labels
_align_labels(fig,axv,nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, -0.18
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
return fig,ax
def plot_spectrum(datasets,
fields=None,
height=None,
times=None,
fig=None,ax=None,
fieldlimits=None,
freqlimits=None,
fieldlabels={},
labelsubplots=False,
showlegend=None,
ncols=None,
subfigsize=(4,5),
datasetkwargs={},
**kwargs
):
"""
Plot frequency spectrum at a given height for different datasets,
time(s) and field(s), using a subplot per time and per field.
Note that this function does not interpolate to the requested height,
i.e., if height is not None, the specified value should be available
in all datasets.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s) with spectrum data. If more than one set,
datasets should be a dictionary with entries
<dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if the inputs are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
height : float (or None)
Height for which frequency spectra are plotted. If datasets
have no height dimension, height does not need to be specified.
times : str, int, float, list (or None)
Time(s) for which frequency spectra are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Custom axes handle(s).
Size of ax should equal nfields * ntimes
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
freqlimits : list or tuple
Frequency axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
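Example
=======
A minimal sketch; 'spectra' is a hypothetical DataFrame indexed by
(datetime, height, frequency) and containing a 'u' column:
>>> fig, ax = plot_spectrum({'sonic': spectra}, fields='u',
...                         height=80.0, times='2020-01-01 12:00')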
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
ntotal = nfields * ntimes
# Concatenate custom and standard field labels
# (custom field labels overwrite standard field labels if they exist)
args.fieldlabels = {**standard_spectrumlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=ntimes,
avoid_single_column=True,
sharex=True,
subfigsize=subfigsize,
wspace=0.3,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if ndatasets>1:
showlegend = True
else:
showlegend = False
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
frequencyvalues = _get_dim_values(df,'frequency',default_idx=True)
assert(frequencyvalues is not None), 'spectrum plot needs a frequency axis'
timevalues = _get_dim_values(df,'time')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
if showlegend:
plotting_properties['label'] = dfname
# Index of axis corresponding to field j and time k
axi = j*ntimes + k
# Axes mark up
if i==0 and ntimes>1:
axv[axi].set_title(pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC'),fontsize=16)
# Gather label, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Get field spectrum
slice_t = _get_slice(df,time,'time')
slice_tz = _get_slice(slice_t,height,'height')
spectrum = _get_field(slice_tz,field).values
# Plot data
axv[axi].loglog(frequencyvalues[1:],spectrum[1:],**plotting_properties)
# Set field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set frequency label
for c in range(ncols):
axv[ncols*(nrows-1)+c].set_xlabel('$f$ [Hz]')
# Set field label if known
for r in range(nrows):
try:
axv[r*ncols].set_ylabel(args.fieldlabels[args.fields[r]])
except KeyError:
pass
# Align labels
_align_labels(fig,axv,nrows,ncols)
# Set frequency limits if specified
if not freqlimits is None:
axv[0].set_xlim(freqlimits)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, -0.18
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
return fig, ax
# ---------------------------------------------
#
# DEFINITION OF AUXILIARY CLASSES AND FUNCTIONS
#
# ---------------------------------------------
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class PlottingInput(object):
"""
Auxiliary class to collect input data and options for plotting
functions, and to check if the inputs are consistent
"""
supported_datatypes = (
pd.Series,
pd.DataFrame,
xr.DataArray,
xr.Dataset,
)
def __init__(self, datasets, fields, **argd):
# Add all arguments as class attributes
self.__dict__.update({'datasets':datasets,
'fields':fields,
**argd})
# Check consistency of all attributes
self._check_consistency()
def _check_consistency(self):
"""
Check consistency of all input data
"""
# ----------------------
# Check dataset argument
# ----------------------
# If a single dataset is provided, convert to a dictionary
# under a generic key 'Dataset'
if isinstance(self.datasets, self.supported_datatypes):
self.datasets = {'Dataset': self.datasets}
for dfname,df in self.datasets.items():
# convert dataset types here
if isinstance(df, (xr.Dataset,xr.DataArray)):
# handle xarray datatypes
self.datasets[dfname] = df.to_dataframe()
columns = self.datasets[dfname].columns
if len(columns) == 1:
# convert to pd.Series
self.datasets[dfname] = self.datasets[dfname][columns[0]]
else:
assert(isinstance(df, self.supported_datatypes)), \
"Dataset {:s} of type {:s} not supported".format(dfname,str(type(df)))
# ----------------------
# Check fields argument
# ----------------------
# If no fields are specified, check that
# - all datasets are series
# - the name of every series is either None or matches other series names
if self.fields is None:
assert(all([isinstance(self.datasets[dfname],pd.Series) for dfname in self.datasets])), \
"'fields' argument must be specified unless all datasets are pandas Series"
series_names = set()
for dfname in self.datasets:
series_names.add(self.datasets[dfname].name)
if len(series_names)==1:
self.fields = list(series_names)
else:
raise InputError('attempting to plot multiple series with different field names')
elif isinstance(self.fields,str):
# If fields='all', retrieve fields from dataset
if self.fields=='all':
self.fields = _get_fieldnames(list(self.datasets.values())[0])
assert(all([_get_fieldnames(df)==self.fields for df in self.datasets.values()])), \
"The option fields = 'all' only works when all datasets have the same fields"
# If fields is a single instance, convert to a list
else:
self.fields = [self.fields,]
# ----------------------------------
# Check match of fields and datasets
# ----------------------------------
# Check if all datasets have at least one of the requested fields
for dfname in self.datasets:
df = self.datasets[dfname]
if isinstance(df,pd.DataFrame):
assert(any([field in df.columns for field in self.fields])), \
'DataFrame '+dfname+' does not contain any of the requested fields'
elif isinstance(df,pd.Series):
if df.name is None:
assert(len(self.fields)==1), \
'Series must have a name if more than one field is specified'
else:
assert(df.name in self.fields), \
'Series '+dfname+' does not match any of the requested fields'
# ---------------------------------
# Check heights argument (optional)
# ---------------------------------
try:
# If no heights are specified, check that all datasets combined have
# no more than one height value
if self.heights is None:
av_heights = set()
for df in self.datasets.values():
heightvalues = _get_dim_values(df,'height')
try:
for height in heightvalues:
av_heights.add(height)
except TypeError:
# heightvalues is None
pass
if len(av_heights)==0:
# None of the datasets have height values
self.heights = [None,]
elif len(av_heights)==1:
self.heights = list(av_heights)
else:
raise InputError("found more than one height value so 'heights' argument must be specified")
# If heights='all', retrieve heights from dataset
elif isinstance(self.heights,str) and self.heights=='all':
self.heights = _get_dim_values(list(self.datasets.values())[0],'height')
assert(all([np.allclose(_get_dim_values(df,'height'),self.heights) for df in self.datasets.values()])), \
"The option heights = 'all' only works when all datasets have the same vertical levels"
# If heights is single instance, convert to list
elif isinstance(self.heights,(int,float)):
self.heights = [self.heights,]
except AttributeError:
pass
# -----------------------------------
# Check timerange argument (optional)
# -----------------------------------
try:
if self.timerange is not None:
if self.times is not None:
print('Using specified time range',self.timerange,
'and ignoring',self.times)
assert isinstance(self.timerange,(tuple,list)), \
'Need to specify timerange as (starttime,endtime)'
assert (len(self.timerange) == 2)
try:
starttime = pd.to_datetime(self.timerange[0])
endtime = pd.to_datetime(self.timerange[1])
except ValueError:
print('Unable to convert timerange to timestamps')
else:
# get unique times from all datasets
alltimes = []
for df in self.datasets.values():
alltimes += list(_get_dim_values(df,'time'))
alltimes = pd.DatetimeIndex(np.unique(alltimes))
inrange = (alltimes >= starttime) & (alltimes <= endtime)
self.times = alltimes[inrange]
except AttributeError:
pass
# ---------------------------------
# Check times argument (optional)
# ---------------------------------
# If times is single instance, convert to list
try:
# If no times are specified, check that all datasets combined have
# no more than one time value
if self.times is None:
av_times = set()
for df in self.datasets.values():
timevalues = _get_dim_values(df,'time')
try:
for time in timevalues.values:
av_times.add(time)
except AttributeError:
pass
if len(av_times)==0:
# None of the datasets have time values
self.times = [None,]
elif len(av_times)==1:
self.times = list(av_times)
else:
raise InputError("found more than one time value so 'times' argument must be specified")
elif isinstance(self.times,(str,int,float,np.number,pd.Timestamp)):
self.times = [self.times,]
except AttributeError:
pass
# -------------------------------------
# Check fieldlimits argument (optional)
# -------------------------------------
# If one set of fieldlimits is specified, check number of fields
# and convert to dictionary
try:
if self.fieldlimits is None:
self.fieldlimits = {}
elif isinstance(self.fieldlimits, (list, tuple)):
assert(len(self.fields)==1), 'Unclear to what field fieldlimits corresponds'
self.fieldlimits = {self.fields[0]:self.fieldlimits}
except AttributeError:
self.fieldlimits = {}
# -------------------------------------
# Check fieldlabels argument (optional)
# -------------------------------------
# If one fieldlabel is specified, check number of fields
try:
if isinstance(self.fieldlabels, str):
assert(len(self.fields)==1), 'Unclear to what field fieldlabels corresponds'
self.fieldlabels = {self.fields[0]: self.fieldlabels}
except AttributeError:
self.fieldlabels = {}
# -------------------------------------
# Check colorscheme argument (optional)
# -------------------------------------
# If one colorscheme is specified, check number of fields
try:
self.cmap = {}
if isinstance(self.colorschemes, str):
assert(len(self.fields)==1), 'Unclear to what field colorschemes corresponds'
self.cmap[self.fields[0]] = mpl.cm.get_cmap(self.colorschemes)
else:
# Set missing colorschemes to viridis
for field in self.fields:
if field not in self.colorschemes.keys():
if field == 'wdir':
self.colorschemes[field] = 'twilight'
else:
self.colorschemes[field] = 'viridis'
self.cmap[field] = mpl.cm.get_cmap(self.colorschemes[field])
except AttributeError:
pass
# -------------------------------------
# Check fieldorder argument (optional)
# -------------------------------------
# Make sure fieldorder is recognized
try:
assert(self.fieldorder in ['C','F']), "Error: fieldorder '"\
+self.fieldorder+"' not recognized, must be either 'C' or 'F'"
except AttributeError:
pass
def set_missing_fieldlimits(self):
"""
Set missing fieldlimits to min and max over all datasets
"""
for field in self.fields:
if field not in self.fieldlimits.keys():
try:
self.fieldlimits[field] = [
min([_get_field(df,field).min() for df in self.datasets.values() if _contains_field(df,field)]),
max([_get_field(df,field).max() for df in self.datasets.values() if _contains_field(df,field)])
]
except ValueError:
self.fieldlimits[field] = [None,None]
def _get_dim(df,dim,default_idx=False):
"""
Search for specified dimension in dataset and return
level (referred to by either label or position) and
axis {0 or ‘index’, 1 or ‘columns’}
If default_idx is True, return a single unnamed index
if present
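For example, for a DataFrame with a ('datetime','height')
MultiIndex, _get_dim(df,'height') returns ('height', 0), while a
plain 'height' column gives ('height', 1).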
"""
assert(dim in dimension_names.keys()), \
"Dimension '"+dim+"' not supported"
# 1. Try to find dim based on name
for name in dimension_names[dim]:
if name in df.index.names:
if debug: print("Found "+dim+" dimension in index with name '{}'".format(name))
return name, 0
else:
try:
if name in df.columns:
if debug: print("Found "+dim+" dimension in column with name '{}'".format(name))
return name, 1
except AttributeError:
# pandas Series has no columns
pass
# 2. Look for Datetime or Timedelta index
if dim=='time':
for idx in range(len(df.index.names)):
if isinstance(df.index.get_level_values(idx),(pd.DatetimeIndex,pd.TimedeltaIndex,pd.PeriodIndex)):
if debug: print("Found "+dim+" dimension in index with level {} without a name ".format(idx))
return idx, 0
# 3. If default index is True, assume that a
# single nameless index corresponds to the
# requested dimension
if (not isinstance(df.index,(pd.MultiIndex,pd.DatetimeIndex,pd.TimedeltaIndex,pd.PeriodIndex))
and default_idx and (df.index.name is None) ):
if debug: print("Assuming nameless index corresponds to '{}' dimension".format(dim))
return 0,0
# 4. Did not find the requested dimension
if debug: print("Found no "+dim+" dimension")
return None, None
def _get_available_fieldnames(df,fieldnames):
"""
Return subset of fields available in df
"""
available_fieldnames = []
if isinstance(df,pd.DataFrame):
for field in fieldnames:
if field in df.columns:
available_fieldnames.append(field)
# A Series only has one field, so return that field name
# (if that field is not in fields, an error would have been raised)
elif isinstance(df,pd.Series):
available_fieldnames.append(df.name)
return available_fieldnames
def _get_fieldnames(df):
"""
Return list of fieldnames in df
"""
if isinstance(df,pd.DataFrame):
fieldnames = list(df.columns)
# Remove any column corresponding to
# a dimension (time, height or frequency)
for dim in dimension_names.keys():
name, axis = _get_dim(df,dim)
if axis==1:
fieldnames.remove(name)
return fieldnames
elif isinstance(df,pd.Series):
return [df.name,]
def _contains_field(df,fieldname):
if isinstance(df,pd.DataFrame):
return fieldname in df.columns
elif isinstance(df,pd.Series):
return (df.name is None) or (df.name==fieldname)
def _get_dim_values(df,dim,default_idx=False):
"""
Return values for a given dimension
"""
level, axis = _get_dim(df,dim,default_idx)
# Requested dimension is an index
if axis==0:
return df.index.get_level_values(level).unique()
# Requested dimension is a column
elif axis==1:
return df[level].unique()
# Requested dimension not available
else:
return None
def _get_pivot_table(df,dim,fieldnames):
"""
Return pivot table with given fieldnames as columns
"""
level, axis = _get_dim(df,dim)
# Unstack an index
if axis==0:
return df.unstack(level=level)
# Pivot about a column
elif axis==1:
return df.pivot(columns=level,values=fieldnames)
# Dimension not found, return dataframe
else:
return df
def _get_slice(df,key,dim):
"""
Return cross-section of dataset
"""
if key is None:
return df
# Get dimension level and axis
level, axis = _get_dim(df,dim)
# Requested dimension is an index
if axis==0:
if isinstance(df.index,pd.MultiIndex):
return df.xs(key,level=level)
else:
return df.loc[df.index==key]
# Requested dimension is a column
elif axis==1:
return df.loc[df[level]==key]
# Requested dimension not available, return dataframe
else:
return df
def _get_field(df,fieldname):
"""
Return field from dataset
"""
if isinstance(df,pd.DataFrame):
return df[fieldname]
elif isinstance(df,pd.Series):
if df.name is None or df.name==fieldname:
return df
else:
return None
def _get_pivoted_field(df,fieldname):
"""
Return field from pivoted dataset
"""
if isinstance(df.columns,pd.MultiIndex):
return df[fieldname]
else:
return df
def _create_subplots_if_needed(ntotal,
ncols=None,
default_ncols=1,
fieldorder='C',
avoid_single_column=False,
sharex=False,
sharey=False,
subfigsize=(12,3),
wspace=0.2,
hspace=0.2,
fig=None,
ax=None
):
"""
Auxiliary function to create fig and ax
If fig and ax are None:
- Set nrows and ncols based on ntotal and specified ncols,
accounting for fieldorder and avoid_single_column
- Create fig and ax with nrows and ncols, taking into account
sharex, sharey, subfigsize, wspace, hspace
If fig and ax are not None:
- Try to determine nrows and ncols from ax
- Check whether size of ax corresponds to ntotal
"""
if ax is None:
if not ncols is None:
# Use ncols if specified and appropriate
assert(ntotal%ncols==0), 'Error: Specified number of columns is not a true divisor of total number of subplots'
nrows = int(ntotal/ncols)
else:
# Default number of columns
ncols = default_ncols
nrows = int(ntotal/ncols)
if fieldorder=='F':
# Swap number of rows and columns
nrows, ncols = ncols, nrows
if avoid_single_column and ncols==1:
# Swap number of rows and columns
nrows, ncols = ncols, nrows
# Create fig and ax with nrows and ncols
fig,ax = plt.subplots(nrows=nrows,ncols=ncols,sharex=sharex,sharey=sharey,figsize=(subfigsize[0]*ncols,subfigsize[1]*nrows))
# Adjust subplot spacing
fig.subplots_adjust(wspace=wspace,hspace=hspace)
else:
# Make sure user-specified axes has appropriate size
assert(np.asarray(ax).size==ntotal), 'Specified axes does not have the right size'
# Determine nrows and ncols in specified axes
if isinstance(ax,mpl.axes.Axes):
nrows, ncols = (1,1)
else:
try:
nrows,ncols = np.asarray(ax).shape
except ValueError:
# ax array has only one dimension
# Determine whether ax is single row or single column based
# on individual ax positions x0 and y0
x0s = [axi.get_position().x0 for axi in ax]
y0s = [axi.get_position().y0 for axi in ax]
if all(x0==x0s[0] for x0 in x0s):
# All axes have the same relative x0 position
nrows = np.asarray(ax).size
ncols = 1
elif all(y0==y0s[0] for y0 in y0s):
# All axes have the same relative y0 position
nrows = 1
ncols = np.asarray(ax).size
else:
# More complex axes configuration,
# currently not supported
raise InputError('could not determine nrows and ncols in specified axes, complex axes configuration currently not supported')
return fig, ax, nrows, ncols
def _format_legend(axv,index):
"""
Auxiliary function to format legend
Usage
=====
axv : numpy 1d array
Flattened array of axes
index : int
Index of the axis where to place the legend
"""
all_handles = []
all_labels = []
# Check each axes and add new handle
for axi in axv:
handles, labels = axi.get_legend_handles_labels()
for handle,label in zip(handles,labels):
if not label in all_labels:
all_labels.append(label)
all_handles.append(handle)
leg = axv[index].legend(all_handles,all_labels,loc='upper left',bbox_to_anchor=(1.05,1.0),fontsize=16)
return leg
def _format_time_axis(fig,ax,
plot_local_time,
local_time_offset,
timelimits
):
"""
Auxiliary function to format time axis
"""
ax[-1].xaxis_date()
if timelimits is not None:
timelimits = [pd.to_datetime(tlim) for tlim in timelimits]
hour_interval = _determine_hourlocator_interval(ax[-1],timelimits)
if plot_local_time is not False:
if plot_local_time is True:
localtimefmt = '%I %p'
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
localtimefmt = plot_local_time
# Format first axis (local time)
ax[-1].xaxis.set_minor_locator(mdates.HourLocator(byhour=range(0,24,hour_interval)))
ax[-1].xaxis.set_minor_formatter(mdates.DateFormatter(localtimefmt))
ax[-1].xaxis.set_major_locator(mdates.DayLocator(interval=12)) #Choose large interval so dates are not plotted
ax[-1].xaxis.set_major_formatter(mdates.DateFormatter(''))
# Set time limits if specified
if not timelimits is None:
local_timelimits = pd.to_datetime(timelimits) + pd.to_timedelta(local_time_offset,'h')
ax[-1].set_xlim(local_timelimits)
tstr = 'Local time'
ax2 = []
for axi in ax:
# Format second axis (UTC time)
ax2i = axi.twiny()
ax2i.xaxis_date()
# Set time limits if specified
if not timelimits is None:
ax2i.set_xlim(timelimits)
else:
# Extract timelimits from main axis
local_timelimits = mdates.num2date(axi.get_xlim())
timelimits = pd.to_datetime(local_timelimits) - pd.to_timedelta(local_time_offset,'h')
ax2i.set_xlim(timelimits)
# Move twinned axis ticks and label from top to bottom
ax2i.xaxis.set_ticks_position("bottom")
ax2i.xaxis.set_label_position("bottom")
# Offset the twin axis below the host
ax2i.spines["bottom"].set_position(("axes", -0.35))
# Turn on the frame for the twin axis, but then hide all
# but the bottom spine
ax2i.set_frame_on(True)
ax2i.patch.set_visible(False)
#for sp in ax2.spines.itervalues():
# sp.set_visible(False)
ax2i.spines["bottom"].set_visible(True)
ax2i.xaxis.set_minor_locator(mdates.HourLocator(byhour=range(24),interval=hour_interval))
ax2i.xaxis.set_minor_formatter(mdates.DateFormatter('%H%M'))
ax2i.xaxis.set_major_locator(mdates.DayLocator())
ax2i.xaxis.set_major_formatter(mdates.DateFormatter('\n%Y-%m-%d'))
ax2i.set_xlabel('UTC time')
ax2.append(ax2i)
if len(ax2)==1:
ax2 = ax2[0]
else:
ax2 = np.array(ax2)
fig.align_xlabels(ax2)
else:
ax[-1].xaxis.set_minor_locator(mdates.HourLocator(byhour=range(0,24,hour_interval)))
ax[-1].xaxis.set_minor_formatter(mdates.DateFormatter('%H%M'))
ax[-1].xaxis.set_major_locator(mdates.DayLocator())
ax[-1].xaxis.set_major_formatter(mdates.DateFormatter('\n%Y-%m-%d'))
# Set time limits if specified
if not timelimits is None:
ax[-1].set_xlim(timelimits)
tstr = 'UTC time'
ax2 = None
# Now, update all axes
for axi in ax:
# Make sure both major and minor axis labels are visible when they are
# at the same time
axi.xaxis.remove_overlapping_locs = False
# Set time label
axi.set_xlabel(tstr)
return ax2
def _determine_hourlocator_interval(ax,timelimits=None):
"""
Determine hour interval based on timelimits
If plotted time period is
- less than 36 hours: interval = 3
- less than 72 hours: interval = 6
- otherwise: interval = 12
"""
# Get timelimits
if timelimits is None:
timelimits = pd.to_datetime(mdates.num2date(ax.get_xlim()))
elif isinstance(timelimits[0],str):
timelimits = pd.to_datetime(timelimits)
# Determine time period in hours
timeperiod = (timelimits[1] - timelimits[0])/pd.to_timedelta(1,'h')
# HourLocator interval
if timeperiod < 36:
return 3
elif timeperiod < 72:
return 6
else:
return 12
def _get_staggered_grid(x):
"""
Return staggered grid locations
For input array size N, output array
has a size of N+1
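For example (a sketch): x = [10., 20., 40.] gives
[5., 15., 30., 50.], i.e. cell edges at the midpoints,
linearly extrapolated at both ends.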
"""
idx = np.arange(x.size)
f = interp1d(idx,x,fill_value='extrapolate')
return f(np.arange(-0.5,x.size+0.5,1))
def _align_labels(fig,ax,nrows,ncols):
"""
Align labels of a given axes grid
"""
# Align xlabels row by row
for r in range(nrows):
fig.align_xlabels(ax[r*ncols:(r+1)*ncols])
# Align ylabels column by column
for c in range(ncols):
fig.align_ylabels(ax[c::ncols])
class TaylorDiagram(object):
"""
Taylor diagram.
Plot model standard deviation and correlation to reference (data)
sample in a single-quadrant polar plot, with r=stddev and
theta=arccos(correlation).
Based on code from <NAME> <<EMAIL>>
Downloaded from https://gist.github.com/ycopin/3342888 on 2020-06-19
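Example (a minimal sketch; refstd, stddev and corrcoef are assumed to be
precomputed scalars from a reference and a model time series):
>>> dia = TaylorDiagram(refstd, label='obs', normalize=True)
>>> dia.add_sample(stddev, corrcoef, marker='o', ls='', label='model')
>>> dia.add_contours(levels=5, colors='0.5')
>>> dia.add_grid()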
"""
def __init__(self, refstd,
fig=None, rect=111, label='_', srange=(0, 1.5), extend=False,
normalize=False,
corrticks=[0, 0.2, 0.4, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 1],
minorcorrticks=None,
stdevticks=None,
labelsize=None):
"""
Set up Taylor diagram axes, i.e. single quadrant polar
plot, using `mpl_toolkits.axisartist.floating_axes`.
Usage
=====
refstd: np.ndarray
Reference standard deviation to be compared to
fig: plt.Figure, optional
Input figure or None to create a new figure
rect: 3-digit integer
Subplot position, described by: nrows, ncols, index
label: str, optional
Legend label for reference point
srange: tuple, optional
Stdev axis limits, in units of *refstd*
extend: bool, optional
Extend diagram to negative correlations
normalize: bool, optional
Normalize stdev axis by `refstd`
corrticks: list-like, optional
Specify ticks positions on azimuthal correlation axis
minorcorrticks: list-like, optional
Specify minor tick positions on azimuthal correlation axis
stdevticks: int or list-like, optional
Specify stdev axis grid locator based on MaxNLocator (with
integer input) or FixedLocator (with list-like input)
labelsize: int or str, optional
Font size (e.g., 16 or 'x-large') for all axes labels
"""
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist import floating_axes
from mpl_toolkits.axisartist import grid_finder
self.refstd = refstd # Reference standard deviation
self.normalize = normalize
tr = PolarAxes.PolarTransform()
# Correlation labels
if minorcorrticks is None:
rlocs = np.array(corrticks)
else:
rlocs = np.array(sorted(list(corrticks) + list(minorcorrticks)))
if extend:
# Diagram extended to negative correlations
self.tmax = np.pi
rlocs = np.concatenate((-rlocs[:0:-1], rlocs))
else:
# Diagram limited to positive correlations
self.tmax = np.pi/2
if minorcorrticks is None:
rlocstrs = [str(rloc) for rloc in rlocs]
else:
rlocstrs = [str(rloc) if abs(rloc) in corrticks else ''
for rloc in rlocs]
tlocs = np.arccos(rlocs) # Conversion to polar angles
gl1 = grid_finder.FixedLocator(tlocs) # Positions
tf1 = grid_finder.DictFormatter(dict(zip(tlocs, rlocstrs)))
# Stdev labels
if isinstance(stdevticks, int):
gl2 = grid_finder.MaxNLocator(stdevticks)
elif hasattr(stdevticks, '__iter__'):
gl2 = grid_finder.FixedLocator(stdevticks)
else:
gl2 = None
# Standard deviation axis extent (in units of reference stddev)
self.smin, self.smax = srange
if not normalize:
self.smin *= self.refstd
self.smax *= self.refstd
ghelper = floating_axes.GridHelperCurveLinear(
tr,
extremes=(0, self.tmax, self.smin, self.smax),
grid_locator1=gl1,
grid_locator2=gl2,
tick_formatter1=tf1,
#tick_formatter2=tf2
)
if fig is None:
fig = plt.figure()
ax = floating_axes.FloatingSubplot(fig, rect, grid_helper=ghelper)
fig.add_subplot(ax)
# Adjust axes
# - angle axis
ax.axis["top"].set_axis_direction("bottom")
ax.axis["top"].toggle(ticklabels=True, label=True)
ax.axis["top"].major_ticklabels.set_axis_direction("top")
ax.axis["top"].label.set_axis_direction("top")
ax.axis["top"].label.set_text("Correlation")
# - "x" axis
ax.axis["left"].set_axis_direction("bottom")
if normalize:
ax.axis["left"].label.set_text("Normalized standard deviation")
else:
ax.axis["left"].label.set_text("Standard deviation")
# - "y" axis
ax.axis["right"].set_axis_direction("top") # "Y-axis"
ax.axis["right"].toggle(ticklabels=True)
ax.axis["right"].major_ticklabels.set_axis_direction(
"bottom" if extend else "left")
# Set label sizes
if labelsize is not None:
ax.axis["top"].label.set_fontsize(labelsize)
ax.axis["left"].label.set_fontsize(labelsize)
ax.axis["right"].label.set_fontsize(labelsize)
ax.axis["top"].major_ticklabels.set_fontsize(labelsize)
ax.axis["left"].major_ticklabels.set_fontsize(labelsize)
ax.axis["right"].major_ticklabels.set_fontsize(labelsize)
if self.smin:
# get rid of cluster of labels at origin
ax.axis["bottom"].toggle(ticklabels=False, label=False)
else:
ax.axis["bottom"].set_visible(False) # Unused
self._ax = ax # Graphical axes
self.ax = ax.get_aux_axes(tr) # Polar coordinates
# Add reference point and stddev contour
t = np.linspace(0, self.tmax)
r = np.ones_like(t)
if self.normalize:
l, = self.ax.plot([0], [1], 'k*', ls='', ms=10, label=label)
else:
l, = self.ax.plot([0], self.refstd, 'k*', ls='', ms=10, label=label)
r *= refstd
self.ax.plot(t, r, 'k--', label='_')
# Collect sample points for later use (e.g. legend)
self.samplePoints = [l]
def set_ref(self, refstd):
"""
Update the reference standard deviation value
Useful for cases in which datasets with different reference
values (e.g., originating from different reference heights)
are to be overlaid on the same diagram.
"""
self.refstd = refstd
def add_sample(self, stddev, corrcoef, norm=None, *args, **kwargs):
"""
Add sample (*stddev*, *corrcoeff*) to the Taylor
diagram. *args* and *kwargs* are directly propagated to the
`Figure.plot` command.
`norm` may be specified to override the default normalization
value if TaylorDiagram was initialized with normalize=True
"""
if (corrcoef < 0) and (self.tmax == np.pi/2):
print('Note: ({:g},{:g}) not shown for negative correlation; set extend=True'.format(stddev,corrcoef))
return None
if self.normalize:
if norm is None:
norm = self.refstd
elif norm is False:
norm = 1
stddev /= norm
l, = self.ax.plot(np.arccos(corrcoef), stddev,
*args, **kwargs) # (theta, radius)
self.samplePoints.append(l)
return l
def add_grid(self, *args, **kwargs):
"""Add a grid."""
self._ax.grid(*args, **kwargs)
def add_contours(self, levels=5, scale=1.0, **kwargs):
"""
Add constant centered RMS difference contours, defined by *levels*.
"""
rs, ts = np.meshgrid(np.linspace(self.smin, self.smax),
np.linspace(0, self.tmax))
# Compute centered RMS difference
if self.normalize:
# - normalized refstd == 1
# - rs values were previously normalized in __init__
# - premultiply with (scale==refstd) to get correct rms diff
rms = scale * np.sqrt(1 + rs**2 - 2*rs*np.cos(ts))
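# Unnormalized case and contour drawing -- a sketch completing this method,
# following the standard centered RMS difference
# E'^2 = sigma_r^2 + sigma^2 - 2*sigma_r*sigma*cos(theta)
else:
    rms = np.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*np.cos(ts))
contours = self.ax.contour(ts, rs, rms, levels, **kwargs)
return contours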
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook ordinal_regression.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Ordinal Regression
import numpy as np
import pandas as pd
import scipy.stats as stats
from statsmodels.miscmodels.ordinal_model import OrderedModel
# Loading a stata data file from the UCLA website. This notebook is
# inspired by https://stats.idre.ucla.edu/r/dae/ordinal-logistic-regression/
# which is an R notebook from UCLA.
url = "https://stats.idre.ucla.edu/stat/data/ologit.dta"
data_student = pd.read_stata(url)
data_student.head(5)
data_student.dtypes
data_student['apply'].dtype
# This dataset is about the probability for undergraduate students to
# apply to graduate school given three exogenous variables:
# - their grade point average (`gpa`), a float between 0 and 4.
# - `pared`, a binary that indicates if at least one parent went to
# graduate school.
# - and `public`, a binary that indicates if the current undergraduate
# institution of the student is public or private.
#
# `apply`, the target variable is categorical with ordered categories:
# `unlikely` < `somewhat likely` < `very likely`. It is a `pd.Series` of
# categorical type; this is preferred over NumPy arrays.
# The model is based on a numerical latent variable $y_{latent}$ that we
# cannot observe but that we can compute thanks to exogenous variables.
# Moreover we can use this $y_{latent}$ to define $y$ that we can observe.
#
# For more details see the documentation of OrderedModel, [the UCLA
# webpage](https://stats.idre.ucla.edu/r/dae/ordinal-logistic-regression/)
# or this
# [book](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470594001).
#
# ### Probit ordinal regression:
mod_prob = OrderedModel(data_student['apply'],
data_student[['pared', 'public', 'gpa']],
distr='probit')
res_prob = mod_prob.fit(method='bfgs')
res_prob.summary()
# In our model, we have 3 exogenous variables (the $\beta$s if we keep the
# documentation's notations) so we have 3 coefficients that need to be
# estimated.
#
# Those 3 estimations and their standard errors can be retrieved in the
# summary table.
#
# Since there are 3 categories in the target variable (`unlikely`,
# `somewhat likely`, `very likely`), we have two thresholds to estimate.
# As explained in the doc of the method
# `OrderedModel.transform_threshold_params`, the first estimated threshold
# is the actual value and all the other thresholds are in terms of
# cumulative exponentiated increments. Actual threshold values can be
# computed as follows:
num_of_thresholds = 2
mod_prob.transform_threshold_params(res_prob.params[-num_of_thresholds:])
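# Added cross-check (not in the original notebook): the same values can be
# reproduced by hand from the raw parameters -- the first threshold is used
# as-is and the second adds the exponentiated increment to it.
thresh_params = np.asarray(res_prob.params[-num_of_thresholds:])
print(thresh_params[0], thresh_params[0] + np.exp(thresh_params[1]))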
# ### Logit ordinal regression:
mod_log = OrderedModel(data_student['apply'],
data_student[['pared', 'public', 'gpa']],
distr='logit')
res_log = mod_log.fit(method='bfgs', disp=False)
res_log.summary()
predicted = res_log.model.predict(res_log.params,
exog=data_student[['pared', 'public',
'gpa']])
predicted
pred_choice = predicted.argmax(1)
print('Fraction of correct choice predictions')
print((np.asarray(data_student['apply'].values.codes) == pred_choice).mean())
# ### Ordinal regression with a custom cumulative cLogLog distribution:
# In addition to `logit` and `probit` regression, any continuous
# distribution from `SciPy.stats` package can be used for the `distr`
# argument. Alternatively, one can define a custom distribution simply by
# creating a subclass of `rv_continuous` and implementing a few methods.
# using a SciPy distribution
res_exp = OrderedModel(data_student['apply'],
data_student[['pared', 'public', 'gpa']],
distr=stats.expon).fit(method='bfgs', disp=False)
res_exp.summary()
# minimal definition of a custom scipy distribution.
class CLogLog(stats.rv_continuous):
def _ppf(self, q):
return np.log(-np.log(1 - q))
def _cdf(self, x):
return 1 - np.exp(-np.exp(x))
cloglog = CLogLog()
# definition of the model and fitting
res_cloglog = OrderedModel(data_student['apply'],
data_student[['pared', 'public', 'gpa']],
distr=cloglog).fit(method='bfgs', disp=False)
res_cloglog.summary()
# ### Using formulas - treatment of endog
#
# Pandas' ordered categorical and numeric values are supported as
# dependent variable in formulas. Other types will raise a ValueError.
modf_logit = OrderedModel.from_formula("apply ~ 0 + pared + public + gpa",
data_student,
distr='logit')
resf_logit = modf_logit.fit(method='bfgs')
resf_logit.summary()
# Using numerical codes for the dependent variable is supported but loses
# the names of the category levels. The levels and names correspond to the
# unique values of the dependent variable sorted in alphanumeric order as in
# the case without using formulas.
data_student["apply_codes"] = data_student['apply'].cat.codes * 2 + 5
data_student["apply_codes"].head()
OrderedModel.from_formula("apply_codes ~ 0 + pared + public + gpa",
data_student,
distr='logit').fit().summary()
resf_logit.predict(data_student.iloc[:5])
# Using string values directly as dependent variable raises a ValueError.
data_student["apply_str"] = np.asarray(data_student["apply"])
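# Added illustration (hedged): wrapping the call prints the error message
# instead of stopping the script, as the note above describes.
try:
    OrderedModel.from_formula("apply_str ~ 0 + pared + public + gpa",
                              data_student,
                              distr='logit')
except ValueError as err:
    print("ValueError:", err)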
import contextlib
import tempfile
import shutil
import os
from pandas.api.types import is_numeric_dtype
import numpy as np
import cooler
@contextlib.contextmanager
def isolated_filesystem():
"""A context manager that creates a temporary folder and changes
the current working directory to it for isolated filesystem tests.
"""
cwd = os.getcwd()
t = tempfile.mkdtemp()
os.chdir(t)
try:
yield t
finally:
os.chdir(cwd)
try:
shutil.rmtree(t)
except (OSError, IOError):
pass
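# Minimal usage sketch (added): everything written inside the block lands in a
# throwaway directory that is deleted on exit, so tests never pollute the
# repository checkout.
#
#     with isolated_filesystem() as tmpdir:
#         with open("scratch.txt", "w") as fh:
#             fh.write("temporary output\n")
#         assert os.path.samefile(os.getcwd(), tmpdir)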
def cooler_cmp(uri1, uri2):
c1 = cooler.Cooler(uri1)
c2 = cooler.Cooler(uri2)
with c1.open("r") as f1, c2.open("r") as f2:
for path in (
"chroms/name",
"chroms/length",
"bins/chrom",
"bins/start",
"bins/end",
"pixels/bin1_id",
"pixels/bin2_id",
"pixels/count",
):
dset1, dset2 = f1[path], f2[path]
dtype1 = dset1.dtype
dtype2 = dset2.dtype
if dtype1.kind == "S":
# Null padding of ascii arrays is not guaranteed to be
# preserved so we only check the kind.
assert dtype2.kind == "S"
else:
assert dtype1 == dtype2
if is_numeric_dtype(dtype1):
assert np.allclose(dset1[:], dset2[:])
else:
                assert np.all(dset1[:] == dset2[:])
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""convert dataset to mindrecord"""
from io import BytesIO
import os
import csv
import argparse
import glob
from PIL import Image
import numpy as np
import pandas as pd
from mindspore.mindrecord import FileWriter
parser = argparse.ArgumentParser(description='MindSpore delf eval')
parser.add_argument('--train_directory', type=str,
default="/tmp/", help='Training data directory.')
parser.add_argument('--output_directory', type=str,
default="/tmp/", help='Output data directory.')
parser.add_argument('--train_csv_path', type=str,
default="/tmp/train.csv", help='Training data csv file path.')
parser.add_argument('--train_clean_csv_path', type=str, default=None,
help='(Optional) Clean training data csv file path. ')
parser.add_argument('--num_shards', type=int, default=128,
help='Number of shards in output data.')
parser.add_argument(
'--generate_train_validation_splits', type=bool, default=True, help='(Optional) Whether to split the train dataset')
parser.add_argument(
'--validation_split_size', type=float, default=0.2, help='(Optional)The size of the VALIDATION split as a fraction')
parser.add_argument('--seed', type=int, default=0,
help='(Optional) The seed to be used while shuffling the train')
FLAGS = parser.parse_known_args()[0]
_FILE_IDS_KEY = 'file_ids'
_IMAGE_PATHS_KEY = 'image_paths'
_LABELS_KEY = 'labels'
_TEST_SPLIT = 'test'
_TRAIN_SPLIT = 'train'
_VALIDATION_SPLIT = 'validation'
def _get_all_image_files_and_labels(name, csv_path, image_dir):
"""Process input and get the image file paths, image ids and the labels.
Args:
name: 'train' or 'test'.
csv_path: path to the Google-landmark Dataset csv Data Sources files.
image_dir: directory that stores downloaded images.
Returns:
image_paths: the paths to all images in the image_dir.
file_ids: the unique ids of images.
labels: the landmark id of all images. When name='test', the returned labels
will be an empty list.
Raises:
ValueError: if input name is not supported.
"""
image_paths = glob.glob(os.path.join(image_dir, '*.jpg'))
file_ids = [os.path.basename(os.path.normpath(f))[:-4]
for f in image_paths]
if name == _TRAIN_SPLIT:
csv_file = open(csv_path, 'rb')
df = pd.read_csv(csv_file)
df = df.set_index('id')
labels = [int(df.loc[fid]['landmark_id']) for fid in file_ids]
elif name == _TEST_SPLIT:
labels = []
else:
raise ValueError('Unsupported dataset split name: %s' % name)
return image_paths, file_ids, labels
def _get_clean_train_image_files_and_labels(csv_path, image_dir):
"""Get image file paths, image ids and labels for the clean training split.
Args:
csv_path: path to the Google-landmark Dataset v2 CSV Data Sources files
of the clean train dataset. Assumes CSV header landmark_id;images.
image_dir: directory that stores downloaded images.
Returns:
image_paths: the paths to all images in the image_dir.
file_ids: the unique ids of images.
labels: the landmark id of all images.
relabeling: relabeling rules created to replace actual labels with
a continuous set of labels.
"""
# Load the content of the CSV file (landmark_id/label -> images).
csv_file = open(csv_path, 'rb')
df = pd.read_csv(csv_file)
# Create the dictionary (key = file_id, value = {label, file_id}).
images = {}
for _, row in df.iterrows():
label = row['landmark_id']
for file_id in row['images'].split(' '):
images[file_id] = {}
images[file_id]['label'] = label
images[file_id]['file_id'] = file_id
# Add the full image path to the dictionary of images.
image_paths = glob.glob(os.path.join(image_dir, '*.jpg'))
for image_path in image_paths:
file_id = os.path.basename(os.path.normpath(image_path))[:-4]
if file_id in images:
images[file_id]['image_path'] = image_path
# Explode the dictionary into lists (1 per image attribute).
image_paths = []
file_ids = []
labels = []
for _, value in images.items():
image_paths.append(value['image_path'])
file_ids.append(value['file_id'])
labels.append(value['label'])
# Relabel image labels to contiguous values.
unique_labels = sorted(set(labels))
relabeling = {label: index for index, label in enumerate(unique_labels)}
new_labels = [relabeling[label] for label in labels]
return image_paths, file_ids, new_labels, relabeling
def _process_image(filename):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.jpg'.
Returns:
image_buffer: string, JPEG encoding of RGB image.
Raises:
ValueError: if parsed image has wrong number of dimensions or channels.
"""
white_io = BytesIO()
Image.open(filename).save(white_io, 'JPEG')
image_data = white_io.getvalue()
os.remove(filename)
return image_data
def _write_mindrecord(output_prefix, image_paths, file_ids, labels):
"""Read image files and write image and label data into MindRecord files.
Args:
output_prefix: string, the prefix of output files, e.g. 'train'.
image_paths: list of strings, the paths to images to be converted.
file_ids: list of strings, the image unique ids.
labels: list of integers, the landmark ids of images. It is an empty list
when output_prefix='test'.
Raises:
ValueError: if the length of input images, ids and labels don't match
"""
if output_prefix == _TEST_SPLIT:
labels = [None] * len(image_paths)
if not len(image_paths) == len(file_ids) == len(labels):
raise ValueError('length of image_paths, file_ids, labels should be the' +
' same. But they are %d, %d, %d, respectively' %
(len(image_paths), len(file_ids), len(labels)))
output_file = os.path.join(
FLAGS.output_directory, '%s.mindrecord' % (output_prefix))
writer = FileWriter(file_name=output_file, shard_num=FLAGS.num_shards)
cv_schema = {"file_id": {"type": "string"}, "label": {
"type": "int64"}, "data": {"type": "bytes"}}
writer.add_schema(cv_schema, "GLDv2")
writer.add_index(["file_id", "label"])
data = []
for i in range(len(image_paths)):
sample = {}
image_bytes = _process_image(image_paths[i])
sample['file_id'] = file_ids[i]
sample['label'] = labels[i]
sample['data'] = image_bytes
data.append(sample)
if i % 10 == 0:
writer.write_raw_data(data)
data = []
if data:
writer.write_raw_data(data)
print(writer.commit())
def _write_relabeling_rules(relabeling_rules):
"""Write to a file the relabeling rules when the clean train dataset is used.
Args:
relabeling_rules: dictionary of relabeling rules applied when the clean
train dataset is used (key = old_label, value = new_label).
"""
relabeling_file_name = os.path.join(
FLAGS.output_directory, 'relabeling.csv')
relabeling_file = open(relabeling_file_name, 'w')
csv_writer = csv.writer(relabeling_file, delimiter=',')
csv_writer.writerow(['new_label', 'old_label'])
for old_label, new_label in relabeling_rules.items():
csv_writer.writerow([new_label, old_label])
def _shuffle_by_columns(np_array, random_state):
"""Shuffle the columns of a 2D numpy array.
Args:
np_array: array to shuffle.
random_state: numpy RandomState to be used for shuffling.
Returns:
The shuffled array.
"""
columns = np_array.shape[1]
columns_indices = np.arange(columns)
random_state.shuffle(columns_indices)
return np_array[:, columns_indices]
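# Worked example (added for clarity): only the column order changes and rows
# stay aligned, so stacked attributes keep referring to the same image, e.g.
# _shuffle_by_columns(np.array([['a.jpg', 'b.jpg'], ['id_a', 'id_b']]), rs)
# permutes the two columns together -- 'a.jpg' always stays above 'id_a'.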
def _build_train_and_validation_splits(image_paths, file_ids, labels, validation_split_size, seed):
"""Create TRAIN and VALIDATION splits containing all labels in equal proportion.
Args:
image_paths: list of paths to the image files in the train dataset.
file_ids: list of image file ids in the train dataset.
labels: list of image labels in the train dataset.
validation_split_size: size of the VALIDATION split as a ratio of the train
dataset.
seed: seed to use for shuffling the dataset for reproducibility purposes.
Returns:
splits : tuple containing the TRAIN and VALIDATION splits.
Raises:
ValueError: if the image attributes arrays don't all have the same length,
which makes the shuffling impossible.
"""
# Ensure all image attribute arrays have the same length.
total_images = len(file_ids)
if not (len(image_paths) == total_images and len(labels) == total_images):
raise ValueError('Inconsistencies between number of file_ids (%d), number '
                         'of image_paths (%d) and number of labels (%d). Cannot '
'shuffle the train dataset.' % (total_images, len(image_paths), len(labels)))
# Stack all image attributes arrays in a single 2D array of dimensions
    # (3, number of images) and group by label the indices of datapoints in the
# image attributes arrays. Explicitly convert label types from 'int' to 'str'
# to avoid implicit conversion during stacking with image_paths and file_ids
# which are 'str'.
labels_str = [str(label) for label in labels]
image_attrs = np.stack((image_paths, file_ids, labels_str))
image_attrs_idx_by_label = {}
for index, label in enumerate(labels):
if label not in image_attrs_idx_by_label:
image_attrs_idx_by_label[label] = []
image_attrs_idx_by_label[label].append(index)
# Create subsets of image attributes by label, shuffle them separately and
# split each subset into TRAIN and VALIDATION splits based on the size of the
# validation split.
splits = {
_VALIDATION_SPLIT: [],
_TRAIN_SPLIT: []
}
rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(seed)))
for label, indexes in image_attrs_idx_by_label.items():
# Create the subset for the current label.
image_attrs_label = image_attrs[:, indexes]
# Shuffle the current label subset.
image_attrs_label = _shuffle_by_columns(image_attrs_label, rs)
# Split the current label subset into TRAIN and VALIDATION splits and add
# each split to the list of all splits.
images_per_label = image_attrs_label.shape[1]
cutoff_idx = max(1, int(validation_split_size * images_per_label))
splits[_VALIDATION_SPLIT].append(image_attrs_label[:, 0: cutoff_idx])
splits[_TRAIN_SPLIT].append(image_attrs_label[:, cutoff_idx:])
# Concatenate all subsets of image attributes into TRAIN and VALIDATION splits
# and reshuffle them again to ensure variance of labels across batches.
validation_split = _shuffle_by_columns(
np.concatenate(splits[_VALIDATION_SPLIT], axis=1), rs)
train_split = _shuffle_by_columns(
        np.concatenate(splits[_TRAIN_SPLIT], axis=1), rs)
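    # Assumed completion (added; the original is truncated here): per the
    # docstring the two reshuffled splits are returned as a (TRAIN, VALIDATION)
    # tuple.
    return train_split, validation_split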
#!/usr/bin/python
# Script that uses katsdpcal's calprocs to reduce data consisting of offset tracks on multiple point sources.
#
from katsdpcal import calprocs
import pickle
import katdal
import numpy as np
import scikits.fitting as fit
import katpoint
import optparse
#TODO Remove this function once katdal has this functionality
def activity(h5,state = 'track'):
    """Activity sensor, because some of the antennas have a mind of their own,
    others appear to have lost theirs entirely."""
antlist = [a.name for a in h5.ants]
activityV = np.zeros((len(antlist),h5.shape[0]) ,dtype=np.bool)
for i,ant in enumerate(antlist) :
sensor = h5.sensor['%s_activity'%(ant)] ==state
if ~np.any(sensor):
print("Antenna %s has no valid %s data"%(ant,state))
noise_diode = ~h5.sensor['Antennas/%s/nd_coupler'%(ant)]
activityV[i,:] += noise_diode & sensor
return np.all(activityV,axis=0)
def w_average(arr,axis=None, weights=None):
return np.nansum(arr*weights,axis=axis)/np.nansum(weights,axis=axis)
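# Added illustrative check: np.nansum treats NaNs as zero in the numerator, but
# the weight of a NaN sample still counts in the denominator, e.g.
# w_average(np.array([[1., np.nan], [3., 4.]]), axis=0, weights=np.ones((2, 2)))
# returns array([2., 2.]) -- the second column is 4/2, not 4/1.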
def reduce_compscan_inf(h5,rfi_static_flags=None,chunks=16,return_raw=False,use_weights=False,compscan_index=None,debug=False):
"""Break the band up into chunks"""
chunk_size = chunks
rfi_static_flags = np.full(h5.shape[1], False) if (rfi_static_flags is None) else rfi_static_flags
gains_p = {}
stdv = {}
calibrated = False # placeholder for calibration
h5.select(compscans=compscan_index)
# Combine target indices if they refer to the same target for the purpose of this analysis
TGT = h5.catalogue.targets[h5.target_indices[0]].description.split(",")
def _eq_TGT_(tgt): # tgt==TGT, "tags" don't matter
tgt = tgt.description.split(",")
return (tgt[0] == TGT[0]) and (tgt[2] == TGT[2]) and (tgt[3] == TGT[3])
target_indices = [TI for TI in h5.target_indices if _eq_TGT_(h5.catalogue.targets[TI])]
if len(h5.target_indices) > len(target_indices):
print("Warning multiple targets in the compscan, using %s instead of %s"%(target_indices,h5.target_indices))
target = h5.catalogue.targets[h5.target_indices[0]]
if not return_raw: # Calculate average target flux over entire band
flux_spectrum = h5.catalogue.targets[h5.target_indices[0]].flux_density(h5.freqs) # include flags
average_flux = np.mean([flux for flux in flux_spectrum if not np.isnan(flux)])
temperature = np.mean(h5.temperature)
pressure = np.mean(h5.pressure)
humidity = np.mean(h5.humidity)
wind_speed = np.mean(h5.wind_speed)
wind_direction = np.degrees(np.angle(np.mean(np.exp(1j*np.radians(h5.wind_direction)))) )# Vector Mean
sun = katpoint.Target('Sun, special')
# Calculate pointing offset
# Obtain middle timestamp of compound scan, where all pointing calculations are done
middle_time = np.median(h5.timestamps[:], axis=None)
# Start with requested (az, el) coordinates, as they apply at the middle time for a moving target
requested_azel = target.azel(middle_time)
# Correct for refraction, which becomes the requested value at input of pointing model
rc = katpoint.RefractionCorrection()
requested_azel = [requested_azel[0], rc.apply(requested_azel[1], temperature, pressure, humidity)]
requested_azel = katpoint.rad2deg(np.array(requested_azel))
gaussian_centre = np.full((chunk_size * 2, 2, len(h5.ants)), np.nan)
gaussian_centre_std = np.full((chunk_size * 2, 2, len(h5.ants)), np.nan)
gaussian_width = np.full((chunk_size * 2, 2, len(h5.ants)), np.nan)
gaussian_width_std = np.full((chunk_size * 2, 2, len(h5.ants)), np.nan)
gaussian_height = np.full((chunk_size * 2, len(h5.ants)), np.nan)
gaussian_height_std = np.full((chunk_size * 2, len(h5.ants)), np.nan)
if debug :#debug_text
debug_text = []
line = []
line.append("#AntennaPol")
line.append("Target")
line.append("Freq(MHz)") #MHz
line.append("Centre Az")
line.append("Centre El")
line.append("Centre Az Std")
line.append("Centre El Std")
line.append("Centre Az Width")
line.append("Centre El Width")
line.append("Centre Az Width Std")
line.append("Centre El Width Std")
line.append("Height")
line.append("Height Std")
debug_text.append(','.join(line) )
pols = ["H","V"] # Put in logic for Intensity
for i,pol in enumerate(pols) :
gains_p[pol] = []
pos = []
stdv[pol] = []
h5.select(pol=pol,corrprods='cross',ants=h5.antlist,targets=[h5.catalogue.targets[TI] for TI in target_indices],compscans=compscan_index)
h5.bls_lookup = calprocs.get_bls_lookup(h5.antlist,h5.corr_products)
for scan in h5.scans() :
if scan[1] != 'track': continue
valid_index = activity(h5,state = 'track')
data = h5.vis[valid_index]
if data.shape[0] > 0 : # need at least one data point
#g0 = np.ones(len(h5.ants),np.complex)
if use_weights :
weights = h5.weights[valid_index].mean(axis=0)
else:
weights = np.ones(data.shape[1:]).astype(np.float)
gains_p[pol].append(calprocs.g_fit(data[:].mean(axis=0),weights,h5.bls_lookup,refant=0) )
stdv[pol].append(np.ones((data.shape[0],data.shape[1],len(h5.ants))).sum(axis=0))#number of data points
# Get coords in (x(time,ants),y(time,ants) coords)
pos.append( [h5.target_x[valid_index,:].mean(axis=0), h5.target_y[valid_index,:].mean(axis=0)] )
for ant in range(len(h5.ants)):
for chunk in range(chunks):
freq = slice(chunk*(h5.shape[1]//chunks),(chunk+1)*(h5.shape[1]//chunks))
rfi = ~rfi_static_flags[freq]
if (np.array(pos).shape[0] > 4) and np.any(rfi): # Make sure there is enough data for a fit
fitobj = fit.GaussianFit(np.array(pos)[:,:,ant].mean(axis=0),[1.,1.],1)
x = np.column_stack((np.array(pos)[:,0,ant],np.array(pos)[:,1,ant]))
                    y = np.abs(np.array(gains_p[pol]))
#!/usr/bin/env python
# coding: utf-8
"""
This script has to be executed after hi_freq_data_to_csv.py and get_interval.py have succesfully run.
This script should be called with 1 (or 2) arguments.
The 1st mandatory argument is the ABSOLUTE path of the top directory for the flight campaign.
/media/spectors/HDD320/lidar/20201218_fresh <<----- This is it!
----------------------------/20201218_fresh/p_00_joined_pcap_files
----------------------------/20201218_fresh/p_01_apx_csv_shapefile <<----- This must be present and will be used as input.
----------------------------/20201218_fresh/p_02_plt <<----- Not used. Just for reference.
----------------------------/20201218_fresh/p_03_pcap <<----- This must be present and will be used as input.
----------------------------/20201218_fresh/2_planned_mision
----------------------------/20201218_fresh/ .....
----------------------------/20201218_fresh/logging <<----- This is where the logs will be stored.
----------------------------/20201218_fresh/transl_table.txt <<----- This must be present and will be used as input.
The 2nd optional argument can be a boresight-calibration string.
It must contain the boresight angles and be of the following form:
# RabcdefghPijklmnopYqrstuvwx
# Where abcdefgh is millionths of a degree to ROLL. a is the sign (p/n)
# ..... ijklmnop is millionths of a degree to PITCH. i is the sign (p/n)
# ..... qrstuvwx is millionths of a degree to YAW. q is the sign (p/n)
# In this order! ROLL -> PITCH -> YAW !
# Theoretically can encode up to 9.9° around each axis
This script combines .csv files with each of the .pcap flight lines and writes point clouds in .txt files.
It then calls a few LAStools to convert them to las, denoise and set the correct (georeference) metadata.
The script is run non-interactively.
The only exception is choosing the p_01_apx_csv_shapefile and p_03_pcap folders at the beginning if there are multiple of them.
TO DO: add support for different EPSG codes.
"""
import time
import os
import sys
import datetime
import platform
import logging
import shutil
import re
from collections import OrderedDict
from multiprocessing import Pool, cpu_count
from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
from scipy.interpolate import interp1d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scapy.all import rdpcap
#from vlp16_tables import *
import vlp16_tables
log_dir = 'p_logging'
txt_dir_in = 'p_01_apx_csv_shapefile'
txt_in_base_len = len(txt_dir_in)
pcap_dir_in = 'p_03_pcap'
pcap_in_base_len = len(pcap_dir_in)
out_dir_ascii = 'p_04_ascii'
out_ascii_base_len = len(out_dir_ascii)
out_dir_las = 'p_05_las'
out_las_base_len = len(out_dir_las)
transl_table_fn = 'p_transl_table.txt'
fn_keyword = 'hi_freq_apx'
nl = '\n'
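# Hedged helper sketch (added, not part of the original pipeline): one way to
# decode the boresight-calibration string described in the module docstring.
# The function name and its use are illustrative only.
def parse_boresight_string(bs):
    """Return (roll, pitch, yaw) in degrees from e.g. 'Rp0100000Pn0050000Yp0000000'."""
    m = re.fullmatch(r'R([pn])(\d{7})P([pn])(\d{7})Y([pn])(\d{7})', bs)
    if m is None:
        raise ValueError('Boresight string must look like RsxxxxxxxPsxxxxxxxYsxxxxxxx')
    sign = {'p': 1.0, 'n': -1.0}
    roll, pitch, yaw = (sign[m.group(i)] * int(m.group(i + 1)) / 1e6
                        for i in (1, 3, 5))
    return roll, pitch, yaw
# e.g. parse_boresight_string('Rp0100000Pn0050000Yp0000000') -> (0.1, -0.05, 0.0)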
def shorten_string(text_string):
"""
    Remove all duplicate characters from a string
    while keeping the original character order.
https://www.geeksforgeeks.org/remove-duplicates-given-string-python/
"""
return "".join(OrderedDict.fromkeys(text_string))
def remove_min_sec(ts):
return (int(ts) // 3600) * 3600
# ### Function to calculate the gaps between given azimuths. Needed to interpolate azimuths that are not given.
def get_azim_gap(azimuths, dual=True, preserve_shape=False):
"""
Only works for dual returns now.
preserve_shape is relevant for dual, where the azimuths repeat.
if False: return only unique gaps.
if True: return same shape as azimuths
"""
if dual:
azimuths_gap_flat = np.zeros_like(azimuths[:,0::2]).flatten()
azimuths_gap_flat[:-1] = ((azimuths[:,0::2].flatten()[1:] -\
azimuths[:,0::2].flatten()[:-1]) % 36000)
azimuths_gap_flat[-1] = azimuths_gap_flat[-2]
azimuths_gap = azimuths_gap_flat.reshape(azimuths[:,0::2].shape)
if preserve_shape:
azimuths_gap = np.tile(azimuths_gap,2)
return azimuths_gap
else:
raise NotImplementedError
def get_micros_pulses(micros, dual=True, preserve_shape=False):
"""
preserve_shape is relevant for dual, where the azimuths repeat.
if False: return only unique gaps.
if True: return same shape as azimuths
"""
if dual:
if preserve_shape:
micros_pulses = np.expand_dims(micros, axis=1) +\
vlp16_tables.TIMING_OFFSETS_DUAL.T.flatten() * 1e6
else:
micros_pulses = np.expand_dims(micros, axis=1) +\
vlp16_tables.TIMING_OFFSETS_DUAL.T[0::2,:].flatten() * 1e6
else:
micros_pulses = np.expand_dims(micros, axis=1) +\
vlp16_tables.TIMING_OFFSETS_SINGLE.T.flatten() * 1e6
return micros_pulses
def get_precision_azimuth(az_simple, azimuths_gap, dual=True, minimal_shape=True):
if dual:
timing_offsets_within_block = vlp16_tables.TIMING_OFFSETS_DUAL[:,0]
az_pulses = np.tile(az_simple,(vlp16_tables.LASERS_PER_DATA_BLOCK)).reshape(\
az_simple.shape[0], vlp16_tables.LASERS_PER_DATA_BLOCK, az_simple.shape[1])
az_pulses = az_pulses.transpose((0,2,1))
precision_azimuth = az_pulses[:,:,:] +\
timing_offsets_within_block / (2 * vlp16_tables.T_CYCLE) *\
np.expand_dims(azimuths_gap, axis=2)
precision_azimuth = precision_azimuth % 36000
if not minimal_shape:
precision_azimuth = np.tile(\
precision_azimuth.transpose((0,2,1)), (1,2,1)).transpose((0,2,1))
precision_azimuth = precision_azimuth.reshape(\
(precision_azimuth.shape[0], precision_azimuth.shape[1] * precision_azimuth.shape[2]))
return precision_azimuth
else:
raise NotImplementedError
def process_file(pcap_file_in, pcap_dir_in,
out_dir_ascii, out_dir_las,
shm_name, shm_shp, shm_dtp,
b_roll, b_pitch, b_yaw,
concat_cmd, wine_cmd):
print(f"Processing {pcap_file_in}")
logging.info(f"Processing {pcap_file_in}")
loc_shm = SharedMemory(shm_name)
loc_apx_arr = np.recarray(shape=shm_shp, dtype=shm_dtp, buf=loc_shm.buf)
### Temporary plug-in here.
# This is not a proper solution, just a quick proof-of-concept
    # Beforehand, the file yaw_correction.csv must be copied manually into the appropriate folder
if 'yaw_correction.csv' in os.listdir(pcap_dir_in):
yaw_agisoft = pd.read_csv(os.path.join(pcap_dir_in, 'yaw_correction.csv'), index_col=0)
else:
# just have a dataframe that when interpolated will result in 0 everywhere
idx = pd.Index([0, 1, 2597835528, 2597835529], name='utc_time')
        yaw_agisoft = pd.DataFrame(data = np.array([[0],[0],[0],[0]]), index=idx)
import json
import os
import numpy as np
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import convert_kps
from mmhuman3d.data.data_converters.base_converter import BaseModeConverter
from mmhuman3d.data.data_converters.builder import DATA_CONVERTERS
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.data.datasets.pipelines.hybrik_transforms import (
get_bbox,
get_intrinsic_matrix,
)
@DATA_CONVERTERS.register_module()
class MpiInf3dhpHybrIKConverter(BaseModeConverter):
    """MPI-INF-3DHP dataset for HybrIK: 'Monocular 3D Human Pose Estimation In
    The Wild Using Improved CNN Supervision', 3DV 2017. More details can be found
    in the `paper
<https://arxiv.org/pdf/1611.09813.pdf>`__.
Args:
modes (list): 'test' or 'train' for accepted modes
"""
ACCEPTED_MODES = ['test', 'train']
def __init__(self, modes=[]):
super(MpiInf3dhpHybrIKConverter, self).__init__(modes)
@staticmethod
def cam2pixel_matrix(cam_coord: np.ndarray,
intrinsic_param: np.ndarray) -> np.ndarray:
"""Convert coordinates from camera to image frame given intrinsic
matrix
Args:
cam_coord (np.ndarray): Coordinates in camera frame
            intrinsic_param (np.ndarray): 3x4 intrinsic matrix (homogeneous form)
Returns:
img_coord (np.ndarray): Coordinates in image frame
"""
cam_coord = cam_coord.transpose(1, 0)
cam_homogeneous_coord = np.concatenate(
(cam_coord, np.ones((1, cam_coord.shape[1]), dtype=np.float32)),
axis=0)
img_coord = np.dot(intrinsic_param, cam_homogeneous_coord) / (
cam_coord[2, :] + 1e-8)
img_coord = np.concatenate((img_coord[:2, :], cam_coord[2:3, :]),
axis=0)
return img_coord.transpose(1, 0)
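    # Worked example (added): with focal length 1000 px, principal point
    # (500, 500) and a 3x4 homogeneous intrinsic matrix, a point 2 m in front
    # of the camera and 0.2 m to its right projects 100 px right of the
    # principal point (exact up to the 1e-8 regularizer):
    # >>> K = np.array([[1000., 0., 500., 0.],
    # ...               [0., 1000., 500., 0.],
    # ...               [0., 0., 1., 0.]])
    # >>> MpiInf3dhpHybrIKConverter.cam2pixel_matrix(np.array([[0.2, 0.0, 2.0]]), K)
    # array([[600., 500.,   2.]])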
def convert_by_mode(self, dataset_path: str, out_path: str,
mode: str) -> dict:
"""
Args:
dataset_path (str): Path to directory where hybrik preprocessed
json files are stored
out_path (str): Path to directory to save preprocessed npz file
mode (str): Mode in accepted modes
Returns:
dict:
A dict containing keys image_path, image_height, image_width,
bbox_xywh, cam_param, root_cam, depth_factor, keypoints3d,
keypoints3d_mask, keypoints3d_cam, keypoints3d_cam_mask
stored in HumanData() format
"""
if mode == 'train':
ann_file = os.path.join(dataset_path,
'annotation_mpi_inf_3dhp_train_v2.json')
elif mode == 'test':
ann_file = os.path.join(dataset_path,
'annotation_mpi_inf_3dhp_test.json')
with open(ann_file, 'r') as fid:
database = json.load(fid)
# use HumanData to store all data
human_data = HumanData()
# structs we use
image_path_, bbox_xywh_, root_cam_, image_width_, image_height_, \
joint_cam_, joint_img_, depth_factor_ = \
[], [], [], [], [], [], [], []
smpl = {}
smpl['thetas'] = []
smpl['betas'] = []
cam_param = {}
cam_param['f'] = []
cam_param['c'] = []
cam_param['intrinsic'] = []
num_datapoints = len(database['images'])
for ann_image, ann_annotations in tqdm(
zip(database['images'], database['annotations']),
total=num_datapoints):
ann = dict()
for k, v in ann_image.items():
assert k not in ann.keys()
ann[k] = v
for k, v in ann_annotations.items():
ann[k] = v
width, height = ann['width'], ann['height']
bbox = ann['bbox']
bbox = get_bbox(np.array(bbox), width, height)
K = np.array(ann['cam_param']['intrinsic_param'])
f = np.array([K[0, 0], K[1, 1]])
c = np.array([K[0, 2], K[1, 2]])
intrinsic = get_intrinsic_matrix(f, c, inv=True)
joint_cam = np.array(ann['keypoints_cam'])
num_joints = joint_cam.shape[0]
# if train
if mode == 'train':
root_idx = 4
_, sub, seq, vid, im = ann['file_name'].split('/')[-1].split(
'_')
fname = '{}/{}/{}/{}'.format(sub, seq,
vid.replace('V', 'video_'), im)
# fname = '{}/{}/imageFrames/{}/frame_{}'.format(
# sub, seq, vid.replace('V', 'video_'), im)
elif mode == 'test':
root_idx = 14
fname = 'mpi_inf_3dhp_test_set/' + ann['file_name']
# fname = 'mpi_inf_3dhp_test_set/mpi_inf_3dhp_test_set/' + ann[
# 'file_name']
joint_img = self.cam2pixel_matrix(joint_cam, K)
joint_img[:, 2] = joint_img[:, 2] - joint_cam[root_idx, 2]
root_cam = joint_cam[root_idx]
joint_img = np.hstack([joint_img, np.ones([num_joints, 1])])
joint_cam = np.hstack([joint_cam, np.ones([num_joints, 1])])
image_path_.append(fname)
image_height_.append(height)
image_width_.append(width)
bbox_xywh_.append(bbox)
depth_factor_.append(2000.)
cam_param['f'].append(f.reshape((-1, 2)))
cam_param['c'].append(c.reshape((-1, 2)))
cam_param['intrinsic'].append(intrinsic)
joint_cam_.append(joint_cam)
joint_img_.append(joint_img)
root_cam_.append(root_cam)
cam_param['f'] = np.array(cam_param['f']).reshape((-1, 2))
cam_param['c'] = np.array(cam_param['c']).reshape((-1, 2))
        cam_param['intrinsic'] = np.array(cam_param['intrinsic'])
# _*_coding:utf-8_*_
# Author: xiaoran
# Time: 2017-12-08 21:10
# DecisionTreeClassifier
import numpy as np
import scipy as sp
import pandas as pd
class DecisionTreeClassifier(object):
    """Decision tree classifier, based mainly on ID3 and C4.5.
    criterion: string, optional (default="i_gain_r")
        Measure used to select the splitting feature:
        entropy [entropy]: entropy, for ID3
        information_gain [i_gain]: information gain, for ID3
        information_gain_ratio [i_gain_r]: information gain ratio, for C4.5
        gini [gini]: Gini index, for CART
    max_depth: int or None, optional (default = None)
        Maximum depth. If None, grow until every leaf contains a single class or
        fewer than min_sample_split examples remain (guards against overfitting).
    min_sample_split: int or float, optional (default=2)
        Stop splitting once at most this many examples remain; the leaf is labelled
        with the majority class of those examples (guards against overfitting).
        If float, it is rounded up.
    Attributes:
    classes_ : all class labels.
    feature_importances_:
        Feature importances, sorted in decreasing order of correctly classified
        counts; two-dimensional data [(feature1, nums1),...,(featureN, numsN)],
        of which only the first dimension is reported.
        They are generated while the tree is built, do not correspond to a
        validation set, and refer to features by their column index.
        numpy: [column0,...,columni,...]
        DataFrame: [DataFrame.columns]
    tree_: the underlying decision tree.
    Implemented methods:
    fit(), predict(), apply(), score()
"""
def __init__(self,criterion='i_gain_r',max_depth=None,min_sample_split=2):
'''构造函数
'''
self.__criterion = criterion
self.__max_depth = max_depth
self.__min_sample_plite = min_sample_split
self.__featureLen = None
self.__tree_ = None
self.classes_ = None
self.feature_importances_ = []
self.tree_ = None
def __check_array(self,x):
'''
        Check the input data x.
        None: automatically fill with 0.
        if isinstance(x, list) --> x = np.array(x)
        If x holds a single sample, promote it to 2-D: x = np.array([x])
'''
if isinstance(x,list):
x = np.array(x)
if self.__featureLen == None:
self.__featureLen = x.shape[1]
if len(x.shape) == 1:
x = np.array([x])
if x.shape[1] != self.__featureLen:
            raise ValueError("The format of the input data does not match that of the training data.")
return x
def __spliteDataWithFeature(self,data,featureColumn,dataType='numpy'):
        '''Split the dataset on the given feature and return the corresponding data
        subsets; the feature is specified by its column index [0,...,n].
        Parameters:
        data: the data to be split
        featureColumn: column index of the splitting feature
        dataType: data type, default "numpy" (ndarray); pd.DataFrame is also accepted
        (note that numpy ndarrays are compatible with DataFrame)
        return: the split data subsets and, for each subset, the feature value that produced it
'''
splitdataSet = []
if dataType == 'numpy':
featureSet = set(data[:,featureColumn])
# print("featureSet",featureSet)
for feature in featureSet:
                tmp = np.copy(data[data[:,featureColumn] == feature])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# General imports.
import numpy as np
from warnings import warn
from scipy.integrate import AccuracyWarning
from scipy.sparse import find, diags, identity, csr_matrix
from scipy.sparse.linalg import spsolve
from scipy.interpolate import interp1d, RectBivariateSpline
# Plotting.
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# ==============================================================================
# Code for generating indices on the oversampled wavelength grid.
# ==============================================================================
def arange_2d(starts, stops, dtype=None):
"""Create a 2D array containing a series of ranges. The ranges do not have
to be of equal length.
:param starts: start values for each range.
:param stops: end values for each range.
:param dtype: the type of the output values.
:type starts: int or array[int]
:type stops: int or array[int]
:type dtype: str
    :returns: out, mask - 2D array of ranges and a mask that flags the padded
        (invalid) elements.
:rtype: Tuple(array[int], array[bool])
"""
# Ensure starts and stops are arrays.
starts = np.asarray(starts)
stops = np.asarray(stops)
# Check input for starts and stops is valid.
if (starts.shape != stops.shape) & (starts.shape != ()):
msg = ('Shapes of starts and stops are not compatible, '
'they must either have the same shape or starts must be scalar.')
raise ValueError(msg)
if np.any(stops < starts):
msg = 'stops must be everywhere greater or equal to starts.'
raise ValueError(msg)
# If starts was given as a scalar match its shape to stops.
if starts.shape == ():
starts = starts * np.ones_like(stops)
# Compute the length of each range.
lengths = (stops - starts).astype(int)
# Initialize the output arrays.
nrows = len(stops)
ncols = np.amax(lengths)
out = np.ones((nrows, ncols), dtype=dtype)
mask = np.ones((nrows, ncols), dtype='bool')
# Compute the indices.
for irow in range(nrows):
out[irow, :lengths[irow]] = np.arange(starts[irow], stops[irow])
mask[irow, :lengths[irow]] = False
return out, mask
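# Worked example (added for clarity): ranges of unequal length are padded and
# the mask flags the padded cells.
# >>> out, mask = arange_2d([0, 2], [3, 4], dtype=int)
# >>> out
# array([[0, 1, 2],
#        [2, 3, 1]])          # the trailing 1 is just the pad value
# >>> mask
# array([[False, False, False],
#        [False, False,  True]])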
# ==============================================================================
# Code for converting to a sparse matrix and back.
# ==============================================================================
def sparse_k(val, k, n_k):
"""
Transform a 2D array `val` to a sparse matrix.
`k` is use for the position in the second axis
of the matrix. The resulting sparse matrix will
have the shape : ((len(k), n_k))
Set k elements to a negative value when not defined
"""
# Length of axis 0
n_i = len(k)
# Get row index
i_k = np.indices(k.shape)[0]
# Take only well defined coefficients
row = i_k[k >= 0]
col = k[k >= 0]
data = val[k >= 0]
mat = csr_matrix((data, (row, col)), shape=(n_i, n_k))
return mat
def unsparse(matrix, fill_value=np.nan):
"""
Convert a sparse matrix to a 2D array of values and a 2D array of position.
Returns
------
out: 2d array
values of the matrix. The shape of the array is given by:
(matrix.shape[0], maximum number of defined value in a column).
col_out: 2d array
position of the columns. Same shape as `out`.
"""
col, row, val = find(matrix.T)
n_row, n_col = matrix.shape
good_rows, counts = np.unique(row, return_counts=True)
# Define the new position in columns
i_col = np.indices((n_row, counts.max()))[1]
i_col = i_col[good_rows]
i_col = i_col[i_col < counts[:, None]]
# Create outputs and assign values
col_out = np.ones((n_row, counts.max()), dtype=int) * -1
col_out[row, i_col] = col
out = np.ones((n_row, counts.max())) * fill_value
out[row, i_col] = val
return out, col_out
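# Small example (added): k holds the target column of each value and negative
# entries mark undefined coefficients, which are simply dropped.
# >>> val = np.array([[1., 2.], [3., 4.]])
# >>> k = np.array([[0, 2], [1, -1]])
# >>> sparse_k(val, k, 3).toarray()
# array([[1., 0., 2.],
#        [0., 3., 0.]])
# unsparse() inverts this, returning per-row values and column positions padded
# with nan / -1 where a row has fewer defined entries.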
# ==============================================================================
# Code for building wavelength grids.
# ==============================================================================
def get_wave_p_or_m(wave_map):
# TODO rename function?
"""Compute lambda_plus and lambda_minus of pixel map, given the pixel
central value.
:param wave_map: Array of the pixel wavelengths for a given order.
:type wave_map: array[float]
:returns: wave_plus, wave_minus - The wavelength edges of each pixel,
given the central value.
:rtype: Tuple(array[float], array[float])
"""
wave_map = wave_map.T # Simpler to use transpose
    # Initialize arrays.
wave_left = np.zeros_like(wave_map)
wave_right = np.zeros_like(wave_map)
# Compute the change in wavelength.
delta_wave = np.diff(wave_map, axis=0)
# Compute the wavelength values on the left and right edges of each pixel.
wave_left[1:] = wave_map[:-1] + delta_wave/2 # TODO check this logic.
wave_left[0] = wave_map[0] - delta_wave[0]/2
wave_right[:-1] = wave_map[:-1] + delta_wave/2
wave_right[-1] = wave_map[-1] + delta_wave[-1]/2
# The outputs depend on the direction of the spectral axis.
if (wave_right >= wave_left).all():
wave_plus, wave_minus = wave_right.T, wave_left.T
elif (wave_right <= wave_left).all():
wave_plus, wave_minus = wave_left.T, wave_right.T
else:
raise ValueError('Bad pixel values for wavelength.')
return wave_plus, wave_minus
def oversample_grid(wave_grid, n_os=1):
"""Create an oversampled version of the input 1D wavelength grid.
:param wave_grid: Wavelength grid to be oversampled.
:param n_os: Oversampling factor. If it is a scalar, take the same value for each
interval of the grid. If it is an array, n_os specifies the oversampling
at each interval of the grid, so len(n_os) = len(wave_grid) - 1.
:type wave_grid: array[float]
:type n_os: int or array[int]
:returns: wave_grid_os - The oversampled wavelength grid.
:rtype: array[float]
"""
# Convert n_os to an array.
n_os = np.asarray(n_os)
# n_os needs to have the dimension: len(wave_grid) - 1.
if n_os.ndim == 0:
# A scalar was given, repeat the value.
n_os = np.repeat(n_os, len(wave_grid) - 1)
elif len(n_os) != (len(wave_grid) - 1):
# An array of incorrect size was given.
msg = 'n_os must be a scalar or an array of size len(wave_grid) - 1.'
raise ValueError(msg)
# Grid intervals.
delta_wave = np.diff(wave_grid)
# Initialize the new oversampled wavelength grid.
wave_grid_os = wave_grid.copy()
# Iterate over oversampling factors to generate new grid points.
for i_os in range(1, n_os.max()):
# Consider only intervals that are not complete yet.
mask = n_os > i_os
# Compute the new grid points.
sub_grid = (wave_grid[:-1][mask] + i_os*delta_wave[mask]/n_os[mask])
# Add the grid points to the oversampled wavelength grid.
wave_grid_os = np.concatenate([wave_grid_os, sub_grid])
    # Take only unique values and sort them.
wave_grid_os = np.unique(wave_grid_os)
return wave_grid_os
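# Worked example (added): with n_os=2 every interval gains one midpoint.
# >>> oversample_grid(np.array([1.0, 2.0, 4.0]), n_os=2)
# array([1. , 1.5, 2. , 3. , 4. ])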
def extrapolate_grid(wave_grid, wave_range, poly_ord):
"""Extrapolate the given 1D wavelength grid to cover a given range of values
by fitting the derivate with a polynomial of a given order and using it to
compute subsequent values at both ends of the grid.
:param wave_grid: Wavelength grid to be extrapolated.
:param wave_range: Wavelength range the new grid should cover.
:param poly_ord: Order of the polynomial used to fit the derivative of
wave_grid.
:type wave_grid: array[float]
:type wave_range: list[float]
:type poly_ord: int
:returns: wave_grid_ext - The extrapolated 1D wavelength grid.
:rtype: array[float]
"""
# Define delta_wave as a function of wavelength by fitting a polynomial.
delta_wave = np.diff(wave_grid)
pars = np.polyfit(wave_grid[:-1], delta_wave, poly_ord)
f_delta = np.poly1d(pars)
# Extrapolate out-of-bound values on the left-side of the grid.
grid_left = []
if wave_range[0] < wave_grid.min():
# Compute the first extrapolated grid point.
grid_left = [wave_grid.min() - f_delta(wave_grid.min())]
# Iterate until the end of wave_range is reached.
while True:
next_val = grid_left[-1] - f_delta(grid_left[-1])
if next_val < wave_range[0]:
break
else:
grid_left.append(next_val)
        # Sort extrapolated values (and keep only unique).
grid_left = np.unique(grid_left)
# Extrapolate out-of-bound values on the right-side of the grid.
grid_right = []
if wave_range[-1] > wave_grid.max():
# Compute the first extrapolated grid point.
grid_right = [wave_grid.max() + f_delta(wave_grid.max())]
# Iterate until the end of wave_range is reached.
while True:
next_val = grid_right[-1] + f_delta(grid_right[-1])
if next_val > wave_range[-1]:
break
else:
grid_right.append(next_val)
        # Sort extrapolated values (and keep only unique).
grid_right = np.unique(grid_right)
# Combine the extrapolated sections with the original grid.
wave_grid_ext = np.concatenate([grid_left, wave_grid, grid_right])
return wave_grid_ext
def _grid_from_map(wave_map, aperture, out_col=False):
# TODO is out_col still needed.
"""Define a wavelength grid by taking the wavelength of each column at the
center of mass of the spatial profile.
:param wave_map: Array of the pixel wavelengths for a given order.
:param aperture: Array of the spatial profile for a given order.
:param out_col:
:type wave_map: array[float]
:type aperture: array[float]
:type out_col: bool
:returns:
:rtype:
"""
# Use only valid columns.
mask = (aperture > 0).any(axis=0) & (wave_map > 0).any(axis=0)
# Get central wavelength using PSF as weights.
num = (aperture * wave_map).sum(axis=0)
denom = aperture.sum(axis=0)
center_wv = num[mask]/denom[mask]
# Make sure the wavelength values are in ascending order.
    sort = np.argsort(center_wv)
import numpy as np
import quicklens as ql
import scipy
import config
r2d = 180./np.pi
d2r = np.pi/180.
#pass array of form [img_id,:,:,channel], return same array normalized channel wise, and also return variances
def normalize_channelwise(images):
# #remove mean per image
# for img_id in range(images.shape[0]):
# for channel_id in range(images.shape[-1]):
# avg = (images[img_id,:,:,channel_id]).sum() / images[img_id,:,:,channel_id].size
# images[img_id,:,:,channel_id] = images[img_id,:,:,channel_id]-avg
#calculate variance over all images per channel
variances = np.zeros(images.shape[-1])
for channel_id in range(images.shape[-1]):
if len(images.shape) == 4:
variances[channel_id] = (images[:,:,:,channel_id]*images[:,:,:,channel_id]).sum() / images[:,:,:,channel_id].size
images[:,:,:,channel_id] = (images[:,:,:,channel_id])/variances[channel_id]**(1./2.)
if len(images.shape) == 3:
variances[channel_id] = (images[:,:,channel_id]*images[:,:,channel_id]).sum() / images[:,:,channel_id].size
images[:,:,channel_id] = (images[:,:,channel_id])/variances[channel_id]**(1./2.)
return images,variances
def ell_filter_maps(maps, nx, dx, lmax, lmin=0):
nsims = maps.shape[0]
ell_filter = np.ones(10000) #itlib.lib_qlm.ellmax=5133 for some reason
ell_filter[lmax:] = 0 #3500
ell_filter[0:lmin] = 0
for map_id in range(nsims):
fullmap_cfft = ql.maps.rmap(nx, dx,map=maps[map_id]).get_cfft()
filteredmap_cfft = fullmap_cfft * ell_filter
filteredmap_cfft.fft[0,0] = 0.
filteredmap = filteredmap_cfft.get_rffts()[0].get_rmap().map
maps[map_id] = filteredmap
return maps
def estimate_ps(maps, binnr=30, lmin=2, lmax=3000):
nmaps = maps.shape[0]
lbins = np.linspace(lmin, lmax, binnr)
ell_binned = lbins[:-1] + np.diff(lbins)
power_avg = np.zeros(ell_binned.shape[0])
for map_id in range(nmaps):
rmap = maps[map_id,:,:]
cfft = ql.maps.rmap(config.nx, config.dx,map=rmap).get_cfft()
power = cfft.get_cl(lbins)
power_avg += power.cl.real
power_avg = power_avg/nmaps
return ell_binned, power_avg
#periodic padding for image array (img_id,x,y,channels)
def periodic_padding(images,npad):
if len(images.shape)==4:
images = np.pad(images,pad_width=((0,0),(npad,npad),(npad,npad),(0,0)),mode='wrap')
if len(images.shape)==3:
images = np.pad(images,pad_width=((npad,npad),(npad,npad),(0,0)),mode='wrap')
return images
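# Added shape check: wrap-padding a (2, 4, 4, 1) image stack with npad=1 gives
# (2, 6, 6, 1); the batch and channel axes are left untouched.
# >>> periodic_padding(np.zeros((2, 4, 4, 1)), 1).shape
# (2, 6, 6, 1)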
#pass true kappa and max like kappa
#find the spectrum S and N.
#wiener filter S/(S+N)*kappa_true
def wiener_filter_kappa(data_input, deg, nx,dx):
nsims = data_input.shape[0]
#################### calc S and N power spectra needed for WF
#calculate kappa correlation coeff
lmax = 3500 #3500
lbins = np.linspace(100, lmax, 20)
ell_binned = lbins[:-1] + np.diff(lbins)
#kappa
corr_coeff_qe_avg = np.zeros(ell_binned.shape[0])
corr_coeff_it_avg = np.zeros(ell_binned.shape[0])
    auto_qe_avg = np.zeros(ell_binned.shape[0])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 9 19:58:05 2020
@author: mlampert
"""
import os
import copy
import pickle
import pandas
import numpy as np
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit, root
import flap
import flap_nstx
from flap_nstx.analysis import calculate_nstx_gpi_frame_by_frame_velocity, calculate_nstx_gpi_tde_velocity
from flap_nstx import flap_nstx_thomson_data, get_nstx_thomson_gradient, get_fit_nstx_thomson_profiles
from flap_nstx.publications import read_ahmed_fit_parameters, read_ahmed_edge_current, read_ahmed_matlab_file
from flap_nstx.analysis import thick_wire_estimation_numerical
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,'../flap_nstx.cfg')
flap.config.read(file_name=fn)
flap_nstx.register()
styled=True
if styled:
plt.rc('font', family='serif', serif='Helvetica')
labelsize=12.
linewidth=0.5
major_ticksize=6.
plt.rc('text', usetex=False)
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['lines.linewidth'] = linewidth
plt.rcParams['axes.linewidth'] = linewidth
plt.rcParams['axes.labelsize'] = labelsize
plt.rcParams['axes.titlesize'] = labelsize
plt.rcParams['xtick.labelsize'] = labelsize
plt.rcParams['xtick.major.size'] = major_ticksize
plt.rcParams['xtick.major.width'] = linewidth
plt.rcParams['xtick.minor.width'] = linewidth/2
plt.rcParams['xtick.minor.size'] = major_ticksize/2
plt.rcParams['ytick.labelsize'] = labelsize
plt.rcParams['ytick.major.width'] = linewidth
plt.rcParams['ytick.major.size'] = major_ticksize
plt.rcParams['ytick.minor.width'] = linewidth/2
plt.rcParams['ytick.minor.size'] = major_ticksize/2
plt.rcParams['legend.fontsize'] = labelsize
else:
import matplotlib.style as pltstyle
pltstyle.use('default')
def calculate_phase_diagram(averaging='before',
parameter='grad_glob',
normalized_structure=True,
normalized_velocity=True,
subtraction_order=4,
test=False,
recalc=True,
elm_window=500e-6,
elm_duration=100e-6,
correlation_threshold=0.6,
plot=False,
auto_x_range=True,
auto_y_range=True,
plot_error=True,
pdf=True,
dependence_error_threshold=0.5,
plot_only_good=False,
plot_linear_fit=False,
pressure_grad_range=None, #Plot range for the pressure gradient
density_grad_range=None, #Plot range for the density gradient
temperature_grad_range=None, #Plot range for the temperature gradient (no outliers, no range)
):
coeff_r=np.asarray([3.7183594,-0.77821046,1402.8097])/1000. #The coordinates are in meters, the coefficients are in mm
coeff_z=np.asarray([0.18090118,3.0657776,70.544312])/1000. #The coordinates are in meters, the coefficients are in mm
coeff_r_new=3./800.
coeff_z_new=3./800.
flap.delete_data_object('*')
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
result_filename=wd+'/processed_data/'+'elm_profile_dependence'
result_filename+='_'+averaging+'_avg'
if normalized_structure:
result_filename+='_ns'
if normalized_velocity:
result_filename+='_nv'
result_filename+='_so'+str(subtraction_order)
scaling_db_file=result_filename+'.pickle'
db=read_ahmed_fit_parameters()
X=[]
Y=[]
if not os.path.exists(scaling_db_file) or recalc:
#Load and process the ELM database
database_file='/Users/mlampert/work/NSTX_workspace/db/ELM_findings_mlampert_velocity_good.csv'
db=pandas.read_csv(database_file, index_col=0)
elm_index=list(db.index)
for elm_ind in elm_index:
elm_time=db.loc[elm_ind]['ELM time']/1000.
shot=int(db.loc[elm_ind]['Shot'])
if normalized_velocity:
if normalized_structure:
str_add='_ns'
else:
str_add=''
filename=flap_nstx.analysis.filename(exp_id=shot,
working_directory=wd+'/processed_data',
time_range=[elm_time-2e-3,elm_time+2e-3],
comment='ccf_velocity_pfit_o'+str(subtraction_order)+'_fst_0.0'+str_add+'_nv',
extension='pickle')
else:
filename=wd+'/processed_data/'+db.loc[elm_ind]['Filename']+'.pickle'
#grad.slice_data(slicing=time_slicing)
status=db.loc[elm_ind]['OK/NOT OK']
if status != 'NO':
velocity_results=pickle.load(open(filename, 'rb'))
det=coeff_r[0]*coeff_z[1]-coeff_z[0]*coeff_r[1]
for key in ['Velocity ccf','Velocity str max','Velocity str avg','Size max','Size avg']:
orig=copy.deepcopy(velocity_results[key])
velocity_results[key][:,0]=coeff_r_new/det*(coeff_z[1]*orig[:,0]-coeff_r[1]*orig[:,1])
velocity_results[key][:,1]=coeff_z_new/det*(-coeff_z[0]*orig[:,0]+coeff_r[0]*orig[:,1])
velocity_results['Elongation max'][:]=(velocity_results['Size max'][:,0]-velocity_results['Size max'][:,1])/(velocity_results['Size max'][:,0]+velocity_results['Size max'][:,1])
velocity_results['Elongation avg'][:]=(velocity_results['Size avg'][:,0]-velocity_results['Size avg'][:,1])/(velocity_results['Size avg'][:,0]+velocity_results['Size avg'][:,1])
velocity_results['Velocity ccf'][np.where(velocity_results['Correlation max'] < correlation_threshold),:]=[np.nan,np.nan]
time=velocity_results['Time']
elm_time_interval_ind=np.where(np.logical_and(time >= elm_time-elm_duration,
time <= elm_time+elm_duration))
elm_time=(time[elm_time_interval_ind])[np.argmin(velocity_results['Frame similarity'][elm_time_interval_ind])]
elm_time_ind=int(np.argmin(np.abs(time-elm_time)))
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_pol=flap.get_data('NSTX_MDSPlus',
name='EFIT02::BZZ0',
exp_id=shot,
object_name='BZZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
except:
pass
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_tor=flap.get_data('NSTX_MDSPlus',
name='EFIT02::BTZ0',
exp_id=shot,
object_name='BTZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
except:
pass
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_rad=flap.get_data('NSTX_MDSPlus',
name='EFIT02::BRZ0',
exp_id=shot,
object_name='BRZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
except:
pass
try:
shot_inds=np.where(db['shot'] == shot)
ind_db=tuple(shot_inds[0][np.where(np.abs(db['time2'][shot_inds]/1000.-elm_time) == np.min(np.abs(db['time2'][shot_inds]/1000.-elm_time)))])
n_e=db['Density']['value_at_max_grad'][ind_db]*1e20 #Conversion to 1/m3
T_e=db['Temperature']['value_at_max_grad'][ind_db]*1e3*1.16e4 #Conversion to K
k_x=2*np.pi/velocity_results['Size max'][elm_time_ind,0]
k_y=2*np.pi/velocity_results['Size max'][elm_time_ind,1]
R=velocity_results['Position max'][elm_time_ind,0]
L_N=velocity_results['Size max'][elm_time_ind,0]
m_e=9.1093835e-31
B=np.sqrt(b_pol**2+b_tor**2+b_rad**2)
q_e=1.6e-19
epsilon_0=8.854e-12
omega_pe=np.sqrt(n_e*q_e**2/m_e/epsilon_0)
v_e=velocity_results['Velocity ccf'][elm_time_ind,0]
gamma=5/3.
Z=1.
k=1.38e-23 #Boltzmann constant
m_i=2.014*1.66e-27 # Deuterium mass
c_s=np.sqrt(gamma*Z*k*(T_e)/m_i)
c=3e8
delta_e=c/omega_pe
omega_A=B/np.sqrt(4*np.pi*1e-7*n_e*m_e)
omega_A_CGS=B/np.sqrt(4*np.pi*n_e*m_e)
omega_eta=v_e*(np.sqrt(k_x**2 + k_y**2)*delta_e)**2
gamma_MHD=c_s**2/(R*L_N)
X.append(omega_eta/omega_A_CGS)
Y.append(gamma_MHD**2/omega_A**2)
except:
pass
plt.figure()
plt.scatter(np.abs(X),np.abs(Y))
plt.xscale('log')
plt.yscale('log')
plt.xlim(min(X),max(X))
plt.ylim(min(Y),max(Y))
plt.title('Curvature vs. resistivity')
plt.xlabel('omega_eta / omega_A')
plt.ylabel('gamma_MHD^2 / omega_A^2')
def calculate_radial_acceleration_diagram(elm_window=500e-6,
elm_duration=100e-6,
correlation_threshold=0.6,
elm_time_base='frame similarity', #'radial acceleration', 'radial velocity', 'frame similarity'
acceleration_base='numdev', #numdev or linefit
calculate_thick_wire=True,
delta_b_threshold=1,
plot=False,
plot_velocity=False,
auto_x_range=True,
auto_y_range=True,
plot_error=True,
plot_clear_peak=False,
calculate_acceleration=False,
calculate_dependence=False, #Calculate and plot dependence between the filament parameters and plasma parameters
calculate_ion_drift_velocity=False,
calculate_greenwald_fraction=False,
calculate_collisionality=False,
recalc=True,
test=False,
):
def linear_fit_function(x,a,b):
return a*x+b
def mtanh_function(x,a,b,c,h,x0):
return (h+b)/2 + (h-b)/2*((1 - a*2*(x - x0)/c)*np.exp(-2*(x - x0)/c) - np.exp(2*(x-x0)/c))/(np.exp(2*(x - x0)/c) + np.exp(-2*(x - x0)/c))
def mtanh_dx_function(x,a,b,c,h,x0):
return ((h-b)*((4*a*(x-x0)+(-a-4)*c)*np.exp((4*(x-x0))/c)-a*c))/(c**2*(np.exp((4*(x-x0))/c)+1)**2)
def mtanh_dxdx_function(x,a,b,c,h,x0):
return -(8*(h-b)*np.exp((4*(x-x0))/c)*((2*a*x-2*a*x0+(-a-2)*c)*np.exp((4*(x-x0))/c)-2*a*x+2*a*x0+(2-a)*c))/(c**3*(np.exp((4*(x-x0))/c)+1)**3)
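    # Added sanity check: at x = x0 the exponentials in mtanh_function cancel,
    # so mtanh_function(x0, a, b, c, h, x0) == (h + b) / 2 for any a and c --
    # the profile passes through the midpoint of pedestal height h and offset b
    # there, e.g. mtanh_function(1.0, 0.1, 0.0, 0.05, 2.0, 1.0) == 1.0.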
if acceleration_base not in ['numdev','linefit']:
raise ValueError('acceleration_base should be either "numdev" or "linefit"')
coeff_r=np.asarray([3.7183594,-0.77821046,1402.8097])/1000. #The coordinates are in meters, the coefficients are in mm
coeff_z=np.asarray([0.18090118,3.0657776,70.544312])/1000. #The coordinates are in meters, the coefficients are in mm
coeff_r_new=3./800.
coeff_z_new=3./800.
sampling_time=2.5e-6
gamma=5/3.
Z=1.
k_B=1.38e-23 #Boltzmann constant
mu0=4*np.pi*1e-7
q_e=1.602e-19
m_e=9.1e-31 # Deuterium mass
m_i=2.014*1.66e-27
epsilon_0=8.85e-12
flap.delete_data_object('*')
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
result_filename='radial_acceleration_analysis'
if elm_time_base == 'frame similarity':
result_filename+='_fs'
elif elm_time_base == 'radial velocity':
result_filename+='_rv'
elif elm_time_base == 'radial acceleration':
result_filename+='_ra'
if calculate_thick_wire:
result_filename+='_thick'
result_filename+='_dblim_'+str(delta_b_threshold)
db_nt=read_ahmed_fit_parameters()
db_cur=read_ahmed_edge_current()
db_data=read_ahmed_matlab_file()
db_data_shotlist=[]
for i_shotind in range(len(db_data)):
db_data_shotlist.append(db_data[i_shotind]['shot'])
db_data_shotlist=np.asarray(db_data_shotlist)
db_data_timelist=[]
for i_shotind in range(len(db_data)):
db_data_timelist.append(db_data[i_shotind]['time2'])
db_data_timelist=np.asarray(db_data_timelist)
dependence_db={'Current':[],
'Pressure grad':[],
'Pressure grad own':[],
'Density grad':[],
'Density grad own':[],
'Temperature grad':[],
'Temperature grad own':[],
'Triangularity':[],
'Velocity ccf':[],
'Size max':[]}
dependence_db_err=copy.deepcopy(dependence_db)
ion_drift_velocity_db={'Drift vel':[],
'ExB vel':[],
'Error':[],
'Poloidal vel':[],
'Crossing psi':[],
}
greenwald_limit_db={'nG':[],
'ne maxgrad':[],
'Greenwald fraction':[],
'Velocity ccf':[],
'Size max':[],
'Elongation max':[],
'Str number':[],}
collisionality_db={'ei collision rate':[],
'Temperature':[],
'Collisionality':[],
'Velocity ccf':[],
'Size max':[],
'Elongation max':[],
'Str number':[],}
a_curvature=[]
a_curvature_error=[]
a_thin_wire=[]
a_thin_wire_error=[]
a_measurement=[]
a_measurement_error=[]
append_index=0
good_peak_indices=[]
lower_pol_vel=0.
plt.figure()
if not os.path.exists(wd+'/processed_data/'+result_filename+'.pickle') or recalc:
        if not recalc and not os.path.exists(wd+'/processed_data/'+result_filename+'.pickle'):
print('Pickle file not found. Results will be recalculated!')
if plot_velocity:
matplotlib.use('agg')
pdf_velocities=PdfPages(wd+'/plots/velocity_results_for_ELMs.pdf')
plt.figure()
#Load and process the ELM database
database_file='/Users/mlampert/work/NSTX_workspace/db/ELM_findings_mlampert_velocity_good.csv'
db=pandas.read_csv(database_file, index_col=0)
elm_index=list(db.index)
for elm_ind in elm_index:
elm_time=db.loc[elm_ind]['ELM time']/1000.
shot=int(db.loc[elm_ind]['Shot'])
filename=flap_nstx.analysis.filename(exp_id=shot,
working_directory=wd+'/processed_data',
time_range=[elm_time-2e-3,elm_time+2e-3],
comment='ccf_velocity_pfit_o4_fst_0.0_ns_nv',
extension='pickle')
#grad.slice_data(slicing=time_slicing)
status=db.loc[elm_ind]['OK/NOT OK']
radial_velocity_status=db.loc[elm_ind]['Radial velocity peak']
radial_peak_status=db.loc[elm_ind]['Clear peak']
if status != 'NO' and radial_velocity_status != 'No':
velocity_results=pickle.load(open(filename, 'rb'))
velocity_results['Separatrix dist avg']=np.zeros(velocity_results['Position avg'].shape[0])
velocity_results['Separatrix dist max']=np.zeros(velocity_results['Position max'].shape[0])
R_sep=flap.get_data('NSTX_MDSPlus',
name='\EFIT01::\RBDRY',
exp_id=shot,
object_name='SEP R OBJ').slice_data(slicing={'Time':elm_time}).data
z_sep=flap.get_data('NSTX_MDSPlus',
name='\EFIT01::\ZBDRY',
exp_id=shot,
object_name='SEP Z OBJ').slice_data(slicing={'Time':elm_time}).data
sep_GPI_ind=np.where(np.logical_and(R_sep > coeff_r[2],
np.logical_and(z_sep > coeff_z[2],
z_sep < coeff_z[2]+79*coeff_z[0]+64*coeff_z[1])))
try:
sep_GPI_ind=np.asarray(sep_GPI_ind[0])
sep_GPI_ind=np.insert(sep_GPI_ind,0,sep_GPI_ind[0]-1)
sep_GPI_ind=np.insert(sep_GPI_ind,len(sep_GPI_ind),sep_GPI_ind[-1]+1)
z_sep_GPI=z_sep[(sep_GPI_ind)]
R_sep_GPI=R_sep[sep_GPI_ind]
GPI_z_vert=coeff_z[0]*np.arange(80)/80*64+coeff_z[1]*np.arange(80)+coeff_z[2]
R_sep_GPI_interp=np.interp(GPI_z_vert,np.flip(z_sep_GPI),np.flip(R_sep_GPI))
z_sep_GPI_interp=GPI_z_vert
for key in ['max','avg']:
for ind_time in range(len(velocity_results['Position '+key][:,0])):
velocity_results['Separatrix dist '+key][ind_time]=np.min(np.sqrt((velocity_results['Position '+key][ind_time,0]-R_sep_GPI_interp)**2 +
(velocity_results['Position '+key][ind_time,1]-z_sep_GPI_interp)**2))
ind_z_min=np.argmin(np.abs(z_sep_GPI-velocity_results['Position '+key][ind_time,1]))
if z_sep_GPI[ind_z_min] >= velocity_results['Position '+key][ind_time,1]:
ind1=ind_z_min
ind2=ind_z_min+1
else:
ind1=ind_z_min-1
ind2=ind_z_min
radial_distance=velocity_results['Position '+key][ind_time,0]-((velocity_results['Position '+key][ind_time,1]-z_sep_GPI[ind2])/(z_sep_GPI[ind1]-z_sep_GPI[ind2])*(R_sep_GPI[ind1]-R_sep_GPI[ind2])+R_sep_GPI[ind2])
if radial_distance < 0:
velocity_results['Separatrix dist '+key][ind_time]*=-1
except:
pass
det=coeff_r[0]*coeff_z[1]-coeff_z[0]*coeff_r[1]
for key in ['Velocity ccf','Velocity str max','Velocity str avg','Size max','Size avg']:
orig=copy.deepcopy(velocity_results[key])
velocity_results[key][:,0]=coeff_r_new/det*(coeff_z[1]*orig[:,0]-coeff_r[1]*orig[:,1])
velocity_results[key][:,1]=coeff_z_new/det*(-coeff_z[0]*orig[:,0]+coeff_r[0]*orig[:,1])
velocity_results['Elongation max'][:]=(velocity_results['Size max'][:,0]-velocity_results['Size max'][:,1])/(velocity_results['Size max'][:,0]+velocity_results['Size max'][:,1])
velocity_results['Elongation avg'][:]=(velocity_results['Size avg'][:,0]-velocity_results['Size avg'][:,1])/(velocity_results['Size avg'][:,0]+velocity_results['Size avg'][:,1])
velocity_results['Velocity ccf'][np.where(velocity_results['Correlation max'] < correlation_threshold),:]=[np.nan,np.nan]
#THIS NEEDS REVISION AS THE DATA IS TOO NOISY FOR DIFFERENTIAL CALCULATION
velocity_results['Acceleration ccf']=copy.deepcopy(velocity_results['Velocity ccf'])
velocity_results['Acceleration ccf'][1:,0]=(velocity_results['Velocity ccf'][1:,0]-velocity_results['Velocity ccf'][0:-1,0])/sampling_time
velocity_results['Acceleration ccf'][1:,1]=(velocity_results['Velocity ccf'][1:,1]-velocity_results['Velocity ccf'][0:-1,1])/sampling_time
time=velocity_results['Time']
elm_time_interval_ind=np.where(np.logical_and(time >= elm_time-elm_duration,
time <= elm_time+elm_duration))
elm_time=(time[elm_time_interval_ind])[np.argmin(velocity_results['Frame similarity'][elm_time_interval_ind])]
elm_time_ind=int(np.argmin(np.abs(time-elm_time)))
print(time[0], elm_time)
if elm_time_base == 'radial velocity':
ind_notnan=np.logical_not(np.isnan(velocity_results['Velocity ccf'][elm_time_ind-40:elm_time_ind+40,0]))
elm_time=(time[elm_time_ind-40:elm_time_ind+40][ind_notnan])[np.argmax(velocity_results['Velocity ccf'][elm_time_ind-40:elm_time_ind+40,0][ind_notnan])]
elm_time_ind=int(np.argmin(np.abs(time-elm_time)))
elif elm_time_base == 'radial acceleration':
ind_notnan=np.logical_not(np.isnan(velocity_results['Acceleration ccf'][elm_time_ind-40:elm_time_ind+40,0]))
elm_time=(time[elm_time_ind-40:elm_time_ind+40][ind_notnan])[np.argmax(velocity_results['Acceleration ccf'][elm_time_ind-40:elm_time_ind+40,0][ind_notnan])]
elm_time_ind=int(np.argmin(np.abs(time-elm_time)))
else:
pass
shot_inds=np.where(db_nt['shot'] == shot)
ind_db=tuple(shot_inds[0][np.where(np.abs(db_nt['time2'][shot_inds]/1000.-elm_time) == np.min(np.abs(db_nt['time2'][shot_inds]/1000.-elm_time)))])
shot_inds_2=np.where(db_data_shotlist == shot)
ind_db_2=(shot_inds_2[0][np.where(np.abs(db_data_timelist[shot_inds_2]/1000.-elm_time) == np.min(np.abs(db_data_timelist[shot_inds_2]/1000.-elm_time)))])
n_e=db_nt['Density']['value_at_max_grad'][ind_db]*1e20 #Conversion to 1/m3
ind_error_ne=np.where(np.logical_and(db_data[ind_db_2[0]]['psi_n'] < 1.1,
db_data[ind_db_2[0]]['psi_n'] > 0.7))
n_e_error=np.mean(db_data[ind_db_2[0]]['n_e_err_psi'][ind_error_ne])
T_e=db_nt['Temperature']['value_at_max_grad'][ind_db]*1e3*1.16e4 #Conversion to K
ind_error_te=np.where(np.logical_and(db_data[ind_db_2[0]]['psi_t'] < 1.1,
db_data[ind_db_2[0]]['psi_t'] > 0.7))
T_e_error=np.mean(db_data[ind_db_2[0]]['t_e_err_psi'][ind_error_te])
j_edge=db_cur['Current'][ind_db]*1e6
j_edge_error=j_edge*0.10 #Suspected fitting error of the edge current.
psi_n_e=db_data[ind_db_2[0]]['psi_n']
dev_n_e=db_data[ind_db_2[0]]['dev_n']
a_param=db_nt['Density']['a'][ind_db]
b_param=db_nt['Density']['b'][ind_db]
c_param=db_nt['Density']['c'][ind_db]
h_param=db_nt['Density']['h'][ind_db]
x0_param=db_nt['Density']['xo'][ind_db]
max_n_e_grad_psi=root(mtanh_dxdx_function, x0_param, args=(a_param,b_param,c_param,h_param,x0_param), method='hybr')
sep_inner_dist_max_grad=np.interp(max_n_e_grad_psi.x, np.asarray(psi_n_e)[:,0], np.asarray(dev_n_e)[:,0])
sep_inner_dist_max_grad=np.interp(x0_param, np.asarray(psi_n_e)[:,0], np.asarray(dev_n_e)[:,0])
# plt.plot(dev_n_e[:,0], db_data[ind_db_2[0]]['n_e_dev'])
# plt.pause(1.0)
if sep_inner_dist_max_grad > 0.1:
sep_inner_dist_max_grad=np.nan
n_i=n_e #Quasi neutrality
n_i_error=n_e_error
R=velocity_results['Position max'][elm_time_ind,0]
R_error=3.75e-3
c_s2=gamma*Z*k_B*(T_e)/m_i
delta_b=np.mean(velocity_results['Size max'][elm_time_ind-4:elm_time_ind+1,0])
delta_b_error=10e-3
"""
HIJACKING INFO FOR DEPENDENCE CALCULATION
"""
if calculate_dependence:
dependence_db['Velocity ccf'].append(velocity_results['Velocity ccf'][elm_time_ind,:])
dependence_db_err['Velocity ccf'].append(np.asarray([3.75e-3/2.5e-6,3.75e-3/2.5e-6]))
dependence_db['Size max'].append(velocity_results['Size max'][elm_time_ind,:])
dependence_db_err['Size max'].append([delta_b_error,delta_b_error])
dependence_db['Current'].append(j_edge)
dependence_db_err['Current'].append(j_edge*0.1)
for key in ['Density','Temperature', 'Pressure']:
a_param=db_nt[key]['a'][ind_db]
b_param=db_nt[key]['b'][ind_db]
c_param=db_nt[key]['c'][ind_db]
h_param=db_nt[key]['h'][ind_db]
x0_param=db_nt[key]['xo'][ind_db]
if key== 'Density':
profile_bl=[True,False,False]
elif key == 'Temperature':
profile_bl=[False,True,False]
elif key == 'Pressure':
profile_bl=[False,False,True]
thomson_profiles=get_fit_nstx_thomson_profiles(exp_id=shot,
density=profile_bl[0],
temperature=profile_bl[1],
pressure=profile_bl[2],
flux_coordinates=True,
return_parameters=True)
time_ind=np.argmin(np.abs(thomson_profiles['Time']-elm_time))
dependence_db[key+' grad'].append(max(mtanh_dx_function(np.arange(0,1.4,0.01),a_param,b_param,c_param,h_param,x0_param)))
dependence_db_err[key+' grad'].append(thomson_profiles['Error']['Max gradient'][time_ind])
"""
END OF HIJACKING
"""
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_pol=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\BZZ0',
exp_id=shot,
object_name='BZZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
except:
pass
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_tor=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\BTZ0',
exp_id=shot,
object_name='BTZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
except:
pass
try:
if velocity_results['Position max'][elm_time_ind,0] != 0.:
b_rad=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\BRZ0',
exp_id=shot,
object_name='BRZ0').slice_data(slicing={'Time':elm_time,
'Device R':velocity_results['Position max'][elm_time_ind,0]}).data
except:
pass
B=np.sqrt(b_pol**2+b_tor**2+b_rad**2)
omega_i=q_e*B/m_i
"""
HIJACKING FOR ION DIAMAGNETIC DRIFT VELOCITY CALCULATION
"""
if calculate_ion_drift_velocity:
d=flap_nstx_thomson_data(exp_id=shot, pressure=True, add_flux_coordinates=True, output_name='pressure')
time_index=np.argmin(np.abs(d.coordinate('Time')[0][0,:]-elm_time))
dpsi_per_dr=((d.coordinate('Device R')[0][0:-1,0]-d.coordinate('Device R')[0][1:,0])/(d.coordinate('Flux r')[0][0:-1,time_index]-d.coordinate('Flux r')[0][1:,time_index]))[-10:]
a_param=db_nt['Pressure']['a'][ind_db]
b_param=db_nt['Pressure']['b'][ind_db]
c_param=db_nt['Pressure']['c'][ind_db]
h_param=db_nt['Pressure']['h'][ind_db]
x0_param=db_nt['Pressure']['xo'][ind_db]
psi_prof=d.coordinate('Flux r')[0][-10:,time_index]
grad_p=mtanh_dx_function(psi_prof,a_param,b_param,c_param,h_param,x0_param)*dpsi_per_dr
a_param=db_nt['Density']['a'][ind_db]
b_param=db_nt['Density']['b'][ind_db]
c_param=db_nt['Density']['c'][ind_db]
h_param=db_nt['Density']['h'][ind_db]
x0_param=db_nt['Density']['xo'][ind_db]
n_i_profile=mtanh_function(psi_prof,a_param,b_param,c_param,h_param,x0_param)*dpsi_per_dr*1e20
poloidal_velocity=velocity_results['Velocity ccf'][elm_time_ind,1]
drift_velocity=-grad_p /(q_e * n_i_profile * B)
if -poloidal_velocity/1e3 < max(drift_velocity) and -poloidal_velocity > 0:
max_ind=np.argmax(drift_velocity)
drift_velocity_trunk=drift_velocity[max_ind:]
sort_ind=np.argsort(drift_velocity_trunk)
                        psi_crossing=np.interp(-poloidal_velocity/1e3, drift_velocity_trunk[sort_ind], psi_prof[max_ind:][sort_ind])
import pdb
import numpy as np
import os
import tensorflow as tf
import math
from .data_utils import minibatches, pad_sequences, get_chunks
from .general_utils import Progbar
from .base_model import BaseModel
class NERModel(BaseModel):
"""Specialized class of Model for NER"""
def __init__(self, config):
super(NERModel, self).__init__(config)
self.idx_to_tag = {idx: tag for tag, idx in
self.config.vocab_tags.items()}
self.tag_to_idx = {tag: idx for tag, idx in
self.config.vocab_tags.items()}
def add_placeholders(self):
"""Define placeholders = entries to computational graph"""
# shape = (batch size, max length of sentence in batch)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None],
name="word_ids")
# shape = (batch size)
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],
name="sequence_lengths")
# shape = (batch size, max length of sentence, max length of word)
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None],
name="char_ids")
# shape = (batch_size, max_length of sentence)
self.word_lengths = tf.placeholder(tf.int32, shape=[None, None],
name="word_lengths")
# shape = (batch size, max length of sentence in batch)
self.labels = tf.placeholder(tf.int32, shape=[None, None],
name="labels")
# hyper parameters
self.dropout = tf.placeholder(dtype=tf.float32, shape=[],
name="dropout")
self.lr = tf.placeholder(dtype=tf.float32, shape=[],
name="lr")
def get_feed_dict(self, words, labels=None, lr=None, dropout=None):
"""Given some data, pad it and build a feed dictionary
Args:
words: list of sentences. A sentence is a list of ids of a list of
words. A word is a list of ids
labels: list of ids
lr: (float) learning rate
dropout: (float) keep prob
Returns:
dict {placeholder: value}
"""
# perform padding of the given data
if self.config.use_chars:
char_ids, word_ids = zip(*words)
word_ids, sequence_lengths = pad_sequences(word_ids, 0)
char_ids, word_lengths = pad_sequences(char_ids, pad_tok=0,
nlevels=2)
else:
word_ids, sequence_lengths = pad_sequences(words, 0)
# build feed dictionary
feed = {
self.word_ids: word_ids,
self.sequence_lengths: sequence_lengths
}
if self.config.use_chars:
feed[self.char_ids] = char_ids
feed[self.word_lengths] = word_lengths
if labels is not None:
labels, _ = pad_sequences(labels, 0)
feed[self.labels] = labels
if lr is not None:
feed[self.lr] = lr
if dropout is not None:
feed[self.dropout] = dropout
return feed, sequence_lengths
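    # Minimal usage sketch for get_feed_dict (hypothetical ids, assuming
    # self.config.use_chars is False):
    #   words  = [[1, 5, 3], [7, 2]]   # two sentences as lists of word ids
    #   labels = [[0, 1, 0], [2, 2]]
    #   fd, seq_lens = model.get_feed_dict(words, labels, lr=0.001, dropout=0.5)
    #   # fd maps the placeholders above to padded arrays; seq_lens == [3, 2]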
def add_word_embeddings_op(self):
"""Defines self.word_embeddings
If self.config.embeddings is not None and is a np array initialized
with pre-trained word vectors, the word embeddings is just a look-up
and we don't train the vectors. Otherwise, a random matrix with
the correct shape is initialized.
"""
with tf.variable_scope("words"):
if self.config.embeddings is None:
self.logger.info("WARNING: randomly initializing word vectors")
_word_embeddings = tf.get_variable(
name="_word_embeddings",
dtype=tf.float32,
shape=[self.config.nwords, self.config.dim_word])
else:
_word_embeddings = tf.Variable(
self.config.embeddings,
name="_word_embeddings",
dtype=tf.float32,
trainable=self.config.train_embeddings)
word_embeddings = tf.nn.embedding_lookup(_word_embeddings,
self.word_ids, name="word_embeddings")
with tf.variable_scope("chars"):
if self.config.use_chars:
# get char embeddings matrix
_char_embeddings = tf.get_variable(
name="_char_embeddings",
dtype=tf.float32,
shape=[self.config.nchars, self.config.dim_char])
char_embeddings = tf.nn.embedding_lookup(_char_embeddings,
self.char_ids, name="char_embeddings")
# put the time dimension on axis=1
s = tf.shape(char_embeddings)
char_embeddings = tf.reshape(char_embeddings,
shape=[s[0]*s[1], s[-2], self.config.dim_char])
word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])
# bi lstm on chars
cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
_output = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, char_embeddings,
sequence_length=word_lengths, dtype=tf.float32)
# read and concat output
_, ((_, output_fw), (_, output_bw)) = _output
output = tf.concat([output_fw, output_bw], axis=-1)
# shape = (batch size, max sentence length, char hidden size)
output = tf.reshape(output,
shape=[s[0], s[1], 2*self.config.hidden_size_char])
word_embeddings = tf.concat([word_embeddings, output], axis=-1)
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)
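    # Note: when pre-trained vectors are used, self.config.embeddings is expected
    # to be a numpy array of shape (nwords, dim_word), e.g. loaded from a trimmed
    # embeddings file (path and key below are hypothetical):
    #   config.embeddings = np.load("embeddings_trimmed.npz")["embeddings"]
    # The lookup table is then only updated if config.train_embeddings is True.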
def add_logits_op(self):
"""Defines self.logits
For each word in each sentence of the batch, it corresponds to a vector
of scores, of dimension equal to the number of tags.
"""
with tf.variable_scope("bi-lstm"):
cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)
cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, self.word_embeddings,
sequence_length=self.sequence_lengths, dtype=tf.float32)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.nn.dropout(output, self.dropout)
with tf.variable_scope("proj"):
W = tf.get_variable("W", dtype=tf.float32,
shape=[2*self.config.hidden_size_lstm, self.config.ntags])
b = tf.get_variable("b", shape=[self.config.ntags],
dtype=tf.float32, initializer=tf.zeros_initializer())
nsteps = tf.shape(output)[1]
output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])
pred = tf.matmul(output, W) + b
self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])
def add_pred_op(self):
"""Defines self.labels_pred
This op is defined only in the case where we don't use a CRF since in
that case we can make the prediction "in the graph" (thanks to tf
        functions in other words). With the CRF, as the inference is coded
        in python and not in pure tensorflow, we have to make the prediction
outside the graph.
"""
if not self.config.use_crf:
self.labels_pred = tf.cast(tf.argmax(self.logits, axis=-1),
tf.int32)
def add_loss_op(self):
"""Defines the loss"""
if self.config.use_crf:
log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
self.logits, self.labels, self.sequence_lengths)
self.trans_params = trans_params # need to evaluate it for decoding
self.loss = tf.reduce_mean(-log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.labels)
mask = tf.sequence_mask(self.sequence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
# for tensorboard
tf.summary.scalar("loss", self.loss)
def build(self):
# NER specific functions
self.add_placeholders()
self.add_word_embeddings_op()
self.add_logits_op()
self.add_pred_op()
self.add_loss_op()
# Generic functions that add training op and initialize session
self.add_train_op(self.config.lr_method, self.lr, self.loss,
self.config.clip)
self.initialize_session() # now self.sess is defined and vars are init
def predict_batch(self, words):
"""
Args:
words: list of sentences
Returns:
labels_pred: list of labels for each sentence
sequence_length
"""
fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)
if self.config.use_crf:
# get tag scores and transition params of CRF
viterbi_sequences = []
scores = []
logits, trans_params = self.sess.run(
[self.logits, self.trans_params], feed_dict=fd)
#logits = sigmoid_v(logits)
#trans_params = sigmoid_v(trans_params)
            # iterate over the sentences because there is no batching in viterbi_decode
for logit, sequence_length in zip(logits, sequence_lengths):
logit = logit[:sequence_length] # keep only the valid steps
#print("Logit ", logit)
viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(
logit, trans_params)
viterbi_sequences += [viterbi_seq]
#print('trans_params ', trans_params)
#print('Sequence ', viterbi_seq)
#print(sequence_length)
#print(len(viterbi_seq))
#print('Score ', viterbi_score)#Use to decide least-uncertainty
if self.config.active_strategy=="nus":
viterbi_score = float(viterbi_score/sequence_length)
else:
viterbi_score = active_strategy(logit, trans_params, self.config.active_strategy, self.tag_to_idx)
scores.append(viterbi_score)
return viterbi_sequences, sequence_lengths, scores
else:
labels_pred = self.sess.run(self.labels_pred, feed_dict=fd)
return labels_pred, sequence_lengths, None
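    # Example sketch: for a batch of sentences, predict_batch returns one tag-id
    # sequence per sentence plus a per-sentence score used for active learning:
    #   preds, lengths, scores = model.predict_batch(words)
    #   # preds[i] has length lengths[i]; scores is None when use_crf is False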
def run_epoch(self, train, dev, epoch):
"""Performs one complete pass over the train set and evaluate on dev
Args:
train: dataset that yields tuple of sentences, tags
dev: dataset
epoch: (int) index of the current epoch
Returns:
f1: (python float), score to select model on, higher is better
"""
# progbar stuff for logging
batch_size = self.config.batch_size
nbatches = (len(train) + batch_size - 1) // batch_size
#prog = Progbar(target=nbatches)
# iterate over dataset
for i, (words, labels) in enumerate(minibatches(train, batch_size)):
#print(words, labels)
fd, _ = self.get_feed_dict(words, labels, self.config.lr,
self.config.dropout)
_, train_loss, summary = self.sess.run(
[self.train_op, self.loss, self.merged], feed_dict=fd)
#prog.update(i + 1, [("train loss", train_loss)])
# tensorboard
if i % 10 == 0:
self.file_writer.add_summary(summary, epoch*nbatches + i)
metrics = self.run_evaluate(dev)
msg = "Accuracy " + str(metrics["acc"]) + " - F1 " + str(metrics["f1"])
#msg = " - ".join(["{} {:04.2f}".format(k, v)
# for k, v in metrics.items()])
print(msg)
self.logger.info(msg)
return metrics["f1"]
def run_evaluate(self, test, mode="train"):
"""Evaluates performance on test set
Args:
test: dataset that yields tuple of (sentences, tags)
Returns:
metrics: (dict) metrics["acc"] = 98.4, ...
"""
accs = []
l = []
#correct_preds_ne, total_correct_ne, total_preds_ne = 0.,0.,0.
s= ""
correct_preds, total_correct, total_preds = 0., 0., 0.
for words, labels in minibatches(test, self.config.batch_size):
#print(words,labels)
labels_pred, sequence_lengths, prob = self.predict_batch(words)
#pdb.set_trace()
#l.append((list(words),prob)) #list of words, list of scores corresponding
#l += prob
#print('labels_pred ', labels_pred)
if 'test' in mode:
for lab, pred in zip(labels, labels_pred):
#print('lab',lab)
#print('pred',pred)
for i,j in zip(lab,pred):
s+=self.idx_to_tag[i] + '\t' + self.idx_to_tag[j] + '\n'
s+='\n'
for lab, lab_pred, length in zip(labels, labels_pred,
sequence_lengths):
lab = lab[:length]
lab_pred = lab_pred[:length]
accs += [a==b for (a, b) in zip(lab, lab_pred)]
lab_chunks = set(get_chunks(lab, self.config.vocab_tags))
lab_pred_chunks = set(get_chunks(lab_pred,
self.config.vocab_tags))
correct_preds += len(lab_chunks & lab_pred_chunks)
total_preds += len(lab_pred_chunks)
total_correct += len(lab_chunks)
#print("Total Preds ", total_preds)
#print("Total correct ", total_correct)
#print("Correct preds ", correct_preds)
p = correct_preds / total_preds if correct_preds > 0 else 0
r = correct_preds / total_correct if correct_preds > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
acc = np.mean(accs)
if "test" in mode:
f = open(self.config.file_out + "_" + mode,'w')
f.write(s)
f.close()
#Sort l to get most/least uncertain
#l2 = sorted(l)
#mu = []
#lu = []
#for i in range(0,self.config.num_query):
# mu.append(l.index(l2[i]))
# lu.append(l.index(l2[len(l2)-i-1]))
#l = sorted(l, key=lambda pr: pr[2])
#pdb.set_trace()
#print("l",l)
#return acc, f1, list of most uncertainty and list of least uncertainty examples
#return {"acc": 100*acc, "f1": 100*f1, "out":l}
return {"acc": 100*acc, "f1": 100*f1}
#return {"acc": 100*acc, "f1": 100*f1, "mu": l[0:self.config.num_query], "lu": l[len(l)-self.config.num_query: len(l)]}
def predict(self, words_raw):
"""Returns list of tags
Args:
words_raw: list of words (string), just one sentence (no batch)
Returns:
preds: list of tags (string), one for each word in the sentence
"""
#words = [self.config.processing_word(w) for w in words_raw] #this is used for word raw
#print(words)
words = words_raw
words_o = list(words)
#print(words_o)
if type(words[0]) == tuple:
words = zip(*words)
#print(words)
pred_ids, _, scores = self.predict_batch([words])
#print("Prediction: ")
#print(pred_ids, _, scores)
preds = [self.idx_to_tag[idx] for idx in list(pred_ids[0])]
return (words_o, scores)
#return preds
def active_strategy(score, transition_params, active_strategy, tag_to_idx):
"""
Args: output of CRF
score: A [seq_len, num_tags] matrix of unary potentials.
transition_params: A [num_tags, num_tags] matrix of binary potentials.
"""
if active_strategy=="cluster":
return score
trellis = np.zeros_like(score)
backpointers = np.zeros_like(score, dtype=np.int32)
trellis[0] = score[0]
for t in range(1, score.shape[0]):
v = np.expand_dims(trellis[t - 1], 1) + transition_params
trellis[t] = score[t] + np.max(v, 0)
backpointers[t] = np.argmax(v, 0)
viterbi = [np.argmax(trellis[-1])]
for bp in reversed(backpointers[1:]):
viterbi.append(bp[viterbi[-1]])
viterbi.reverse()
score_final = np.max(trellis[-1]) #Score of sequences (higher = better)
if (active_strategy=='mg'):
top_scores = trellis[-1][np.argsort(trellis[-1])[-2:]]
margin = abs(top_scores[0]-top_scores[1])
score_final = margin
elif (active_strategy=='ne'):
#print("Calling ne strategy")
#print("score", score)
#tag_to_idx = {tag: indx for tag, indx in self.config.vocab_tags.items()}
ne = ['NE.AMBIG','NE.DE', 'NE.LANG3', 'NE.MIXED', 'NE.OTHER','NE.TR']
ne_idx = []
for i in tag_to_idx:
if i in ne:
ne_idx.append(tag_to_idx[i])
#print('ne_idx ', ne_idx)
#print('score ', score)
#Get the highest score of NE
max_ne = []
#for i in ne_idx:
# max_ne.append(np.max(score[:,i]))
score_final = 0
for i in viterbi:
if i in ne_idx:
score_final+=1 #give higher score to sequences that have more named entities
#score_final = np.max(max_ne)
elif (active_strategy=='nemg'): #ne margin
ne_idx = tag_to_idx['NE.DE']
ne_de = tag_to_idx['DE']
margin = np.add(score[:,ne_idx],score[:,ne_de])
margin2 = abs(np.multiply(score[:,ne_idx],score[:,ne_de]))
margin = np.divide(margin, margin2)
sum_margin = np.sum(margin)
score_final = sum_margin
if (active_strategy=='entropy'):
#Find the highest prob for each token
ntoken = len(score)
ntags = len(score[0])
l = [] #max prob of each token
for i in range(0,ntoken):
l.append(np.max(score[i]))
ne_idx = tag_to_idx
#Compute entropy
score_final = 0.0
for i in range(0,ntoken):
            score_final += l[i]*np.log(l[i])
    # Assumed: return the computed score, since predict_batch() above expects a
    # value back from this function for every strategy.
    return score_final
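# Minimal sketch of calling active_strategy() directly with random unary and
# transition scores (shapes only -- real inputs come from the CRF layer above;
# the tag names/ids below are hypothetical):
if __name__ == "__main__":
    _seq_len, _num_tags = 5, 4
    _dummy_score = np.random.rand(_seq_len, _num_tags)
    _dummy_trans = np.random.rand(_num_tags, _num_tags)
    _dummy_tags = {"O": 0, "DE": 1, "NE.DE": 2, "NE.TR": 3}
    print(active_strategy(_dummy_score, _dummy_trans, "mg", _dummy_tags))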
# pre/test_shift_scale.py
"""Tests for rom_operator_inference.pre._shift_scale.py."""
import os
import h5py
import pytest
import itertools
import numpy as np
import rom_operator_inference as opinf
# Data preprocessing: shifting and MinMax scaling / unscaling =================
def test_shift(set_up_basis_data):
"""Test pre._shift_scale.shift()."""
X = set_up_basis_data
# Try with bad data shape.
with pytest.raises(ValueError) as ex:
opinf.pre.shift(np.random.random((3,3,3)))
assert ex.value.args[0] == "data X must be two-dimensional"
# Try with bad shift vector.
with pytest.raises(ValueError) as ex:
opinf.pre.shift(X, X)
assert ex.value.args[0] == "shift_by must be one-dimensional"
# Correct usage.
Xshifted, xbar = opinf.pre.shift(X)
assert xbar.shape == (X.shape[0],)
assert Xshifted.shape == X.shape
assert np.allclose(np.mean(Xshifted, axis=1), np.zeros(X.shape[0]))
for j in range(X.shape[1]):
assert np.allclose(Xshifted[:,j], X[:,j] - xbar)
Y = np.random.random(X.shape)
Yshifted = opinf.pre.shift(Y, xbar)
for j in range(Y.shape[1]):
assert np.allclose(Yshifted[:,j], Y[:,j] - xbar)
# Verify inverse shifting.
assert np.allclose(X, opinf.pre.shift(Xshifted, -xbar))
def test_scale(set_up_basis_data):
"""Test pre._shift_scale.scale()."""
X = set_up_basis_data
# Try with bad scales.
with pytest.raises(ValueError) as ex:
opinf.pre.scale(X, (1,2,3), (4,5))
assert ex.value.args[0] == "scale_to must have exactly 2 elements"
with pytest.raises(ValueError) as ex:
opinf.pre.scale(X, (1,2), (3,4,5))
assert ex.value.args[0] == "scale_from must have exactly 2 elements"
# Scale X to [-1,1] and then scale Y with the same transformation.
Xscaled, scaled_to, scaled_from = opinf.pre.scale(X, (-1,1))
assert Xscaled.shape == X.shape
assert scaled_to == (-1,1)
assert isinstance(scaled_from, tuple)
assert len(scaled_from) == 2
assert round(scaled_from[0],8) == round(X.min(),8)
assert round(scaled_from[1],8) == round(X.max(),8)
assert round(Xscaled.min(),8) == -1
assert round(Xscaled.max(),8) == 1
# Verify inverse scaling.
assert np.allclose(opinf.pre.scale(Xscaled, scaled_from, scaled_to), X)
# Transformer classes for centering and scaling ===============================
class TestSnapshotTransformer:
"""Test pre.SnapshotTransformer."""
def test_init(self):
"""Test pre.SnapshotTransformer.__init__()."""
st = opinf.pre.SnapshotTransformer()
for attr in ["scaling", "center", "verbose"]:
assert hasattr(st, attr)
# Properties --------------------------------------------------------------
def test_properties(self):
"""Test pre.SnapshotTransformer properties (attribute protection)."""
st = opinf.pre.SnapshotTransformer()
# Test center.
with pytest.raises(TypeError) as ex:
st.center = "nope"
assert ex.value.args[0] == "'center' must be True or False"
st.center = True
st.center = False
# Test scale.
with pytest.raises(ValueError) as ex:
st.scaling = "minimaxii"
assert ex.value.args[0].startswith("invalid scaling 'minimaxii'")
with pytest.raises(TypeError) as ex:
st.scaling = [2, 1]
assert ex.value.args[0] == "'scaling' must be of type 'str'"
for s in st._VALID_SCALINGS:
st.scaling = s
st.scaling = None
def test_eq(self, n=200):
"""Test pre.SnapshotTransformer.__eq__()."""
µ = np.random.randint(0, 100, (n,))
a, b = 10, -3
# Null transformers.
st1 = opinf.pre.SnapshotTransformer()
st2 = opinf.pre.SnapshotTransformer()
assert st1 == st2
assert st1 != 100
# Mismatched attributes.
st1.center = True
st2.center = False
assert not (st1 == st2)
assert st1 != st2
# Centering attributes.
st1.mean_ = µ
st2.center = True
assert st1 != st2
st2.mean_ = µ
assert st1 == st2
st2.mean_ = µ - 5
assert st1 != st2
# Scaling attributes.
st1.scaling = "standard"
st2.scaling = None
assert st1 != st2
st2.scaling = "minmax"
assert st1 != st2
st2.scaling = "standard"
assert st1 == st2
st1.scale_, st1.shift_ = a, b
assert st1 != st2
st2.scale_, st2.shift_ = a - 1, b + 1
assert st1 != st2
st2.scale_, st2.shift_ = a, b
assert st1 == st2
# Printing ----------------------------------------------------------------
def test_str(self):
"""Test pre.SnapshotTransformer.__str__()."""
st = opinf.pre.SnapshotTransformer()
st.center = False
st.scaling = None
assert str(st) == "Snapshot transformer"
st.center = True
trn = "(call fit_transform() to train)"
msc = "Snapshot transformer with mean-snapshot centering"
assert str(st) == f"{msc} {trn}"
for s in st._VALID_SCALINGS:
st.scaling = s
assert str(st) == f"{msc} and '{s}' scaling {trn}"
st.center = False
for s in st._VALID_SCALINGS:
st.scaling = s
assert str(st) == f"Snapshot transformer with '{s}' scaling {trn}"
def test_statistics_report(self):
"""Test pre.SnapshotTransformer._statistics_report()."""
        X = np.arange(10)
import numpy as np
from abc import ABC, abstractmethod
from pathlib import Path
import subprocess
import numpy.ma as ma
import scipy.constants as const
from multiprocessing import Pool
from scipy.interpolate import interp1d
from dans_pymodules import Vector2D
import matplotlib.pyplot as plt
# from scipy import meshgrid
from scipy.special import iv as bessel1
from scipy.optimize import root
# import pickle
# import scipy.constants as const
# import numpy as np
# import platform
# import matplotlib.pyplot as plt
# import gc
import datetime
import time
import copy
import os
import sys
import shutil
from matplotlib.patches import Arc as Arc
load_previous = False
# Check if we can connect to a display, if not disable all plotting and windowed stuff (like gmsh)
# TODO: This does not remotely cover all cases!
if "DISPLAY" in os.environ.keys():
x11disp = True
else:
x11disp = False
# --- Try importing BEMPP
HAVE_BEMPP = False
try:
import bempp.api
from bempp.api.shapes.shapes import __generate_grid_from_geo_string as generate_from_string
HAVE_BEMPP = True
except ImportError:
print("Couldn't import BEMPP, no meshing or BEM field calculation will be possible.")
bempp = None
generate_from_string = None
# --- Try importing mpi4py, if it fails, we fall back to single processor
try:
from mpi4py import MPI
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
HOST = MPI.Get_processor_name()
print("Process {} of {} on host {} started!".format(RANK + 1, SIZE, HOST))
sys.stdout.flush()
except ImportError:
MPI = None
COMM = None
RANK = 0
SIZE = 1
import socket
HOST = socket.gethostname()
print("Could not import mpi4py, falling back to single core (and python multiprocessing in some instances)!")
# --- Try importing pythonocc-core
HAVE_OCC = False
try:
from OCC.Extend.DataExchange import read_stl_file
from OCC.Display.SimpleGui import init_display
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox, BRepPrimAPI_MakeTorus, BRepPrimAPI_MakeSweep
from OCC.Core.BRepTools import breptools_Write
from OCC.Core.BRepBndLib import brepbndlib_Add
from OCC.Core.Bnd import Bnd_Box
from OCC.Core.gp import gp_Pnt, gp_Pnt2d
from OCC.Core.BRepClass3d import BRepClass3d_SolidClassifier
from OCC.Core.TopAbs import TopAbs_ON, TopAbs_OUT, TopAbs_IN
from OCC.Core.GeomAPI import GeomAPI_Interpolate, GeomAPI_PointsToBSpline
from OCC.Core.Geom import Geom_BSplineCurve
from OCC.Core.Geom2d import Geom2d_BSplineCurve
from OCC.Core.TColgp import TColgp_HArray1OfPnt, TColgp_Array1OfPnt
from OCC.Core.TColStd import TColStd_Array1OfInteger, TColStd_Array1OfReal
from OCC.Core.GeomAbs import GeomAbs_C1, GeomAbs_C2, GeomAbs_G1
from OCC.Core.Geom2dAPI import Geom2dAPI_Interpolate, Geom2dAPI_PointsToBSpline
from OCC.Core.TColgp import TColgp_HArray1OfPnt2d, TColgp_Array1OfPnt2d
from OCCUtils.Common import *
from py_electrodes import ElectrodeObject
HAVE_OCC = True
except ImportError:
ElectrodeObject = None
print("Something went wrong during OCC import. No CAD support possible!")
USE_MULTIPROC = True # In case we are not using mpi or only using 1 processor, fall back on multiprocessing
GMSH_EXE = "/home/daniel/src/gmsh-4.0.6-Linux64/bin/gmsh"
# GMSH_EXE = "E:/gmsh4/gmsh.exe"
HAVE_TEMP_FOLDER = False
np.set_printoptions(threshold=10000)
HAVE_GMSH = True
# Quick test if gmsh path is correct
if not Path(GMSH_EXE).is_file():
print("Gmsh path seems to be wrong! No meshing will be possible!")
HAVE_GMSH = False
# For now, everything involving the pymodules with be done on master proc (RANK 0)
if RANK == 0:
from dans_pymodules import *
colors = MyColors()
else:
colors = None
decimals = 12
__author__ = "<NAME>, <NAME>"
__doc__ = """Calculate RFQ fields from loaded cell parameters"""
# Initialize some global constants
amu = const.value("atomic mass constant energy equivalent in MeV")
echarge = const.value("elementary charge")
clight = const.value("speed of light in vacuum")
# Define the axis directions and vane rotations:
X = 0
Y = 1
Z = 2
XYZ = range(3)
AXES = {"X": 0, "Y": 1, "Z": 2}
rot_map = {"yp": 0.0,
"ym": 180.0,
"xp": 270.0,
"xm": 90.0}
class Polygon2D(object):
"""
Simple class to handle polygon operations such as point in polygon or
orientation of rotation (cw or ccw), area, etc.
"""
def add_point(self, p=None):
"""
Append a point to the polygon
"""
if p is not None:
if isinstance(p, tuple) and len(p) == 2:
self.poly.append(p)
else:
print("Error in add_point of Polygon: p is not a 2-tuple!")
else:
print("Error in add_point of Polygon: No p given!")
return 0
def add_polygon(self, poly=None):
"""
Append a polygon object to the end of this polygon
"""
if poly is not None:
if isinstance(poly, Polygon2D):
self.poly.extend(poly.poly)
# if isinstance(poly.poly, list) and len(poly.poly) > 0:
#
# if isinstance(poly.poly[0], tuple) and len(poly.poly[0]) == 2:
# self.poly.extend(poly.poly)
return 0
def area(self):
"""
Calculates the area of the polygon. only works if there are no crossings
Taken from http://paulbourke.net, algorithm written by <NAME>, 1998
If area is positive -> polygon is given clockwise
If area is negative -> polygon is given counter clockwise
"""
area = 0
poly = self.poly
npts = len(poly)
j = npts - 1
i = 0
for _ in poly:
p1 = poly[i]
p2 = poly[j]
area += (p1[0] * p2[1])
area -= p1[1] * p2[0]
j = i
i += 1
area /= 2
return area
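    # Example for area(): a unit square given counter-clockwise yields -1.0, the
    # same square in clockwise order yields +1.0 (the sign encodes orientation):
    #   Polygon2D([(0, 0), (1, 0), (1, 1), (0, 1)]).area()   # -> -1.0
    #   Polygon2D([(0, 1), (1, 1), (1, 0), (0, 0)]).area()   # -> +1.0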
def centroid(self):
"""
Calculate the centroid of the polygon
Taken from http://paulbourke.net, algorithm written by <NAME>, 1998
"""
poly = self.poly
npts = len(poly)
x = 0
y = 0
j = npts - 1
i = 0
for _ in poly:
p1 = poly[i]
p2 = poly[j]
f = p1[0] * p2[1] - p2[0] * p1[1]
x += (p1[0] + p2[0]) * f
y += (p1[1] + p2[1]) * f
j = i
i += 1
f = self.area() * 6
return x / f, y / f
def clockwise(self):
"""
Returns True if the polygon points are ordered clockwise
If area is positive -> polygon is given clockwise
If area is negative -> polygon is given counter clockwise
"""
if self.area() > 0:
return True
else:
return False
def closed(self):
"""
Checks whether the polygon is closed (i.e first point == last point)
"""
if self.poly[0] == self.poly[-1]:
return True
else:
return False
def nvertices(self):
"""
Returns the number of vertices in the polygon
"""
return len(self.poly)
def point_in_poly(self, p=None):
"""
Check if a point p (tuple of x,y) is inside the polygon
This is called the "ray casting method": If a ray cast from p crosses
the polygon an even number of times, it's outside, otherwise inside
From: http://www.ariel.com.au/a/python-point-int-poly.html
Note: Points directly on the edge or identical with a vertex are not
considered "inside" the polygon!
"""
if p is None:
return None
poly = self.poly
x = p[0]
y = p[1]
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
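    # Example for point_in_poly() with the unit square:
    #   square = Polygon2D([(0, 0), (1, 0), (1, 1), (0, 1)])
    #   square.point_in_poly((0.5, 0.5))   # -> True
    #   square.point_in_poly((1.5, 0.5))   # -> False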
def remove_last(self):
"""
Remove the last tuple in the ploygon
"""
self.poly.pop(-1)
return 0
def reverse(self):
"""
Reverses the ordering of the polygon (from cw to ccw or vice versa)
"""
temp_poly = []
nv = self.nvertices()
for i in range(self.nvertices() - 1, -1, -1):
temp_poly.append(self.poly[i])
self.poly = temp_poly
return temp_poly
def rotate(self, index):
"""
rotates the polygon, so that the point with index 'index' before now has
index 0
"""
if index > self.nvertices() - 1:
return 1
for i in range(index):
self.poly.append(self.poly.pop(0))
return 0
def __init__(self, poly=None):
"""
construct a polygon object
If poly is not specified, an empty polygon is created
if poly is specified, it has to be a list of 2-tuples!
"""
self.poly = []
if poly is not None:
if isinstance(poly, list) and len(poly) > 0:
if isinstance(poly[0], tuple) and len(poly[0]) == 2:
self.poly = poly
def __getitem__(self, index):
return self.poly[index]
def __setitem__(self, index, value):
if isinstance(value, tuple) and len(value) == 2:
self.poly[index] = value
class PyRFQCell(object):
def __init__(self,
cell_type,
prev_cell=None,
next_cell=None,
debug=False,
**kwargs):
"""
:param cell_type:
STA: Start cell without length (necessary at beginning of RMS if there are no previous cells)
RMS: Radial Matching Section.
NCS: Normal Cell. A regular RFQ cell
TCS: Transition Cell.
DCS: Drift Cell. No modulation.
TRC: Trapezoidal cell (experimental, for re-bunching only!).
:param prev_cell:
:param next_cell:
:param debug:
Keyword Arguments (mostly from Parmteq Output File):
V: Intervane voltage in V
Wsyn: Energy of the synchronous particle in MeV
Sig0T: Transverse zero-current phase advance in degrees per period
Sig0L: Longitudinal zero-current phase advance in degrees per period
A10: Acceleration term [first theta-independent term in expansion]
Phi: Synchronous phase in degrees
a: Minimum radial aperture in m
m: Modulation (dimensionless)
B: Focusing parameter (dimensionless) B = q V lambda^2/(m c^2 r0^2)
L: Cell length in cm
A0: Quadrupole term [first z-independent term in expansion]
RFdef: RF defocusing term
Oct: Octupole term
A1: Duodecapole term [second z-independent term in expansion]
"""
assert cell_type in ["start", "rms", "regular",
"transition", "transition_auto", "drift", "trapezoidal"], \
"cell_type not recognized!"
self._type = cell_type
self._params = {"voltage": None,
"Wsyn": None,
"Sig0T": None,
"Sig0L": None,
"A10": None,
"Phi": None,
"a": None,
"m": None,
"B": None,
"L": None,
"A0": None,
"RFdef": None,
"Oct": None,
"A1": None,
"flip_z": False,
"shift_cell_no": False,
"fillet_radius": None
}
self._prev_cell = prev_cell
self._next_cell = next_cell
self._debug = debug
for key, item in self._params.items():
if key in kwargs.keys():
self._params[key] = kwargs[key]
if self.initialize() != 0:
print("Cell failed self-check! Aborting.")
exit(1)
self._profile_itp = None # Interpolation of the cell profile
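    # Construction sketch (parameter values are purely illustrative and not taken
    # from a real Parmteq run; 'previous_cell' is a hypothetical PyRFQCell):
    #   cell = PyRFQCell("regular", prev_cell=previous_cell,
    #                    a=0.005, m=1.5, L=0.02,
    #                    flip_z=False, shift_cell_no=False)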
def __str__(self):
return "Type: '{}', Aperture: {:.6f}, Modulation: {:.4f}, " \
"Length: {:.6f}, flip: {}, shift: {}".format(self._type,
self._params["a"],
self._params["m"],
self._params["L"],
self._params["flip_z"],
self._params["shift_cell_no"])
@property
def length(self):
return self._params["L"]
@property
def aperture(self):
return self._params["a"]
@property
def avg_radius(self):
return 0.5 * (self._params["a"] + self._params["m"] * self._params["a"])
@property
def cell_type(self):
return self._type
@property
def modulation(self):
return self._params["m"]
@property
def prev_cell(self):
return self._prev_cell
@property
def next_cell(self):
return self._next_cell
def calculate_transition_cell_length(self):
le = self._params["L"]
m = self._params["m"]
a = self._params["a"]
r0 = self.avg_radius
k = np.pi / np.sqrt(3.0) / le
def eta(kk):
return bessel1(0.0, kk * r0) / (3.0 * bessel1(0.0, 3.0 * kk * r0))
def func(kk):
return (bessel1(0.0, kk * m * a) + eta(kk) * bessel1(0.0, 3.0 * kk * m * a)) / \
(bessel1(0.0, kk * a) + eta(kk) * bessel1(0.0, 3.0 * kk * a)) \
+ ((m * a / r0) ** 2.0 - 1.0) / ((a / r0) ** 2.0 - 1.0)
k = root(func, k).x[0]
tcs_length = np.pi / 2.0 / k
print("Transition cell has length {} which is {} * cell length, ".format(tcs_length, tcs_length / le), end="")
assert tcs_length <= le, "Numerical determination of transition cell length " \
"yielded value larger than cell length parameter!"
if tcs_length > le:
print("the remainder will be filled with a drift.")
return tcs_length
def initialize(self):
# TODO: Refactor this maybe? seems overly complicated...
# Here we check the different cell types for consistency and minimum necessary parameters
if self._type in ["transition", "transition_auto"]:
assert self.prev_cell is not None, "A transition cell needs a preceeeding cell."
assert self.prev_cell.cell_type == "regular", "Currently a transition cell must follow a regular cell."
# Aperture:
assert self._params["a"] is not None, "No aperture given for {} cell".format(self._type)
if self._params["a"] == 'auto':
assert self._type in ["drift", "trapezoidal", "transition", "transition_auto"], \
"Unsupported cell type '{}' for auto-aperture".format(self._type)
assert self.prev_cell is not None, "Need a preceeding cell for auto aperture!"
if self.prev_cell.cell_type in ["transition", "transition_auto"]:
self._params["a"] = self.prev_cell.avg_radius
else:
self._params["a"] = self.prev_cell.aperture
self._params["a"] = np.round(self._params["a"], decimals)
# Modulation:
if self._type in ["start", "rms", "drift"]:
self._params["m"] = 1.0
assert self._params["m"] is not None, "No modulation given for {} cell".format(self._type)
if self._params["m"] == 'auto':
assert self._type in ["transition", "transition_auto"], \
"Only transition cell can have 'auto' modulation at the moment!"
self._params["m"] = self.prev_cell.modulation
self._params["m"] = np.round(self._params["m"], decimals)
# Length:
if self._type == "start":
self._params["L"] = 0.0
assert self._params["L"] is not None, "No length given for {} cell".format(self._type)
if self._params["L"] == "auto":
assert self._type == "transition_auto", "Only transition_auto cells allow auto-length!"
self._params["L"] = self.prev_cell.length # use preceeding cell length L for calculation of L'
self._params["L"] = self.calculate_transition_cell_length()
self._params["L"] = np.round(self._params["L"], decimals)
if self._type == "trapezoidal":
assert self._params["fillet_radius"] is not None, "For 'TRC' cell a fillet radius must be given!"
return 0
def set_prev_cell(self, prev_cell):
assert isinstance(prev_cell, PyRFQCell), "You are trying to set a PyRFQCell with a non-cell object!"
self._prev_cell = prev_cell
def set_next_cell(self, next_cell):
assert isinstance(next_cell, PyRFQCell), "You are trying to set a PyRFQCell with a non-cell object!"
self._next_cell = next_cell
def calculate_profile_rms(self, vane_type, cell_no):
# Assemble RMS section by finding adjacent RMS cells and get their apertures
cc = self
pc = cc.prev_cell
rms_cells = [cc]
shift = 0.0
while pc is not None and pc.cell_type == "rms":
rms_cells = [pc] + rms_cells
shift += pc.length
cc = pc
pc = cc.prev_cell
cc = self
nc = cc._next_cell
while nc is not None and nc.cell_type == "rms":
rms_cells = rms_cells + [nc]
cc = nc
nc = cc.next_cell
# Check for starting cell
assert rms_cells[0].prev_cell is not None, "Cannot assemble RMS section without a preceding cell! " \
"At the beginning ofthe RFQ consider using a start (STA) cell."
a = [0.5 * rms_cells[0].prev_cell.aperture * (1.0 + rms_cells[0].prev_cell.modulation)]
z = [0.0]
for _cell in rms_cells:
a.append(_cell.aperture)
z.append(z[-1] + _cell.length)
self._profile_itp = interp1d(np.array(z) - shift, np.array(a), kind='cubic')
return 0
def calculate_profile_transition(self, vane_type, cell_no):
le = self._params["L"]
m = self._params["m"]
a = self._params["a"]
k = np.pi / np.sqrt(3.0) / le # Initial guess
r0 = 0.5 * (a + m * a)
if self.cell_type == "transition_auto":
tcl = le
else:
tcl = self.calculate_transition_cell_length()
z = np.linspace(0.0, le, 200)
idx = np.where(z <= tcl)
vane = np.ones(z.shape) * r0
print("Average radius of transition cell (a + ma) / 2 = {}".format(r0))
def eta(kk):
return bessel1(0.0, kk * r0) / (3.0 * bessel1(0.0, 3.0 * kk * r0))
def a10(kk):
return ((m * a / r0) ** 2.0 - 1.0) / (
bessel1(0.0, kk * m * a) + eta(kk) * bessel1(0.0, 3.0 * kk * m * a))
def a30(kk):
return eta(kk) * a10(kk)
def func(kk):
return (bessel1(0.0, kk * m * a) + eta(kk) * bessel1(0.0, 3.0 * kk * m * a)) / \
(bessel1(0.0, kk * a) + eta(kk) * bessel1(0.0, 3.0 * kk * a)) \
+ ((m * a / r0) ** 2.0 - 1.0) / ((a / r0) ** 2.0 - 1.0)
k = root(func, k).x[0]
if self._params["shift_cell_no"]:
sign = (-1.0) ** (cell_no + 1)
else:
sign = (-1.0) ** cell_no
_vane = []
if "x" in vane_type:
def vane_x(xx):
return - (xx / r0) ** 2.0 \
+ sign * a10(k) * bessel1(0.0, k * xx) * np.cos(k * _z) \
+ sign * a30(k) * bessel1(0.0, 3.0 * k * xx) * np.cos(3.0 * k * _z) + 1.0
for _z in z[idx]:
_vane.append(root(vane_x, r0).x[0])
else:
def vane_y(yy):
return + (yy / r0) ** 2.0 \
+ sign * a10(k) * bessel1(0.0, k * yy) * np.cos(k * _z) \
+ sign * a30(k) * bessel1(0.0, 3.0 * k * yy) * np.cos(3.0 * k * _z) - 1.0
for _z in z[idx]:
_vane.append(root(vane_y, r0).x[0])
if self._params["flip_z"]:
_vane = _vane[::-1]
vane[np.where(z >= le - tcl)] = _vane
else:
vane[idx] = _vane
self._profile_itp = interp1d(z, vane, bounds_error=False, fill_value=0)
return 0
def calculate_profile_trapezoidal(self, vane_type, cell_no):
# TODO: This is a rough test of a trapezoidal cell: _/-\_
# TODO: tilted parts are as long as roof and start and end (cell_length/5)
fillet_radius = self._params["fillet_radius"] # m
def intersection(_p1, _v1, _p2, _v2):
s = (_v2[1] * (_p2[0] - _p1[0]) + _v2[0] * (_p1[1] - _p2[1])) / (_v1[0] * _v2[1] - _v1[1] * _v2[0])
return _p1 + s * _v1
def arc_to_poly(z1, r1, z2, r2, r_curv, invert):
"""
transform an arc into a polygon
"""
polygon = Polygon2D()
cur = 1
if invert:
cur = -1
dp = np.sqrt((z2 - z1) ** 2 + (r2 - r1) ** 2)
if r_curv < 0.5 * dp:
return None
dx = np.sqrt(abs((0.5 * dp) ** 2.0 - r_curv ** 2.0))
zc = (z1 + z2) * 0.5 - cur * dx * (r1 - r2) / dp
rc = (r1 + r2) * 0.5 + cur * dx * (z1 - z2) / dp
if round(z1 - zc, 8) == 0:
if r1 > rc:
p1 = 90
else:
p1 = 270
else:
p1 = np.arctan((r1 - rc) / (z1 - zc)) / np.pi * 180.0
if z1 < zc:
p1 += 180
if p1 < 0:
p1 += 360
if round(z2 - zc, 8) == 0:
if r2 > rc:
p2 = 90
else:
p2 = 270
else:
p2 = np.arctan((r2 - rc) / (z2 - zc)) / np.pi * 180.0
if z2 < zc:
p2 += 180
if p2 < 0:
p2 += 360
diff = p2 - p1
if diff < 0:
diff += 360
if diff > 180:
p3 = p1
p1 = p2
p2 = p3
num_vert = 10 # No need for too many, just spline guide points
if p2 < p1:
dp = float((p2 + 360.0 - p1) / (float(num_vert) - 1.0))
else:
dp = float((p2 - p1) / (float(num_vert) - 1.0))
for j in range(num_vert):
phi = np.deg2rad(p1 + dp * j)
z_temp = zc + (r_curv * np.cos(phi))
r_temp = rc + (r_curv * np.sin(phi))
polygon.add_point((z_temp, r_temp))
if not invert:
polygon.reverse()
return polygon, p1, p2
# Flip for y vane
flip_r = ("y" in vane_type) ^ self._params["shift_cell_no"]
# 6 vertices for 5 segments of the trapezoidal cell
_z = np.linspace(0, self._params["L"], 6, endpoint=True)
if flip_r:
_r = np.array([self._params["a"],
self._params["a"],
self._params["a"] * (2.0 - self._params["m"]),
self._params["a"] * (2.0 - self._params["m"]),
self._params["a"],
self._params["a"]
])
else:
_r = np.array([self._params["a"],
self._params["a"],
self._params["a"] * self._params["m"],
self._params["a"] * self._params["m"],
self._params["a"],
self._params["a"]
])
# Now we replace the inner vertices with fillets
_vertices = np.array(list(zip(_z, _r)))
_new_verts = Polygon2D([tuple(_vertices[0])])
for i in range(4):
temp_poly = Polygon2D([tuple(_vertices[0 + i]), tuple(_vertices[1 + i]), tuple(_vertices[i + 2])])
clockwise = temp_poly.clockwise()
# Calculate maximum radius for fillet
_v1 = Vector2D(p0=_vertices[i + 1], p1=_vertices[i + 0])
_v2 = Vector2D(p0=_vertices[i + 1], p1=_vertices[i + 2])
if clockwise:
p_in_line1 = Vector2D(_vertices[i + 1]) + _v1.rotate_ccw().normalize() * fillet_radius # belongs to v1
p_in_line2 = Vector2D(_vertices[i + 1]) + _v2.rotate_cw().normalize() * fillet_radius # belongs to v2
else:
p_in_line1 = Vector2D(_vertices[i + 1]) + _v1.rotate_cw().normalize() * fillet_radius # belongs to v1
p_in_line2 = Vector2D(_vertices[i + 1]) + _v2.rotate_ccw().normalize() * fillet_radius # belongs to v2
m_center = intersection(p_in_line1, _v1, p_in_line2, _v2)
v_new1 = intersection(Vector2D(_vertices[i + 1]), _v1.normalize(), m_center, _v1.rotate_cw().normalize())
v_new2 = intersection(Vector2D(_vertices[i + 1]), _v2.normalize(), m_center, _v2.rotate_cw().normalize())
arcpoly, ps, pe = arc_to_poly(v_new1[0], v_new1[1],
v_new2[0], v_new2[1],
fillet_radius,
not clockwise)
_new_verts.add_polygon(arcpoly)
_new_verts.add_point(tuple(_vertices[-1]))
_new_verts = np.array(_new_verts[:])
self._profile_itp = interp1d(_new_verts[:, 0], _new_verts[:, 1])
return 0
def calculate_profile(self, cell_no, vane_type, fudge=False):
print("cell_no: " + str(cell_no))
assert vane_type in ["xp", "xm", "yp", "ym"], "Did not understand vane type {}".format(vane_type)
if self._type == "start":
# Don't do anything for start cell
return 0
elif self._type == "trapezoidal":
assert self._prev_cell.cell_type == "drift", "Rebunching cell must follow a drift cell (DCS)!"
self.calculate_profile_trapezoidal(vane_type, cell_no)
return 0
elif self._type == "drift":
self._profile_itp = interp1d([0.0, self._params["L"]],
[self._params["a"], self._params["a"] * self._params["m"]])
return 0
elif self._type == "rms":
self.calculate_profile_rms(vane_type, cell_no)
return 0
elif self._type in ["transition", "transition_auto"]:
self.calculate_profile_transition(vane_type, cell_no)
return 0
# Else: regular cell:
z = np.linspace(0.0, self._params["L"], 100)
a = self.aperture
m = self.modulation
pc = self._prev_cell
if pc is not None and pc.cell_type in ["rms", "drift"]:
pc = None
nc = self._next_cell
if nc is not None and nc.cell_type in ["rms", "drift"]:
nc = None
if pc is None or not fudge:
a_fudge_begin = ma_fudge_begin = 1.0
else:
ma_fudge_begin = 0.5 * (1.0 + pc.aperture * pc.modulation / m / a)
a_fudge_begin = 0.5 * (1.0 + pc.aperture / a)
if nc is None or not fudge:
a_fudge_end = ma_fudge_end = 1.0
else:
ma_fudge_end = 0.5 * (1.0 + nc.aperture * nc.modulation / m / a)
a_fudge_end = 0.5 * (1.0 + nc.aperture / a)
a_fudge = interp1d([0.0, self.length], [a_fudge_begin, a_fudge_end])
ma_fudge = interp1d([0.0, self.length], [ma_fudge_begin, ma_fudge_end])
kp = np.pi / self.length
sign = (-1.0) ** (cell_no + 1)
def ap(zz):
return a * a_fudge(zz)
def mp(zz):
return m * ma_fudge(zz) / a_fudge(zz)
def a10(zz):
return (mp(zz) ** 2.0 - 1.0) / (mp(zz) ** 2.0 * bessel1(0, kp * ap(zz)) + bessel1(0, mp(zz) * kp * ap(zz)))
def r0(zz):
return ap(zz) / np.sqrt(1.0 - (mp(zz) ** 2.0 * bessel1(0, kp * ap(zz)) - bessel1(0, kp * ap(zz))) /
(mp(zz) ** 2.0 * bessel1(0, kp * ap(zz)) + bessel1(0, mp(zz) * kp * ap(zz))))
_vane = []
if "x" in vane_type:
def vane_x(xx):
return + sign * (xx / r0(_z)) ** 2.0 + a10(_z) * bessel1(0.0, kp * xx) * np.cos(kp * _z) - sign
for _z in z:
_vane.append(root(vane_x, ap(_z)).x[0])
else:
def vane_y(yy):
return - sign * (yy / r0(_z)) ** 2.0 + a10(_z) * bessel1(0.0, kp * yy) * np.cos(kp * _z) + sign
for _z in z:
_vane.append(root(vane_y, ap(_z)).x[0])
self._profile_itp = interp1d(z, _vane)
return 0
def profile(self, z):
return self._profile_itp(z)
class PyRFQElectrode(object):
def __init__(self,
name,
parent,
zmin,
zlen,
voltage,
reverse_normals=False,
h=0.025,
debug=False):
self._name = name
self._parent = parent
self._domain_idx = None
self._voltage = voltage
self._debug = debug
self._zmin = zmin
self._zlen = zlen
self._geo_str = None
self._occ_obj = None
self._occ_npart = 1
self._mesh_fn = None
self._reverse_normals = reverse_normals
self._h = h
self._refine_steps = 0
@property
def domain_idx(self):
return self._domain_idx
@property
def mesh_fn(self):
return self._mesh_fn
@property
def name(self):
return self._name
@property
def parent(self):
return self._parent
@property
def voltage(self):
return self._voltage
@abstractmethod
def generate_geo_str(self, *args, **kwargs):
pass
def generate_gmsh_files(self):
tmp_dir = self._parent.temp_dir
if tmp_dir is not None:
geo_fn = os.path.join(tmp_dir, "{}.geo".format(self.name))
msh_fn = os.path.splitext(geo_fn)[0] + ".msh"
stl_fn = os.path.splitext(geo_fn)[0] + ".stl"
brep_fn = os.path.splitext(geo_fn)[0] + ".brep"
refine_fn = os.path.join(tmp_dir, "refine_{}.geo".format(self.name))
gmsh_success = 0
with open(geo_fn, "w") as _of:
_of.write(self._geo_str)
command = "{} \"{}\" -0 -o \"{}\" -format brep".format(GMSH_EXE, geo_fn, brep_fn)
if self._debug:
print("Running", command)
sys.stdout.flush()
gmsh_success += os.system(command)
refine_str = """
Merge "{}";
Mesh.SecondOrderLinear = 0;
RefineMesh;
""".format(msh_fn)
with open(refine_fn, "w") as _of:
_of.write(refine_str)
# TODO: Could we use higher order (i.e. curved) meshes? -DW
# For now, we need to save in msh2 format for BEMPP compability
command = "{} \"{}\" -2 -o \"{}\" -format msh2".format(GMSH_EXE, geo_fn, msh_fn)
if self._debug:
print("Running", command)
sys.stdout.flush()
gmsh_success += os.system(command)
for i in range(self._refine_steps):
command = "{} \"{}\" -0 -o \"{}\" -format msh2".format(GMSH_EXE, refine_fn, msh_fn)
if self._debug:
print("Running", command)
sys.stdout.flush()
gmsh_success += os.system(command)
# --- TODO: For testing: save stl mesh file also
command = "{} \"{}\" -0 -o \"{}\" -format stl".format(GMSH_EXE, msh_fn, stl_fn)
if self._debug:
print("Running", command)
sys.stdout.flush()
gmsh_success += os.system(command)
# --- #
if gmsh_success != 0: # or not os.path.isfile("shape.stl"):
print("Something went wrong with gmsh, be sure you defined "
"the correct path at the beginning of the file!")
return 1
self._mesh_fn = msh_fn
return 0
def generate_occ(self):
if HAVE_OCC:
tmp_dir = self._parent.temp_dir
brep_fn = os.path.join(tmp_dir, "{}.brep".format(self._name))
self._occ_obj = ElectrodeObject()
self._occ_obj.load_from_brep(brep_fn)
self._occ_obj.partition_z(self._occ_npart)
return 0
else:
print("Couldn't load PythonOCC-Core earlier, cannot create OpenCasCade object!")
return 1
def points_inside(self, _points):
"""
Function that calculates whether the point(s) is/are inside the vane or not.
Currently this only works with pythonocc-core installed and can be very slow
for a large number of points.
:param _points: any shape (N, 3) structure holding the points to check. Can be a list of tuples,
a list of lists, a numpy array of points (N, 3)...
Alternatively: a single point with three coordinates (list, tuple or numpy array)
:return: boolean numpy array of True or False depending on whether the points are inside or
outside (on the surface is counted as inside!)
"""
if self._occ_obj is not None:
return self._occ_obj.points_inside(_points)
else:
return 1
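# --- Usage sketch (not executed): checking grid points against an electrode body.
# Hypothetical example; 'electrode' stands for any PyRFQElectrode subclass instance
# whose generate_occ() has already been called successfully.
#
#   import numpy as np
#   pts = np.array([[0.000, 0.000, 0.10],
#                   [0.000, 0.015, 0.10]])
#   inside = electrode.points_inside(pts)  # boolean array, True = inside or on surface
#   print(inside)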
class PyRFQAnnulus(PyRFQElectrode):
def __init__(self,
name,
parent,
zmin,
zlen,
voltage=0.0,
debug=False,
reverse_normals=False,
h=0.05,
plate_dia=1.0,
aperture_dia=0.0):
super().__init__(name,
parent,
zmin,
zlen,
voltage,
reverse_normals,
h,
debug)
self._aperture_dia = aperture_dia
self._plate_dia = plate_dia
self._domain_idx = 100
def generate_geo_str(self):
zlen = self._zlen
r_plate = self._plate_dia / 2.0
r_ap = self._aperture_dia / 2.0
zmin = self._zmin
reverse_normals = self._reverse_normals
h = self._h
self._geo_str = """SetFactory("OpenCASCADE");
Geometry.NumSubEdges = 100; // nicer display of curve
Mesh.CharacteristicLengthMax = {};
""".format(h)
self._geo_str += "// Create Plate \n"
self._geo_str += "Cylinder(1) = {{ 0, 0, {}, 0, 0, {}, {}, 2 * Pi }};\n".format(zmin,
zlen,
r_plate)
if r_ap > 0.0:
self._geo_str += "Cylinder(2) = {{ 0, 0, {}, 0, 0, {}, {}, 2 * Pi }};\n".format(zmin - 0.001,
zlen + 0.002,
r_ap)
self._geo_str += "BooleanDifference{ Volume{1}; Delete; }{ Volume{2}; Delete; }\n"
self._geo_str += """
s() = Surface "*";
Physical Surface({}) = {{ s() }};
""".format(self._domain_idx)
if reverse_normals:
self._geo_str += """
ReverseMesh Surface { s() };
"""
return 0
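# --- Usage sketch (not executed): building a grounded end plate with a beam aperture.
# Values are illustrative only; 'rfq' is assumed to be an existing PyRFQ instance.
#
#   plate = PyRFQAnnulus(name="entrance_plate", parent=rfq,
#                        zmin=-0.02, zlen=0.01, voltage=0.0,
#                        plate_dia=0.2, aperture_dia=0.02)
#   plate.generate_geo_str()     # builds the OpenCASCADE geo string internally
#   plate.generate_gmsh_files()  # runs gmsh to produce .brep/.msh/.stl in the temp dir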
class PyRFQVane(PyRFQElectrode):
def __init__(self,
parent,
vane_type,
cells,
voltage,
occ_tolerance=1e-5,
occ_npart=1,
debug=False,
reverse_normals=False,
h=0.05):
self._cells = cells
self._length = np.sum([cell.length for cell in self._cells]) # type: float
super().__init__(name="vane_" + vane_type,
parent=parent,
zmin=0.0,
zlen=self._length,
voltage=voltage,
reverse_normals=reverse_normals,
h=h,
debug=debug)
self._occ_npart = occ_npart
self._type = vane_type
self._has_profile = False
self._fudge = False
self._mesh_params = {"dx": 0.001, # step length along z (m)
"nz": 100, # Number of steps along z, consolidate with dx!
"h": 0.005, # gmsh meshing parameter (m)
"tip": "semi-circle",
"r_tip": 0.005, # Radius of curvature of vane tip (m)
"h_block": 0.01, # height of block sitting atop the curvature (m)
"h_type": 'absolute', # whether the block height is measured from midplane or modulation
"symmetry": False,
"mirror": False,
"geo_str": None,
"msh_fn": None,
"refine_steps": 0, # Number of times gmsh is called to "refine by splitting"
"reverse_mesh": False
}
self._occ_params = {"tolerance": occ_tolerance,
"solid": None, # The OCC solid body,
"bbox": None, # The bounding box ofthe OCC solid body
}
self._mesh = None
@property
def has_profile(self):
return self._has_profile
@property
def length(self):
return self._length # type: float
@property
def mesh(self):
return self._mesh
@property
def vane_type(self):
return self._type
@property
def vertices_elements(self):
if self._mesh is not None:
return self._mesh.leaf_view.vertices, self._mesh.leaf_view.elements
else:
return None, None
def set_vane_type(self, vane_type=None):
if vane_type is not None:
self._type = vane_type
self._name = "vane_" + vane_type
def set_mesh_parameter(self, keyword=None, value=None):
if keyword is None or value is None:
print("In 'set_mesh_parameter': Either keyword or value were not specified.")
return 1
if keyword not in self._mesh_params.keys():
print("In 'set_mesh_parameter': Unrecognized keyword '{}'.".format(keyword))
return 1
self._mesh_params[keyword] = value
return 0
def get_parameter(self, key):
if key in self._mesh_params.keys():
return self._mesh_params[key]
else:
return None
def set_voltage(self, voltage):
self._voltage = voltage
def set_domain_index(self, idx):
self._mesh_params["domain_idx"] = idx
def generate_geo_str(self):
r_tip = None
h_block = None
h_type = None
symmetry = None
mirror = None
reverse_mesh = None
h = dx = self._h
if symmetry is not None:
self._mesh_params["symmetry"] = symmetry
else:
symmetry = self._mesh_params["symmetry"]
if mirror is not None:
self._mesh_params["mirror"] = mirror
else:
mirror = self._mesh_params["mirror"]
assert not (symmetry is True and mirror is True), "Cannot have mirroring and symmetry at the same time!"
if dx is not None:
self._mesh_params["dx"] = dx
else:
dx = self._mesh_params["dx"]
if h is not None:
self._mesh_params["h"] = h
else:
h = self._mesh_params["h"]
if r_tip is not None:
self._mesh_params["r_tip"] = r_tip
else:
r_tip = self._mesh_params["r_tip"]
if h_block is not None:
self._mesh_params["h_block"] = h_block
else:
h_block = self._mesh_params["h_block"]
if h_type is not None:
self._mesh_params["h_type"] = h_type
else:
h_type = self._mesh_params["h_type"]
if reverse_mesh is not None:
self._mesh_params["reverse_mesh"] = reverse_mesh
else:
reverse_mesh = self._mesh_params["reverse_mesh"]
# Calculate z_data and vane profile:
z, profile = self.get_profile(nz=self._mesh_params["nz"])
pmax = profile.max()
# Calculate minimum possible absolute height (1 mm above the maximum vane modulation):
h_min = 0.0
has_rms = False
for _cell in self._cells:
if _cell.cell_type == "rms":
has_rms = True
# Check for maximum modulated vanes plus 1 mm for safety.
if _cell.cell_type not in ["start", "rms"]:
_h = _cell.aperture * _cell.modulation + 0.001
if h_min < _h:
h_min = _h
# Consistency check for absolute h_type
if h_type == 'absolute':
if h_block >= pmax:
ymax = h_block
elif h_block >= h_min:
print("*** Warning: h_block < pmax, but larger than maximum vane modulation. "
"This will cut into the RMS Section! Continuing.")
ymax = h_block
else:
print("It seems that the 'absolute' h_block (height) value is too small" \
" and would leave less than 1 mm material in some places above the modulation. " \
"Aborting.")
return 1
elif h_type == 'relative':
ymax = pmax + h_block
print("h_type 'relative' deactivated for the moment. Aborting. -DW")
return 1
else:
print("Unknown 'h_type'.")
return 1
# TODO: Look into what the best meshing parameters are!
# TODO: Look into number of threads!
geo_str = """SetFactory("OpenCASCADE");
Geometry.NumSubEdges = 500; // nicer display of curve
//General.NumThreads = 2;
Mesh.CharacteristicLengthMax = {};
h = {};
""".format(h, h)
if symmetry:
assert self._type not in ["ym", "xm"], "Sorry, mesh generation with symmetry only works for vanes " \
"located in positive axis directions (i.e. 'yp', 'xp'). "
# if "x" in self._type:
# sign = -1
if "y" in self._type:
self._domain_idx = 2
else:
self._domain_idx = 1
new_pt = 1
new_ln = 1
new_loop = 1
new_surf = 1
new_vol = 1
spline1_pts = [new_pt]
# Center spline
# TODO: Here we could add an option for the cut-ins -DW
geo_str += "// Center Spline:\n"
for _z, _a in zip(z, profile):
geo_str += "Point({}) = {{ {}, {}, {}, h }};\n".format(spline1_pts[-1], 0.0, _a, _z)
spline1_pts.append(spline1_pts[-1] + 1)
new_pt = spline1_pts[-1]
spline1_pts.pop(-1)
geo_str += """
Spline({}) = {{ {}:{} }};
""".format(new_ln, spline1_pts[0], spline1_pts[-1])
# Immediately delete the points used up in the spline
geo_str += "Recursive Delete {{ Point{{ {}:{} }}; }}\n".format(spline1_pts[1], spline1_pts[-2])
spline_ln = new_ln
new_ln += 1
# --- Make a profile to follow the modulation path ('sweep' in Inventor, 'pipe' in OpenCascade) --- #
profile_start_angle = np.arctan2(profile[1] - profile[0], z[1] - z[0])
profile_end_angle = np.arctan2(profile[-1] - profile[-2], z[-1] - z[-2])
print("Profile Start Angle = {} deg".format(-np.rad2deg(profile_start_angle)))
print("Profile End Angle = {} deg".format(-np.rad2deg(profile_end_angle)))
adj_psa_deg = -np.rad2deg(profile_start_angle)
adj_pea_deg = np.rad2deg(profile_end_angle)
geo_str += "// Points making up the sweep face:\n"
face_pts = list(range(new_pt, new_pt + 4))
# Square points:
geo_str += "Point({}) = {{ {}, {}, {}, h }};\n".format(face_pts[0], -r_tip, profile[0] + r_tip, z[0])
geo_str += "Point({}) = {{ {}, {}, {}, h }};\n".format(face_pts[1], r_tip, profile[0] + r_tip, z[0])
# Semi-circle center:
geo_str += "Point({}) = {{ {}, {}, {}, h }};\n".format(face_pts[2], 0.0, profile[0] + r_tip, z[0])
geo_str += "\n"
# Lines for sweep face:
face_lns = []
for i in range(1):
face_lns.append(new_ln)
geo_str += "Line({}) = {{ {}, {} }};\n".format(new_ln, face_pts[i], face_pts[i + 1])
new_ln += 1
# Semi-circle:
face_lns.append(new_ln)
geo_str += "Circle({}) = {{ {}, {}, {}}};\n".format(new_ln, face_pts[1], face_pts[2], face_pts[0])
geo_str += "\n"
new_ln += 1
# Sweep Face:
geo_str += "Curve Loop({}) = {{ {}, {} }};\n".format(new_loop,
face_lns[0],
face_lns[1],
)
new_loop += 1
sweep_surf = new_surf
geo_str += "Plane Surface({}) = {{ {} }};\n".format(new_surf, new_loop - 1)
geo_str += "Rotate {{{{1, 0, 0}}, {{ {}, {}, {}}}, {}}} {{Surface {{ {} }}; }}\n".format(0.0,
profile[0],
z[0],
-profile_start_angle,
new_surf)
geo_str += "\n"
new_surf += 1
# Delete now unused center-point of circle (was duplicated)
geo_str += "Recursive Delete {{ Point{{ {} }}; }}\n".format(face_pts[2])
# Extrusion:
geo_str += "Wire({}) = {{ {} }};\n".format(new_loop, spline_ln)
geo_str += "Extrude {{ Surface{{ {} }}; }} Using Wire {{ {} }}\n".format(sweep_surf, new_loop)
new_loop += 1
extrude_vol_1 = new_vol
new_vol += 1 # Extrude creates a volume
# Delete initial sweep surface (now redundant)
geo_str += "Recursive Delete {{ Surface {{ {} }}; }}\n".format(sweep_surf)
# Delete the spline (now redundant)
geo_str += "Recursive Delete {{ Curve{{ {} }}; }}\n".format(spline_ln)
# We now have a volume of the modulated part regardless of h_block and RMS section yes/no.
# All redundant points, lines and surfaces have been deleted.
# ------------------------------------------------------------------------------------------------------------ #
# --- Next step: Fill up the volume above to make height of vane = ymax -------------------------------------- #
# - Cases:
# 1. Both start and end angles are tilted inwards /===\ (using minimum tilt of 1 deg for now).
# 2. Both start and end angles are straight or tilted outwards |===| or \===/
# 3. Start angle is tilted inwards, end angle is straight or tilted outwards /===| (e.g. only using start RMS)
# 4. Start angle is straight or tilted outwards, end angle is tilted inwards |===\ (e.g. only using exit RMS)
if adj_psa_deg >= 1.0 and adj_pea_deg >= 1.0:
case = 1
elif adj_psa_deg < 1.0 and adj_pea_deg < 1.0:
case = 2
elif adj_pea_deg < 1.0 <= adj_psa_deg:
case = 3
else:
case = 4
# In case 1, we can extend the end-caps upwards 1 m (just some large number),
# then cut off a big block from the top. End caps will be surfaces 2 and 5
if case == 1:
geo_str += "Extrude {0, 1, 0} { Surface{ 2 }; }\n"
geo_str += "Extrude {0, 1, 0} { Surface{ 5 }; }\n\n"
geo_str += "// Delete redundant volumes, surfaces, lines to form a new volume later\n"
geo_str += "Delete { Volume{ 1, 2, 3 }; }\n"
geo_str += "Delete { Surface{ 2, 3, 5, 6, 9 }; }\n"
geo_str += "Delete { Curve{ 4, 8 }; }\n"
geo_str += "Line(18) = {{ {}, {} }};\n".format(new_pt + 12, new_pt + 10)
geo_str += "Line(19) = {{ {}, {} }};\n".format(new_pt + 9, new_pt + 11)
geo_str += """
Curve Loop(13) = {19, 16, 18, -12};
Plane Surface(12) = {13};
Curve Loop(14) = {18, -11, 7, 15};
Plane Surface(13) = {14};
Curve Loop(15) = {19, -14, -6, 10};
Plane Surface(14) = {15};
Surface Loop(4) = {13, 12, 14, 10, 11, 4, 7, 8};
Volume(1) = {4};
Delete { Surface{ 7, 10}; }
"""
# In case 2 we create a block above the 4 endpoints of the semi-circles
elif case == 2:
geo_str += "Translate {{ 0, 1, 0 }} {{ Duplicata{{ Point{{ {}, {}, {}, {} }}; }} }}\n".format(new_pt + 5,
new_pt + 6,
new_pt + 7,
new_pt + 8)
geo_str += "Delete { Volume{ 1 }; }\n"
geo_str += "Delete { Surface{ 3 }; }\n"
geo_str += "Line(10) = {{ {}, {} }};\n".format(new_pt + 10, new_pt + 9)
geo_str += "Line(11) = {{ {}, {} }};\n".format(new_pt + 9, new_pt + 11)
geo_str += "Line(12) = {{ {}, {} }};\n".format(new_pt + 11, new_pt + 12)
geo_str += "Line(13) = {{ {}, {} }};\n".format(new_pt + 12, new_pt + 10)
geo_str += "Line(14) = {{ {}, {} }};\n".format(new_pt + 8, new_pt + 12)
geo_str += "Line(15) = {{ {}, {} }};\n".format(new_pt + 11, new_pt + 7)
geo_str += "Line(16) = {{ {}, {} }};\n".format(new_pt + 6, new_pt + 10)
geo_str += "Line(17) = {{ {}, {}}};\n".format(new_pt + 9, new_pt + 5)
geo_str += """
Curve Loop(7) = {13, 10, 11, 12}; Plane Surface(6) = {7};
Curve Loop(8) = {12, -14, -8, -15}; Plane Surface(7) = {8};
Curve Loop(9) = {16, 10, 17, 4}; Plane Surface(8) = {9};
Curve Loop(10) = {13, -16, 7, 14}; Plane Surface(9) = {10};
Curve Loop(11) = {15, -6, -17, 11}; Plane Surface(10) = {11};
Surface Loop(2) = {6, 9, 8, 10, 7, 5, 4, 2}; Volume(1) = {2};
"""
elif case == 3:
geo_str += "Extrude {0, 1, 0} { Surface{ 2 }; }\n"
geo_str += "Translate {{ 0, 1, 0 }} {{ Duplicata{{ Point{{ {}, {} }}; }} }}\n".format(new_pt + 7,
new_pt + 8)
geo_str += "// Delete redundant volumes, surfaces, lines to form a new volume later\n"
geo_str += "Delete { Volume{ 1, 2 }; }\n"
geo_str += "Delete { Surface{ 2, 3, 6}; }\n"
geo_str += "Delete { Curve{ 4 }; }\n"
geo_str += "Line(14) = {{ {}, {} }};\n".format(new_pt + 10, new_pt + 12)
geo_str += "Line(15) = {{ {}, {} }};\n".format(new_pt + 9, new_pt + 11)
geo_str += "Line(16) = {{ {}, {} }};\n".format(new_pt + 11, new_pt + 12)
geo_str += "Line(17) = {{ {}, {}}};\n".format(new_pt + 12, new_pt + 8)
geo_str += "Line(18) = {{ {}, {} }};\n".format(new_pt + 11, new_pt + 7)
geo_str += """
Curve Loop(10) = {16, -14, -12, 15}; Plane Surface(9) = {10};
Curve Loop(11) = {17, -7, 11, 14}; Plane Surface(10) = {11};
Curve Loop(12) = {17, -8, -18, 16}; Plane Surface(11) = {12};
Curve Loop(13) = {18, -6, 10, 15}; Plane Surface(12) = {13};
Surface Loop(3) = {10, 11, 5, 4, 12, 7, 8, 9}; Volume(1) = {3};
"""
geo_str += "Delete { Surface{ 7 }; }\n"
elif case == 4:
geo_str += "Extrude {0, 1, 0} { Surface{ 5 }; }\n\n"
geo_str += "Translate {{ 0, 1, 0 }} {{ Duplicata{{ Point{{ {}, {} }}; }} }}\n".format(new_pt + 5,
new_pt + 6)
geo_str += "// Delete redundant volumes, surfaces, lines to form a new volume later\n"
geo_str += "Delete { Volume{ 1, 2 }; }\n"
geo_str += "Delete { Surface{3, 5, 6}; }\n"
geo_str += "Delete { Curve{ 8 }; }\n"
geo_str += "Line(14) = {{ {}, {} }};\n".format(new_pt + 10, new_pt + 12)
geo_str += "Line(15) = {{ {}, {} }};\n".format(new_pt + 9, new_pt + 11)
geo_str += "Line(16) = {{ {}, {} }};\n".format(new_pt + 12, new_pt + 11)
geo_str += "Line(17) = {{ {}, {}}};\n".format(new_pt + 6, new_pt + 12)
geo_str += "Line(18) = {{ {}, {} }};\n".format(new_pt + 5, new_pt + 11)
geo_str += """
Curve Loop(10) = {14, 16, -15, 12}; Plane Surface(9) = {10};
Curve Loop(11) = {14, -17, 7, 11}; Plane Surface(10) = {11};
Curve Loop(12) = {6, 10, 15, -18}; Plane Surface(11) = {12};
Curve Loop(13) = {16, -18, 4, 17}; Plane Surface(12) = {13};
Surface Loop(3) = {10, 9, 12, 11, 4, 7, 8, 2}; Volume(1) = {3};
"""
geo_str += "Delete { Surface{ 7 }; }\n"
# ------------------------------------------------ END CASES ------------------------------------------------- #
geo_str += "Box(2) = {{ -0.5, {}, {}, 1, 2, {} }};\n".format(ymax, z[0] - 0.25, z[-1] - z[0] + 0.5)
geo_str += """
BooleanDifference{ Volume{1}; Delete; }{ Volume{2}; Delete; }
"""
# Add physical surface to identify this vane in gmsh (unmirrored)
geo_str += """
s() = Surface "*";
Physical Surface({}) = {{ s() }};
""".format(self._domain_idx)
# Rotate according to vane type
if self.vane_type == "xp":
geo_str += "Rotate {{{{0, 0, 1}}, {{0, 0, 0}}, {}}} " \
"{{Volume {{ {} }}; }}\n".format(-0.5 * np.pi, extrude_vol_1)
elif self.vane_type == "xm":
geo_str += "Rotate {{{{0, 0, 1}}, {{0, 0, 0}}, {}}} " \
"{{Volume {{ {} }}; }}\n".format(0.5 * np.pi, extrude_vol_1)
elif self.vane_type == "ym":
geo_str += "Rotate {{{{0, 0, 1}}, {{0, 0, 0}}, {}}} " \
"{{Volume {{ {} }}; }}\n".format(np.pi, extrude_vol_1)
if reverse_mesh:
geo_str += """
ReverseMesh Surface { s() };
"""
# TODO: Adjust the transfinite surfaces for all the correct ones for the different cases.
if case == 1:
geo_str += """
Transfinite Surface { 2, 3 };
"""
elif case == 2:
geo_str += """
Transfinite Surface { 3 };
"""
elif case == 3:
geo_str += """
Transfinite Surface { 3, 4 };
"""
elif case == 4:
geo_str += """
Transfinite Surface { 3 };
"""
self._geo_str = geo_str
return geo_str
def calculate_profile(self, fudge=None):
if fudge is None:
fudge = self._fudge
for cell_no in range(len(self._cells)):
self._cells[cell_no].calculate_profile(cell_no, self._type, fudge=fudge)
sys.stdout.flush()
self._has_profile = True
return 0
def get_profile(self, nz=1000):
assert self._has_profile, "No profile has been generated!"
# Cutting the RFQ short by 1e-10 to not get out of bound error in interpolation
z = np.round(np.linspace(0.0, self._length - 1e-10, nz), decimals)
vane = np.zeros(z.shape)
cum_len = 0.0
# count = 0
for cell in self._cells:
if cell.cell_type != "start":
_z_end = np.round(cum_len + cell.length, decimals)
idx = np.where((z >= cum_len) & (z <= _z_end))
# print("")
# print("Cell # {}".format(count))
# print("Cell extent: {} to {}".format(cum_len, _z_end))
# print("z_lab = [{};{}]".format(z[idx][0], z[idx][-1]))
# print("z_loc = [{};{}]".format(z[idx][0] - cum_len, z[idx][-1] - cum_len))
vane[idx] = cell.profile(np.round(z[idx] - cum_len, decimals))
cum_len = np.round(cum_len + cell.length, decimals)
# count += 1
return z, vane
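# --- Usage sketch (not executed): generating and sampling a single vane profile.
# 'cells' is assumed to be a list of PyRFQCell objects and 'rfq' a PyRFQ parent;
# the voltage value is illustrative only.
#
#   vane = PyRFQVane(parent=rfq, vane_type="yp", cells=cells, voltage=50.0e3)
#   vane.calculate_profile(fudge=False)
#   z, profile = vane.get_profile(nz=2000)  # z positions (m) and vane tip distance (m)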
# noinspection PyUnresolvedReferences
class PyRFQ(object):
def __init__(self, voltage, occ_tolerance=1e-5, debug=False, fudge_vanes=False):
self._debug = debug
self._fudge_vanes = fudge_vanes
self._voltage = voltage
self._vanes = []
self._elec_objects = []
self._cells = []
self._cell_nos = []
self._length = 0.0
self._full_mesh = None
self._full_mesh_fn = None
self._occ_tolerance = occ_tolerance # Tolerance for bounding box and intersection tests in pythonocc-core
self._temp_dir = None
# Flags
self._have_geo_str = False
self._have_occ_obj = False
self._have_bem_obj = False
self._initialized = False
self._variables_gmtry = {"vane_type": "hybrid",
"vane_radius": 0.005, # m
"vane_height": 0.05, # m
"vane_height_type": 'absolute',
"nz": 500 # number of points to use for modulation spline along z
# TODO: nz is confusing, now we have dx, numz and nz that could all determine
# TODO: the step length along axis for geometry purposes! -DW
}
self._variables_bempp = {
# "solution": None,
# "f_space": None,
# "operator": None,
# "grid_fun": None,
"grid_res": 0.005, # grid resolution in (m)
"refine_steps": 0,
"reverse_mesh": False,
"n_fun_coeff": None, # Coefficients of the Neumann GridFunction
"d_fun_coeff": None, # Coefficients of the Dirichlet GridFunction
"ef_itp": None, # type: Field
"ef_phi": None, # type: np.ndarray
"ef_mask": None, # A numpy boolean array holding flags for points inside electrodes
# This can help with jitter on z axis where pot ~ 0 otherwise
# TODO: Should put pot in its own class that also holds dx, nx, etc.
"add_cyl": False, # Do we want to add a grounded cylinder to the BEMPP problem
"add_endplates": False, # Or just grounded end plates
"cyl_id": 0.2, # Inner diameter of surrounding cylinder
"ap_id": 0.02, # Entrance and exit aperture diameter TODO: Make this asymmetric!
"cyl_gap": 0.01, # gap between vanes and cylinder TODO: Make this asymmetric!
"d": None,
"n": None,
"limits": None
}
self.create_temp_dir()
@property
def temp_dir(self):
return self._temp_dir
def create_temp_dir(self):
if RANK == 0:
tmp_path = os.path.join(os.getcwd(), "temp")
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
else:
shutil.rmtree(tmp_path)
os.mkdir(tmp_path)
if os.path.exists(tmp_path):
global HAVE_TEMP_FOLDER
HAVE_TEMP_FOLDER = True
else:
print("Could not create temp folder. Aborting.")
exit(1)
mpi_data = {"tmp_path": tmp_path}
else:
mpi_data = None
if MPI is not None:
mpi_data = COMM.bcast(mpi_data, root=0)
self._temp_dir = mpi_data["tmp_path"]
return self._temp_dir
def __str__(self):
text = "\nPyRFQ object with {} cells and length {:.4f} m. Vane voltage = {} V\n".format(self._cell_nos[-1],
self._length,
self._voltage)
text += "Cells:\n"
for i, cell in enumerate(self._cells):
text += "Cell {}: ".format(i) + cell.__str__() + "\n"
return text
def set_bempp_parameter(self, keyword=None, value=None):
if keyword is None or value is None:
print("In 'set_bempp_parameter': Either keyword or value were not specified.")
return 1
if keyword not in self._variables_bempp.keys():
print("In 'set_bempp_parameter': Unrecognized keyword '{}'.".format(keyword))
return 1
self._variables_bempp[keyword] = value
return 0
def get_bempp_parameter(self, keyword=None):
if keyword is None:
print("In 'set_bempp_parameter': No keyword specified.")
return 1
if keyword not in self._variables_bempp.keys():
print("In 'set_bempp_parameter': Unrecognized keyword '{}'.".format(keyword))
return 1
return self._variables_bempp[keyword]
def set_geometry_parameter(self, keyword=None, value=None):
if keyword is None or value is None:
print("In 'set_geometry_parameter': Either keyword or value were not specified.")
return 1
if keyword not in self._variables_gmtry.keys():
print("In 'set_geometry_parameter': Unrecognized keyword '{}'.".format(keyword))
return 1
self._variables_gmtry[keyword] = value
return 0
def get_geometry_parameter(self, keyword=None):
if keyword is None:
print("In 'set_geometry_parameter': No keyword specified.")
return 1
if keyword not in self._variables_gmtry.keys():
print("In 'set_geometry_parameter': Unrecognized keyword '{}'.".format(keyword))
return 1
return self._variables_gmtry[keyword]
def add_cells_from_file(self, filename=None, ignore_rms=False):
"""
Reads a file with cell parameters and generates the respective PyRFQCell objects
:param filename:
:param ignore_rms: Bool. If True, any radial matching cells in the file are ignored.
:return:
"""
if filename is None:
if RANK == 0:
fd = FileDialog()
mpi_data = {"fn": fd.get_filename('open')}
else:
mpi_data = None
if MPI is not None:
mpi_data = COMM.bcast(mpi_data, root=0)
filename = mpi_data["fn"]
if filename is None:
return 1
with open(filename, "r") as infile:
if "Parmteqm" in infile.readline():
# Detected Parmteqm file
self.read_input_parmteq(filename, ignore_rms)
else:
# Assume only other case is VECC input file for now
self.read_input_vecc(filename, ignore_rms)
return 0
def add_cylinder(self):
for _elec_obj in self._elec_objects:
if "plate" in _elec_obj.name:
print("Cannot create cylinder if there are endplates already!")
return 1
print("Cylinder not yet implemented :(")
return 0
def add_endplates(self,
gap_sta,
gap_end,
thickness,
plate_dia,
voltage=0.0,
aperture_dia=0.0):
for _elec_obj in self._elec_objects:
if "cylinder" in _elec_obj.name:
print("Cannot create endplates if there is an outer cylinder already!")
return 1
# Delete all existing plate objects
self._elec_objects = [_elec_obj for _elec_obj in self._elec_objects if "plate" not in _elec_obj.name]
# Entrance Plate
zmin = 0.0 - gap_sta - thickness
plate_sta = PyRFQAnnulus(name="entrance_plate",
parent=self,
zmin=zmin,
zlen=thickness,
voltage=voltage,
debug=self._debug,
reverse_normals=self._variables_bempp["reverse_mesh"],
h=self._variables_bempp["grid_res"],
plate_dia=plate_dia,
aperture_dia=aperture_dia)
self._elec_objects.append(plate_sta)
# Exit Plate
zmin = self._length + gap_end
plate_sta = PyRFQAnnulus(name="exit_plate",
parent=self,
zmin=zmin,
zlen=thickness,
voltage=voltage,
debug=self._debug,
reverse_normals=self._variables_bempp["reverse_mesh"],
h=self._variables_bempp["grid_res"],
plate_dia=plate_dia,
aperture_dia=aperture_dia)
self._elec_objects.append(plate_sta)
return 0
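# --- Usage sketch (not executed): adding grounded entrance/exit plates.
# All dimensions below are illustrative only.
#
#   rfq.add_endplates(gap_sta=0.025, gap_end=0.01, thickness=0.02,
#                     plate_dia=0.2, voltage=0.0, aperture_dia=0.02)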
def append_cell(self,
cell_type,
**kwargs):
assert cell_type in ["start", "rms", "regular",
"transition", "transition_auto", "drift", "trapezoidal"], \
"cell_type not recognized!"
if len(self._cells) > 0:
pc = self._cells[-1]
else:
pc = None
self.reset()
self._cells.append(PyRFQCell(cell_type=cell_type,
prev_cell=pc,
next_cell=None,
debug=self._debug,
**kwargs))
if len(self._cells) > 1:
self._cells[-2].set_next_cell(self._cells[-1])
self._cell_nos = range(len(self._cells))
self._length = np.sum([cell.length for cell in self._cells])
return 0
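# --- Usage sketch (not executed): assembling an RFQ cell by cell.
# The cell parameters below are hypothetical and only meant to show the call pattern;
# cell_type can be "start", "rms", "regular", "transition", "transition_auto",
# "drift" or "trapezoidal".
#
#   rfq = PyRFQ(voltage=22.0e3, fudge_vanes=True)
#   rfq.append_cell(cell_type="rms", aperture=0.008, modulation=1.0, length=0.010)
#   rfq.append_cell(cell_type="regular", aperture=0.007, modulation=1.2, length=0.018)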
def read_input_parmteq(self, filename, ignore_rms):
with open(filename, "r") as infile:
# Some user feedback:
version = infile.readline().strip().split()[1].split(",")[0]
print("Loading cells from Parmteqm v{} output file...".format(version))
# Find begin of cell information
for line in infile:
if "Cell" in line and "V" in line:
break
for line in infile:
# Last line in cell data is repetition of header line
if "Cell" in line and "V" in line:
break
# Cell number is a string (sometimes has a letter key appended)
items = line.strip().split()
cell_no = items[0]
params = [float(item) for item in items[1:]]
if len(items) == 10 and cell_no == "0":
# This is the start cell, only there to provide a starting aperture
if len(self._cells) == 0 and not ignore_rms:
# We use this only if there are no previous cells in the pyRFQ
# Else we ignore it...
self._cells.append(PyRFQCell(cell_type="start",
V=params[0] * 1000.0,
Wsyn=params[1],
Sig0T=params[2],
Sig0L=params[3],
A10=params[4],
Phi=params[5],
a=params[6] * 0.01,
B=params[8],
debug=self._debug))
continue
# For now we ignore "special" cells and add them manually
if "T" in cell_no or "M" in cell_no or "F" in cell_no:
print("Ignored cell {}".format(cell_no))
continue
if "R" in cell_no:
cell_type = "rms"
if ignore_rms:
print("Ignored cell {}".format(cell_no))
continue
else:
cell_type = "regular"
if len(self._cells) > 0:
pc = self._cells[-1]
else:
pc = None
if cell_type == "rms":
self._cells.append(PyRFQCell(cell_type=cell_type,
V=params[0] * 1000.0,
Wsyn=params[1],
Sig0T=params[2],
Sig0L=params[3],
A10=params[4],
Phi=params[5],
a=params[6] * 0.01,
m=params[7],
B=params[8],
L=params[9] * 0.01,
prev_cell=pc,
debug=self._debug))
else:
self._cells.append(PyRFQCell(cell_type=cell_type,
V=params[0] * 1000.0,
Wsyn=params[1],
Sig0T=params[2],
Sig0L=params[3],
A10=params[4],
Phi=params[5],
a=params[6] * 0.01,
m=params[7],
B=params[8],
L=params[9] * 0.01,
A0=params[11],
RFdef=params[12],
Oct=params[13],
A1=params[14],
prev_cell=pc,
debug=self._debug))
if len(self._cells) > 1:
self._cells[-2].set_next_cell(self._cells[-1])
self._cell_nos = range(len(self._cells))
self._length = np.sum([cell.length for cell in self._cells])
return 0
def read_input_vecc(self, filename, ignore_rms):
print("Loading from VECC files is currently not supported (function needs to be mofernized)!")
exit(1)
with open(filename, "r") as infile:
for line in infile:
params = [float(item) for item in line.strip().split()]
if params[4] == 1.0:
cell_type = "rms"
if ignore_rms:
continue
else:
cell_type = "regular"
if len(self._cells) > 0:
pc = self._cells[-1]
else:
pc = None
self._cells.append(PyRFQCell(cell_type=cell_type,
aperture=params[3],
modulation=params[4],
length=params[6],
flip_z=False,
shift_cell_no=False,
prev_cell=pc,
next_cell=None))
if len(self._cells) > 1:
self._cells[-2].set_next_cell(self._cells[-1])
self._cell_nos = range(len(self._cells))
self._length = np.sum([cell.length for cell in self._cells])
return 0
def calculate_efield(self):
assert self._variables_bempp["ef_phi"] is not None, \
"Please calculate the potential first!"
# TODO: Replace gradient with something that accepts mask
_d = self._variables_bempp["d"]
phi_masked = np.ma.masked_array(self._variables_bempp["ef_phi"],
mask=self._variables_bempp["ef_mask"])
ex, ey, ez = np.gradient(phi_masked,
_d[X], _d[Y], _d[Z])
if RANK == 0:
_field = Field("RFQ E-Field",
dim=3,
field={"x": RegularGridInterpolator(points=_r, values=-ex,
bounds_error=False, fill_value=0.0),
"y": RegularGridInterpolator(points=_r, values=-ey,
bounds_error=False, fill_value=0.0),
"z": RegularGridInterpolator(points=_r, values=-ez,
bounds_error=False, fill_value=0.0)
})
mpi_data = {"efield": _field}
else:
mpi_data = None
mpi_data = COMM.bcast(mpi_data, root=0)
self._variables_bempp["ef_itp"] = mpi_data["efield"]
return 0
def calculate_potential(self,
limits=((None, None), (None, None), (None, None)),
res=(0.002, 0.002, 0.002),
domain_decomp=(4, 4, 4),
overlap=0):
"""
Calculates the electrostatic potential from the BEMPP solution using the user defined cube or
the cube corresponding to the cylindrical outer boundary.
TODO: This function is not very MPI aware and could be optimized!
TODO: BEMPP uses all available processors on the node to calculate the potential.
TODO: But if we run on multiple nodes, we could partition the domains.
:param limits: tuple, list or np.ndarray of shape (3, 2)
containing xmin, xmax, ymin, ymax, zmin, zmax
use None to use the individual limit from the electrode system.
:param res: resolution of the 3D mesh
:param domain_decomp: how many subdomains to use for calculation in the three directions x, y, z
Note: it can significantly increase computation speed to use more subdomains,
up to a point...
:param overlap: overlap of the subdomains in cell numbers, has no effect at the moment.
Note: There is a minimum overlap of one cell at overlap = 0
:return:
"""
limits = np.array(limits)
if limits.shape != (3, 2):
print("Wrong shape of limits: {}. "
"Must be ((xmin, xmax), (ymin, ymax), (zmin, zmax)) = (3, 2).".format(limits.shape))
return 1
_mesh_data = self._full_mesh
_n_data = self._variables_bempp["n_fun_coeff"]
# _d_data = self._variables_bempp["d_fun_coeff"]
assert _mesh_data is not None and _n_data is not None, \
"One of grid, dirichlet_function, neumann_function is None!"
_ts = time.time()
self.message("Re-Generating Grid, GridFunctions, and FunctionSpace")
_mesh = bempp.api.grid.grid_from_element_data(_mesh_data["verts"], _mesh_data["elems"], _mesh_data["domns"])
dp0_space = bempp.api.function_space(_mesh, "DP", 0)
n_fun = bempp.api.GridFunction(dp0_space, coefficients=_n_data)
# d_fun = bempp.api.GridFunction(dp0_space, coefficients=_d_data)
# dp0_space = self._variables_bempp["dp0_space"]
# p1_space = self._variables_bempp["p1_space"]
self.message("Re-Generating took {}".format(time.strftime('%H:%M:%S', time.gmtime(int(time.time() - _ts)))))
# noinspection PyUnresolvedReferences
all_vert = self._full_mesh["verts"]
# get limits from electrodes
limits_elec = np.array([[np.min(all_vert[i, :]), np.max(all_vert[i, :])] for i in XYZ])
# replace None limits with electrode limits
limits[np.equal(limits, None)] = limits_elec[np.equal(limits, None)]
res = np.array([res]).ravel()
_n = np.array(np.round((limits[:, 1] - limits[:, 0]) / res, 10), int) + 1
# Recalculate resolution to match integer n's
_d = (limits[:, 1] - limits[:, 0]) / (_n - 1)
# Generate a full mesh to be indexed later
_r = np.array([np.linspace(limits[i, 0], limits[i, 1], _n[i]) for i in XYZ])
mesh = np.meshgrid(_r[X], _r[Y], _r[Z], indexing='ij') # type: np.ndarray
# Initialize potential array
pot = np.zeros(mesh[0].shape)
# Index borders (can be float)
borders = np.array([np.linspace(0, _n[i], domain_decomp[i] + 1) for i in XYZ])
# Indices (must be int)
# note: rounding will likely lead to domains that are off in size by one index, but that's fine
start_idxs = np.array([np.array(borders[i][:-1], int) - overlap for i in XYZ])
end_idxs = np.array([np.array(borders[i][1:], int) + overlap for i in XYZ])
for i in XYZ:
start_idxs[i][0] = 0
end_idxs[i][-1] = int(borders[i][-1])
# Print out domain information
if RANK == 0 and self._debug:
print("Potential Calculation. "
"Grid spacings: ({:.4f}, {:.4f}, {:.4f}), number of meshes: {}".format(_d[0], _d[1], _d[2], _n))
print("Number of Subdomains: {}, "
"Domain decomposition {}:".format(np.product(domain_decomp), domain_decomp))
for i, dirs in enumerate(["x", "y", "z"]):
print("{}: Indices {} to {}".format(dirs, start_idxs[i], end_idxs[i] - 1))
# Calculate mask (True if inside/on surface of an electrode)
all_grid_pts = np.vstack([_mesh.ravel() for _mesh in mesh]).T
mymask = np.zeros(all_grid_pts.shape[0], dtype=bool)
_ts = time.time()
if RANK == 0:
print("\n*** Calculating mask for {} points ***".format(all_grid_pts.shape[0]))
for _elec_object in self._elec_objects:
self.message("[{}] Working on electrode object {}".format(
time.strftime('%H:%M:%S', time.gmtime(int(time.time() - _ts))), _elec_object.name))
mymask = mymask | _elec_object.points_inside(all_grid_pts)
# Number of masked points
n_masked = int(np.count_nonzero(mymask))
# Reshape mask to match original mesh
mymask = mymask.T.reshape(mesh[0].shape)
self.message("Generating mask took {}".format(time.strftime('%H:%M:%S', time.gmtime(int(time.time() - _ts)))))
self.message("\n*** Calculating potential for {} points ***".format(all_grid_pts.shape[0] - n_masked))
_ts = time.time()
# Iterate over all the dimensions, calculate the subset of potential
if RANK == 0:
domain_idx = 1
for x1, x2 in zip(start_idxs[X], end_idxs[X]):
for y1, y2 in zip(start_idxs[Y], end_idxs[Y]):
for z1, z2 in zip(start_idxs[Z], end_idxs[Z]):
# Create mask subset for this set of points and only calculate those
local_mask = mymask[x1:x2, y1:y2, z1:z2].ravel()
grid_pts = np.vstack([_mesh[x1:x2, y1:y2, z1:z2].ravel() for _mesh in mesh])
grid_pts_len = grid_pts.shape[1] # save shape for later
grid_pts = grid_pts[:, ~local_mask] # reduce for faster calculation
self.message(
"[{}] Domain {}/{}, Index Limits: x = ({}, {}), y = ({}, {}), z = ({}, {})".format(
time.strftime('%H:%M:%S', time.gmtime(int(time.time() - _ts))), domain_idx,
np.product(domain_decomp), x1, x2 - 1, y1, y2 - 1, z1, z2 - 1))
self.message("Removed {} points due to mask".format(grid_pts_len - grid_pts.shape[1]))
temp_pot = bempp.api.operators.potential.laplace.single_layer(dp0_space, grid_pts) * n_fun
# Create array of original shape and fill with result at right place,
# then move into master array
_pot = np.zeros(grid_pts_len)
_pot[~local_mask] = temp_pot[0]
pot[x1:x2, y1:y2, z1:z2] = _pot.reshape([x2 - x1, y2 - y1, z2 - z1])
domain_idx += 1
try:
del grid_pts
del _pot
del temp_pot
except Exception as _e:
print("Exception {} happened, but trying to carry on...".format(_e))
# TODO: Distribute results to other nodes -DW
self._variables_bempp["ef_phi"] = pot
self._variables_bempp["ef_mask"] = mymask
self._variables_bempp["d"] = _d
self._variables_bempp["n"] = _n
self._variables_bempp["limits"] = limits
return 0
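# --- Typical call sequence (sketch, not executed); the limits/resolution values are
# illustrative. solve_bempp() must run before calculate_potential(), which in turn
# must run before calculate_efield().
#
#   rfq.solve_bempp()
#   rfq.calculate_potential(limits=((-0.015, 0.015), (-0.015, 0.015), (None, None)),
#                           res=(0.002, 0.002, 0.002),
#                           domain_decomp=(1, 1, 8))
#   rfq.calculate_efield()
#   phi_data = rfq.get_phi()  # dict with "phi", "mask", "limits", "d", "n"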
def plot_combo(self, xypos=0.000, xyscale=1.0, zlim=None):
assert self._variables_bempp["ef_itp"] is not None, "No E-Field calculated yet!"
numpts = 5000
if zlim is None:
zmin = np.min(self._variables_bempp["ef_itp"]._field["z"].grid[2]) # TODO: Field() should have limits
zmax = np.max(self._variables_bempp["ef_itp"]._field["z"].grid[2])
else:
zmin, zmax = zlim
# Ez along z at x = y = 0
x = np.zeros(numpts)
y = np.zeros(numpts)
z = np.linspace(zmin, zmax, numpts)
points = np.vstack([x, y, z]).T
_, _, ez = self._variables_bempp["ef_itp"](points)
plt.plot(z, ez, color=colors[0], label="$E_z$")
# Ex along z at x = xypos, y = 0
x = np.ones(numpts) * xypos
points = np.vstack([x, y, z]).T
ex, _, _ = self._variables_bempp["ef_itp"](points)
plt.plot(z, xyscale * ex,
color=colors[1],
label="$\mathrm{{E}}_\mathrm{{x}}$ at x = {} m".format(xypos))
# Ey along z at x = 0, y = xypos
x = np.zeros(numpts)
y = np.ones(numpts) * xypos
points = np.vstack([x, y, z]).T
_, ey, _ = self._variables_bempp["ef_itp"](points)
plt.plot(z, xyscale * ey,
color=colors[2],
label="$\mathrm{{E}}_\mathrm{{y}}$ at y = {} m".format(xypos))
plt.xlabel("z (m)")
plt.ylabel("Field (V/m)")
plt.legend(loc=2)
plt.show()
def get_phi(self):
return {"phi": self._variables_bempp["ef_phi"],
"mask": self._variables_bempp["ef_mask"],
"limits": self._variables_bempp["limits"],
"d": self._variables_bempp["d"],
"n": self._variables_bempp["n"]}
def solve_bempp(self):
if self._full_mesh is None:
print("No mesh generated, trying now...")
mesh = self.generate_full_mesh()
else:
mesh = bempp.api.grid.grid_from_element_data(self._full_mesh["verts"],
self._full_mesh["elems"],
self._full_mesh["domns"])
dp0_space = bempp.api.function_space(mesh, "DP", 0)
domain_mapping = {}
for _elec_obj in self._elec_objects:
domain_mapping[_elec_obj.domain_idx] = _elec_obj.voltage
def f(*args):
domain_index = args[2]
result = args[3]
result[0] = domain_mapping[domain_index]
dirichlet_fun = bempp.api.GridFunction(dp0_space, fun=f)
self._variables_bempp["d_fun_coeff"] = dirichlet_fun.coefficients
if self._debug and RANK == 0:
dirichlet_fun.plot()
# Solve BEMPP problem only on 1 cpu (has internal multiprocessing)
if RANK == 0:
slp = bempp.api.operators.boundary.laplace.single_layer(dp0_space, dp0_space, dp0_space)
neumann_fun, info = bempp.api.linalg.gmres(slp, dirichlet_fun, tol=1e-6, use_strong_form=True)
mpi_data = {"n_coeff": neumann_fun.coefficients,
"info": info}
else:
mpi_data = None
mpi_data = COMM.bcast(mpi_data, root=0)
COMM.barrier()
self._variables_bempp["n_fun_coeff"] = mpi_data["n_coeff"]
return 0
def initialize(self):
assert len(self._cells) > 0, "No cells have been added, no vanes can be generated."
# Delete all existing vane objects
self._elec_objects = [_elec_obj for _elec_obj in self._elec_objects if "vane" not in _elec_obj.name]
# There are four vanes (rods) in the RFQ
# x = horizontal, y = vertical, with p, m denoting positive and negative axis directions
# but they are symmetric, so we create only two and copy them later
local_vanes = [PyRFQVane(parent=self,
vane_type="yp",
cells=self._cells,
voltage=self._voltage * 0.5, # Given voltage is 'inter-vane'
occ_tolerance=self._occ_tolerance,
occ_npart=15,
h=self._variables_bempp["grid_res"],
debug=self._debug),
PyRFQVane(parent=self,
vane_type="xp",
cells=self._cells,
voltage=-self._voltage * 0.5, # Given voltage is 'inter-vane'
occ_tolerance=self._occ_tolerance,
occ_npart=15,
h=self._variables_bempp["grid_res"],
debug=self._debug)]
for _vane in local_vanes:
_vane.set_mesh_parameter("r_tip", self.get_geometry_parameter("vane_radius"))
_vane.set_mesh_parameter("h_type", self.get_geometry_parameter("vane_height_type"))
_vane.set_mesh_parameter("h_block", self.get_geometry_parameter("vane_height"))
_vane.set_mesh_parameter("refine_steps", self.get_bempp_parameter("refine_steps"))
_vane.set_mesh_parameter("reverse_mesh", self.get_bempp_parameter("reverse_mesh"))
_vane.set_mesh_parameter("nz", self.get_geometry_parameter("nz"))
# Generate the two vanes in parallel:
if MPI is None or SIZE == 1:
if USE_MULTIPROC:
p = Pool(2)
local_vanes = p.map(self._worker_generate_vane_profile, local_vanes)
else:
for i, _vane in enumerate(local_vanes):
local_vanes[i] = self._worker_generate_vane_profile(_vane)
else:
if RANK == 0:
self.message("Proc {} working on vane {}".format(RANK, local_vanes[0].vane_type), rank=RANK)
_vane = self._worker_generate_vane_profile(local_vanes[0])
mpi_data = {"vanes": [_vane, COMM.recv(source=1)]}
elif RANK == 1:
self.message("Proc {} working on vane {}".format(RANK, local_vanes[1].vane_type), rank=RANK)
_vane = self._worker_generate_vane_profile(local_vanes[1])
COMM.send(_vane, dest=0)
mpi_data = None
else:
if self._debug:
self.message("Proc {} idle.".format(RANK), rank=RANK)
mpi_data = None
mpi_data = COMM.bcast(mpi_data, root=0)
local_vanes = mpi_data["vanes"]
# --- Now make copies, set vane_type
self.message("Copying vanes...")
for i, vane_type in enumerate(["ym", "xm"]):
new_vane = copy.deepcopy(local_vanes[i]) # First one is y direction
new_vane.set_vane_type(vane_type)
local_vanes.append(new_vane)
for _vane in local_vanes:
self._elec_objects.append(_vane)
self._initialized = True
COMM.barrier() # TODO: Necessary?
return 0
def generate_full_mesh(self):
assert HAVE_BEMPP, "Can only create the full mesh with BEMPP at the moment!"
# TODO: Better assertion/init/generate
# assert self._vanes is not None, "No vanes generated yet, cannot mesh..."
assert len(self._elec_objects) != 0, "Need at least one electrode object to generate full mesh!"
# Initialize empty arrays of the correct shape (3 x n)
vertices = np.zeros([3, 0])
elements = np.zeros([3, 0])
vertex_counter = 0
domains = np.zeros([0], int)
# For now, do this only on the first node
if RANK == 0:
# Trying to do this with gmsh, but running into trouble with identical surface numbers
# self._full_mesh_fn = os.path.join(self._temp_dir, "full_rfq.msh")
# command = [GMSH_EXE, '-merge']
#
# for _elec in self._elec_objects:
# command.append(_elec.mesh_fn)
#
# command.append('-o')
# command.append(self._full_mesh_fn)
# command.append('-format')
# command.append('msh2')
# command.append('-save')
#
# output = subprocess.run(command)
#
# if self._debug:
# print(output)
for _elec in self._elec_objects:
mesh = bempp.api.import_grid(_elec.mesh_fn)
_vertices = mesh.leaf_view.vertices
_elements = mesh.leaf_view.elements
_domain_ids = mesh.leaf_view.domain_indices
vertices = np.concatenate((vertices, _vertices), axis=1)
elements = np.concatenate((elements, _elements + vertex_counter), axis=1)
domains = np.concatenate((domains, _domain_ids), axis=0)
# Increase the running counters
vertex_counter += _vertices.shape[1]
self._full_mesh = {"verts": vertices,
"elems": elements,
"domns": domains}
if self._debug:
bempp.api.grid.grid_from_element_data(vertices,
elements,
domains).plot()
# Broadcast results to all nodes
self._full_mesh = COMM.bcast(self._full_mesh, root=0)
COMM.barrier()
return self._full_mesh
def generate_geo_str(self):
# Check if RFQ has been initialized
if not self._initialized:
print("RFQ needs to be initialized, attempting now...")
# if MPI is None or SIZE == 1:
if RANK == 0:
# no mpi or single core: use python multiprocessing and at least have threads for speedup
if USE_MULTIPROC:
p = Pool()
_elec_objects = p.map(self._worker_generate_geo_str, self._elec_objects)
else:
_elec_objects = []
for i, _elec_obj in enumerate(self._elec_objects):
_elec_objects.append(self._worker_generate_geo_str(_elec_obj))
mpi_data = {"elecs": _elec_objects}
else:
mpi_data = None
self._elec_objects = COMM.bcast(mpi_data, root=0)["elecs"]
COMM.barrier()
# TODO: MPI-ify this?
# elif SIZE >= 4:
# # We have 4 or more procs and can use a single processor per vane
#
# if RANK <= 3:
# # Generate on proc 0-3
# print("Proc {} working on electrode {}.".format(RANK + 1, self._elec_objects[RANK].name))
# sys.stdout.flush()
# _vane = self._worker_generate_geo_str(self._elec_objects[RANK])
#
# if RANK == 0:
# # Assemble on proc 0
# mpi_data = {"vanes": [_vane,
# COMM.recv(source=1),
# COMM.recv(source=2),
# COMM.recv(source=3)]}
# else:
# COMM.send(_vane, dest=0)
# mpi_data = None
#
# else:
# print("Proc {} idle.".format(RANK + 1))
# sys.stdout.flush()
# mpi_data = None
#
# # Distribute
# self._elec_objects = COMM.bcast(mpi_data, root=0)["vanes"]
# COMM.barrier()
# else:
# # We have 2 or 3 procs, so do two vanes each on proc 0 and proc 1
# if RANK <= 1:
# # Generate on proc 0, 1
# print("Proc {} working on vanes {} and {}.".format(RANK + 1,
# self._elec_objects[RANK].vane_type,
# self._elec_objects[RANK + 2].vane_type))
# sys.stdout.flush()
# local_vanes = [self._worker_generate_geo_str(self._elec_objects[RANK]),
# self._worker_generate_geo_str(self._elec_objects[RANK + 2])]
#
# if RANK == 0:
# # Assemble on proc 0
# other_vanes = COMM.recv(source=1)
# mpi_data = {"vanes": [local_vanes[0],
# other_vanes[0],
# local_vanes[1],
# other_vanes[1]]}
# else:
# COMM.send(local_vanes, dest=0)
# mpi_data = None
#
# else:
# print("Proc {} idle.".format(RANK + 1))
# sys.stdout.flush()
# mpi_data = None
#
# # Distribute
# self._elec_objects = COMM.bcast(mpi_data, root=0)["vanes"]
# COMM.barrier()
self._have_geo_str = True
return 0
def generate_gmsh_files(self):
if not HAVE_GMSH:
print("Gmsh could not be found (check path?) not creating msh files and brep.")
return 1
# Check for existing geo string
if not self._have_geo_str:
print("No geo string has been generated, attemting to do so now...")
self.generate_geo_str()
if RANK == 0:
print("Generating gmsh files of the electrodes.")
sys.stdout.flush()
# no mpi or single core: use python multiprocessing and at least have threads for speedup
if USE_MULTIPROC:
p = Pool()
_elec_objects = p.map(self._worker_generate_gmsh_files, self._elec_objects)
else:
_elec_objects = []
for i, _elec_obj in enumerate(self._elec_objects):
_elec_objects.append(self._worker_generate_gmsh_files(_elec_obj))
mpi_data = {"elecs": _elec_objects}
else:
mpi_data = None
self._elec_objects = COMM.bcast(mpi_data, root=0)["elecs"]
COMM.barrier()
# if MPI is None or SIZE == 1:
# # no mpi or single core: use python multiprocessing and at least have threads for speedup
# if USE_MULTIPROC:
# p = Pool()
# self._elec_objects = p.map(self._worker_generate_gmsh_files, self._elec_objects)
# else:
# for i, _vane in enumerate(self._elec_objects):
# self._elec_objects[i] = self._worker_generate_gmsh_files(_vane)
#
# elif SIZE >= 4:
# # We have 4 or more procs and can use a single processor per vane
#
# if RANK <= 3:
# # Generate on proc 0-3
# print("Proc {} working on vane {}.".format(RANK + 1, self._elec_objects[RANK].vane_type))
# sys.stdout.flush()
# _vane = self._worker_generate_gmsh_files(self._elec_objects[RANK])
#
# if RANK == 0:
# # Assemble on proc 0
# mpi_data = {"vanes": [_vane,
# COMM.recv(source=1),
# COMM.recv(source=2),
# COMM.recv(source=3)]}
# else:
# COMM.send(_vane, dest=0)
# mpi_data = None
#
# else:
# print("Proc {} idle.".format(RANK + 1))
# sys.stdout.flush()
# mpi_data = None
#
# # Distribute
# self._elec_objects = COMM.bcast(mpi_data, root=0)["vanes"]
# COMM.barrier()
#
# else:
# # We have 2 or 3 procs, so do two vanes each on proc 0 and proc 1
# if RANK <= 1:
# # Generate on proc 0, 1
# print("Proc {} working on vanes {} and {}.".format(RANK + 1,
# self._elec_objects[RANK].vane_type,
# self._elec_objects[RANK + 2].vane_type))
# sys.stdout.flush()
# local_vanes = [self._worker_generate_gmsh_files(self._elec_objects[RANK]),
# self._worker_generate_gmsh_files(self._elec_objects[RANK + 2])]
#
# if RANK == 0:
# # Assemble on proc 0
# other_vanes = COMM.recv(source=1)
# mpi_data = {"vanes": [local_vanes[0],
# other_vanes[0],
# local_vanes[1],
# other_vanes[1]]}
# else:
# COMM.send(local_vanes, dest=0)
# mpi_data = None
#
# else:
# print("Proc {} idle.".format(RANK + 1))
# sys.stdout.flush()
# mpi_data = None
#
# # Distribute
# self._elec_objects = COMM.bcast(mpi_data, root=0)["vanes"]
# COMM.barrier()
return 0
def generate_occ(self):
# Unfortunately, multiprocessing/MPI can't handle SwigPyObject objects
for _elec_object in self._elec_objects:
_elec_object.generate_occ()
return 0
def plot_vane_profile(self):
_vanes = [_elec_obj for _elec_obj in self._elec_objects if "vane" in _elec_obj.name]
assert len(_vanes) != 0, "No vanes calculated yet!"
_fig, _ax = plt.subplots()
for vane in _vanes:
if vane.vane_type == "xp":
z, x = vane.get_profile(nz=10000)
_ax.plot(z, x, color=colors[0], label="x-profile")
print("X Vane starting point", z[0], x[0])
if vane.vane_type == "yp":
z, y = vane.get_profile(nz=10000)
_ax.plot(z, -y, color=colors[1], label="y-profile")
print("Y Vane starting point", z[0], y[0])
plt.xlabel("z (m)")
plt.ylabel("x/y (m)")
plt.legend(loc=1)
plt.show()
def print_cells(self):
for number, cell in enumerate(self._cells):
print("RFQ Cell {}: ".format(number + 1), cell)
return 0
@staticmethod
def message(*args, rank=0):
if RANK == rank:
print(*args)
sys.stdout.flush()
return 0
def reset(self):
self._vanes = []
self._elec_objects = []
self._full_mesh = None
self._have_geo_str = False
self._have_occ_obj = False
self._have_bem_obj = False
self._initialized = False
return 0
def save_to_file(self, filename=None):
# if RANK == 0:
# if filename is None:
# filename = FileDialog().get_filename(action="save")
for key, item in self.__dict__.items():
print(key, ":", item)
# with open(filename, "wb") as of:
# pass
return 0
@staticmethod
def _worker_generate_gmsh_files(electrode):
electrode.generate_gmsh_files()
return electrode
@staticmethod
def _worker_generate_geo_str(electrode):
electrode.generate_geo_str()
return electrode
def _worker_generate_vane_profile(self, vane):
vane.calculate_profile(fudge=self._fudge_vanes)
return vane
def write_inventor_macro(self,
save_folder=None,
**kwargs):
"""
This function writes out the vane profiles for X and Y and Inventor VBA macros that can
be run immediately to generate 3D solid models in Autodesk Inventor (c).
kwargs:
vane_type: one of 'rod', 'vane', default is 'vane' TODO: Only 'vane' implemented as of now.
vane_radius: radius of curvature of circular vanes, default is 0.005 m TODO: add hyperbolic vanes
vane_height: height of a single vane either from the minimum point (vane_height_type = 'relative')
or from the center of the RFQ (vane_height_type = 'absolute')
default is 0.05 m
vane_height_type: see above. default is absolute
nz: number of points to use for spline in z direction. default is 500.
:param save_folder: If None, a prompt is opened
:return:
"""
# TODO: assert that height and absolute/relative combination work out geometrically
# TODO: with the amplitude of the modulations (i.e. no degenerate geometry)
for key, value in kwargs.items():
assert key in self._variables_gmtry.keys(), "write_inventor_macro: Unrecognized kwarg '{}'".format(key)
self._variables_gmtry[key] = value
assert self._variables_gmtry["vane_type"] != "rod", "vane_type == 'rod' not implemented yet. Aborting"
if save_folder is None:
fd = FileDialog()
save_folder, _ = fd.get_filename('folder')
if save_folder is None:
return 0
for direction in ["X", "Y"]:
# Generate text for Inventor macro
header_text = """Sub CreateRFQElectrode{}()
Dim oApp As Application
Set oApp = ThisApplication
' Get a reference to the TransientGeometry object.
Dim tg As TransientGeometry
Set tg = oApp.TransientGeometry
Dim oPart As PartDocument
Dim oCompDef As PartComponentDefinition
Dim oSketch3D As Sketch3D
Dim oSpline As SketchSpline3D
Dim vertexCollection1 As ObjectCollection
""".format(direction)
electrode_text = """
Set oPart = oApp.Documents.Add(kPartDocumentObject, , True)
Set oCompDef = oPart.ComponentDefinition
Set oSketch3D = oCompDef.Sketches3D.Add
Set vertexCollection1 = oApp.TransientObjects.CreateObjectCollection(Null)
FileName = "{}"
fileNo = FreeFile 'Get first free file number
Dim minHeight As Double
minHeight = 10000 'cm, large number
Open FileName For Input As #fileNo
Do While Not EOF(fileNo)
Dim strLine As String
Line Input #fileNo, strLine
strLine = Trim$(strLine)
If strLine <> "" Then
' Break the line up, using commas as the delimiter.
Dim astrPieces() As String
astrPieces = Split(strLine, ",")
End If
Call vertexCollection1.Add(tg.CreatePoint(astrPieces(0), astrPieces(1), astrPieces(2)))
' For X vane this is idx 0, for y vane it is idx 1
If CDbl(astrPieces({})) < minHeight Then
minHeight = CDbl(astrPieces({}))
End If
Loop
Close #fileNo
Set oSpline = oSketch3D.SketchSplines3D.Add(vertexCollection1)
""".format(os.path.join(save_folder, "Vane_{}.txt".format(direction)), AXES[direction], AXES[direction])
sweep_text = """
' Now make a sketch to be swept
' Start with a work plane
Dim oWP As WorkPlane
Set oWP = oCompDef.WorkPlanes.AddByNormalToCurve(oSpline, oSpline.StartSketchPoint)
' Add a 2D sketch
Dim oSketch2D As PlanarSketch
Set oSketch2D = oCompDef.Sketches.Add(oWP)
"""
if direction == "X":
sweep_text += """
' Make sure the orientation of the sketch is correct
' We want the sketch x axis oriented with the lab y axis for X vane
oSketch2D.AxisEntity = oCompDef.WorkAxes.Item(2)
"""
else:
sweep_text += """
' Make sure the orientation of the sketch is correct
' We want the sketch x axis oriented with the lab x axis for the Y vane
oSketch2D.AxisEntity = oCompDef.WorkAxes.Item(1)
' Also, we need to flip the axis for Y vanes
oSketch2D.NaturalAxisDirection = False
"""
sweep_text += """
' Draw the half circle and block
Dim radius As Double
Dim height As Double
radius = {} 'cm
height = {} 'cm
Dim oOrigin As SketchEntity
Set oOrigin = oSketch2D.AddByProjectingEntity(oSpline.StartSketchPoint)
""".format(self._variables_gmtry["vane_radius"] * 100.0,
self._variables_gmtry["vane_height"] * 100.0)
sweep_text += """
Dim oCenter As Point2d
Set oCenter = tg.CreatePoint2d(oOrigin.Geometry.X, oOrigin.Geometry.Y - radius)
Dim oCirc1 As Point2d
Set oCirc1 = tg.CreatePoint2d(oOrigin.Geometry.X - radius, oOrigin.Geometry.Y - radius)
Dim oCirc2 As Point2d
Set oCirc2 = tg.CreatePoint2d(oOrigin.Geometry.X + radius, oOrigin.Geometry.Y - radius)
Dim arc As SketchArc
Set arc = oSketch2D.SketchArcs.AddByThreePoints(oCirc1, oOrigin.Geometry, oCirc2)
"""
sweep_text += """
Dim line1 As SketchLine
Set line1 = oSketch2D.SketchLines.AddByTwoPoints(arc.EndSketchPoint, arc.StartSketchPoint)
' Create a Path
Dim oPath As Path
Set oPath = oCompDef.Features.CreatePath(oSpline)
' Create a profile.
Dim oProfile As Profile
Set oProfile = oSketch2D.Profiles.AddForSolid
' Create the sweep feature.
Dim oSweep As SweepFeature
' Note: I am not sure if keeping the profile perpendicular to the path is more accurate,
' but unfortunately for trapezoidal cells (small fillets) it doesn't work
' so it has to be a 'parallel to original profile' kinda sweep -- or not? , kParallelToOriginalProfile
Set oSweep = oCompDef.Features.SweepFeatures.AddUsingPath(oProfile, oPath, kJoinOperation)
"""
# Small modification depending on absolute or relative vane height:
if self._variables_gmtry["vane_height_type"] == 'relative':
sweep_text += """
' Create another work plane above the vane
Dim oWP2 As WorkPlane
Set oWP2 = oCompDef.WorkPlanes.AddByPlaneAndOffset(oCompDef.WorkPlanes.Item({}), minHeight + height)
""".format(AXES[direction] + 1) # X is 0 and Y is 1, but the correct plane indices are 1 and 2
else:
sweep_text += """
' Create another work plane above the vane
Dim oWP2 As WorkPlane
Set oWP2 = oCompDef.WorkPlanes.AddByPlaneAndOffset(oCompDef.WorkPlanes.Item({}), height)
""".format(AXES[direction] + 1) # X is 0 and Y is 1, but the correct plane indices are 1 and 2
sweep_text += """
' Start a sketch
Set oSketch2D = oCompDef.Sketches.Add(oWP2)
' Project the bottom face of the sweep
' (start and end face might be tilted and contribute)
' at this point I don't know how Inventor orders the faces, 2 is my best guess but
' might be different occasionally... -DW
Dim oEdge As Edge
For Each oEdge In oSweep.SideFaces.Item(2).Edges
Call oSketch2D.AddByProjectingEntity(oEdge)
Next
' Create a profile.
Set oProfile = oSketch2D.Profiles.AddForSolid
' Extrude
Dim oExtDef As ExtrudeDefinition
Dim oExt As ExtrudeFeature
Set oExtDef = oCompDef.Features.ExtrudeFeatures.CreateExtrudeDefinition(oProfile, kJoinOperation)
Call oExtDef.SetToNextExtent(kNegativeExtentDirection, oSweep.SurfaceBody)
Set oExt = oCompDef.Features.ExtrudeFeatures.Add(oExtDef)
' Repeat but cutting in the up-direction
' Extrude
Set oExtDef = oCompDef.Features.ExtrudeFeatures.CreateExtrudeDefinition(oProfile, kCutOperation)
Call oExtDef.SetThroughAllExtent(kPositiveExtentDirection)
Set oExt = oCompDef.Features.ExtrudeFeatures.Add(oExtDef)
"""
footer_text = """
oPart.UnitsOfMeasure.LengthUnits = kMillimeterLengthUnits
ThisApplication.ActiveView.Fit
End Sub
"""
# Write the Autodesk Inventor VBA macros:
with open(os.path.join(save_folder, "Vane_{}.ivb".format(direction)), "w") as outfile:
outfile.write(header_text + electrode_text + sweep_text + footer_text)
# Write the vane profile files:
with open(os.path.join(save_folder, "Vane_{}.txt".format(direction)), "w") as outfile:
if direction == "X":
for vane in self._vanes:
if vane.vane_type == "xp":
z, x = vane.get_profile(nz=self._variables_gmtry["nz"])
min_x = np.min(x)
max_x = np.max(x)
z_start = np.min(z)
z_end = np.max(z)
for _x, _z in zip(x, z):
outfile.write("{:.6f}, {:.6f}, {:.6f}\r\n".format(
_x * 100.0, # For some weird reason Inventor uses cm as default...
0.0,
_z * 100.0))
else:
for vane in self._vanes:
if vane.vane_type == "yp":
z, y = vane.get_profile(nz=self._variables_gmtry["nz"])
min_y = np.min(y)
max_y = np.max(y)
z_start = np.min(z)
z_end = np.max(z)
for _y, _z in zip(y, z):
outfile.write("{:.6f}, {:.6f}, {:.6f}\r\n".format(
0.0,
_y * 100.0,
_z * 100.0))
"""
Tests to make sure deepchem models can overfit on tiny datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
import scipy.io
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from flaky import flaky
class TestOverfit(test_util.TensorFlowTestCase):
"""
Test that models can overfit simple datasets.
"""
def setUp(self):
super(TestOverfit, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_sklearn_regression_overfit(self):
"""Test that sklearn models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_sklearn_classification_overfit(self):
"""Test that sklearn models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_skewed_classification_overfit(self):
"""Test sklearn models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_regression_overfit(self):
"""Test that TensorFlow models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tg_regression_overfit(self):
"""Test that TensorGraph models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorGraphMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_classification_overfit(self):
"""Test that tensorflow models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tg_classification_overfit(self):
"""Test that TensorGraph models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer,
learning_rate=0.0003,
beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_fittransform_regression_overfit(self):
"""Test that TensorFlow FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tg_fittransform_regression_overfit(self):
"""Test that TensorGraph FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.TensorGraphMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_skewed_classification_overfit(self):
"""Test tensorflow models can overfit 0/1 datasets with few actives."""
#n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tg_skewed_classification_overfit(self):
"""Test TensorGraph models can overfit 0/1 datasets with few actives."""
#n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tf_skewed_missing_classification_overfit(self):
"""TF, skewed data, few actives
Test tensorflow models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
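# Rebalance the skewed labels: actives are upweighted to len(y) / n_actives so their
# total weight roughly matches that of the inactives (which keep weight 1).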
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = np.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = np.reshape(w_flat, (n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[1.],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
def test_tg_skewed_missing_classification_overfit(self):
"""TG, skewed data, few actives
Test TensorGraph models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = np.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = np.reshape(w_flat, (n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[1.],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .7
def test_sklearn_multitask_classification_overfit(self):
"""Test SKLearn singletask-to-multitask overfits tiny data."""
n_tasks = 10
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_multitask_classification_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
@flaky
def test_tg_multitask_classification_overfit(self):
"""Test TensorGraph multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer,
learning_rate=0.0003,
beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_robust_multitask_classification_overfit(self):
"""Test tf robust multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.RobustMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_logreg_multitask_classification_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowLogisticRegression(
n_tasks,
n_features,
learning_rate=0.5,
weight_init_stddevs=[.01],
batch_size=n_samples)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_IRV_multitask_classification_overfit(self):
"""Test IRV classifier overfits tiny data."""
n_tasks = 5
n_samples = 10
n_features = 128
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.randint(2, size=(n_samples, n_features))
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
dataset_trans = IRV_transformer.transform(dataset)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowMultiTaskIRVClassifier(
n_tasks, K=5, learning_rate=0.01, batch_size=n_samples)
# Fit trained model
model.fit(dataset_trans)
model.save()
# Eval model on train
scores = model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_multitask_regression_overfit(self):
"""Test SKLearn singletask-to-multitask overfits tiny regression data."""
n_tasks = 2
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.r2_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestRegressor()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_tf_multitask_regression_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tg_multitask_regression_overfit(self):
"""Test TensorGraph multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.TensorGraphMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer,
learning_rate=0.0003,
beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_robust_multitask_regression_overfit(self):
"""Test tf robust multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.RobustMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .2
def test_graph_conv_singletask_classification_overfit(self):
"""Test graph-conv multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
g = tf.Graph()
sess = tf.Session(graph=g)
n_tasks = 1
n_samples = 10
n_features = 3
n_classes = 2
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_feat = 75
batch_size = 10
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(128, 64, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphClassifier(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_graph_conv_singletask_regression_overfit(self):
"""Test graph-conv multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
g = tf.Graph()
sess = tf.Session(graph=g)
n_tasks = 1
n_samples = 10
n_features = 3
n_classes = 2
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean)
n_feat = 75
batch_size = 10
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(128, 64))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphRegressor(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-2,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=40)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] < .2
def test_DTNN_multitask_regression_overfit(self):
"""Test deep tensor neural net overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
input_file = os.path.join(self.current_dir, "example_DTNN.mat")
dataset = scipy.io.loadmat(input_file)
X = dataset['X']
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids=None)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_tasks = y.shape[1]
batch_size = 10
graph_model = dc.nn.SequentialDTNNGraph()
graph_model.add(dc.nn.DTNNEmbedding(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20))
graph_model.add(dc.nn.DTNNGather(n_embedding=20))
n_feat = 20
model = dc.models.MultitaskGraphRegressor(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
def test_tensorgraph_DTNN_multitask_regression_overfit(self):
"""Test deep tensor neural net overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
input_file = os.path.join(self.current_dir, "example_DTNN.mat")
dataset = scipy.io.loadmat(input_file)
X = dataset['X']
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids=None)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_tasks = y.shape[1]
batch_size = 10
model = dc.models.DTNNTensorGraph(
n_tasks,
n_embedding=20,
n_distance=100,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=20)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
def test_ANI_multitask_regression_overfit(self):
"""Test ANI-1 regression overfits tiny data."""
input_file = os.path.join(self.current_dir, "example_DTNN.mat")
np.random.seed(123)
tf.set_random_seed(123)
dataset = scipy.io.loadmat(input_file)
X = np.concatenate([np.expand_dims(dataset['Z'], 2), dataset['R']], axis=2)
X = X[:, :13, :]
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids=None)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, mode="regression")
n_tasks = y.shape[1]
batch_size = 10
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset),
dc.trans.ANITransformer(
max_atoms=13,
atom_cases=[1, 6, 7, 8],
radial_cutoff=8.,
angular_cutoff=5.,
radial_length=8,
angular_length=4)
]
for transformer in transformers:
dataset = transformer.transform(dataset)
n_feat = transformers[-1].get_num_feats() - 1
model = dc.models.ANIRegression(
n_tasks,
13,
n_feat,
atom_number_cases=[1, 6, 7, 8],
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=50)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric], transformers[0:1])
assert scores[regression_metric.name] > .8
def test_BP_symmetry_function_overfit(self):
"""Test ANI-1 regression overfits tiny data."""
input_file = os.path.join(self.current_dir, "example_DTNN.mat")
np.random.seed(123)
tf.set_random_seed(123)
dataset = scipy.io.loadmat(input_file)
X = np.concatenate([np.expand_dims(dataset['Z'], 2), dataset['R']], axis=2)
X = X[:, :13, :]
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids=None)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, mode="regression")
n_tasks = y.shape[1]
batch_size = 10
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset),
dc.trans.ANITransformer(
max_atoms=13,
atom_cases=[1, 6, 7, 8],
atomic_number_differentiated=False,
radial_cutoff=8.,
angular_cutoff=5.,
radial_length=8,
angular_length=4)
]
for transformer in transformers:
dataset = transformer.transform(dataset)
n_feat = transformers[-1].get_num_feats() - 1
model = dc.models.ANIRegression(
n_tasks,
13,
n_feat,
atom_number_cases=[1, 6, 7, 8],
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=50)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric], transformers[0:1])
assert scores[regression_metric.name] > .8
def test_DAG_singletask_regression_overfit(self):
"""Test DAG regressor multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
graph = dc.nn.SequentialDAGGraph(n_atom_feat=n_feat, max_atoms=50)
graph.add(dc.nn.DAGLayer(30, n_feat, max_atoms=50, batch_size=batch_size))
graph.add(dc.nn.DAGGather(30, max_atoms=50))
model = dc.models.MultitaskGraphRegressor(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=0.001,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
def test_tensorgraph_DAG_singletask_regression_overfit(self):
"""Test DAG regressor multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
model = dc.models.DAGTensorGraph(
n_tasks,
max_atoms=50,
n_atom_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=50)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
def test_weave_singletask_classification_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
max_atoms = 50
graph = dc.nn.AlternateSequentialWeaveGraph(
batch_size,
max_atoms=max_atoms,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat)
graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 75, 14))
graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 50, 50, update_pair=False))
graph.add(dc.nn.Dense(n_feat, 50, activation='tanh'))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(
dc.nn.AlternateWeaveGather(
batch_size, n_input=n_feat, gaussian_expand=True))
model = dc.models.MultitaskGraphClassifier(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_tensorgraph_weave_singletask_classification_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
model = dc.models.WeaveTensorGraph(
n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
n_graph_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="classification")
# Fit trained model
model.fit(dataset, nb_epoch=20)
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_weave_singletask_regression_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
max_atoms = 50
graph = dc.nn.AlternateSequentialWeaveGraph(
batch_size,
max_atoms=max_atoms,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat)
graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 75, 14))
graph.add(dc.nn.AlternateWeaveLayer(max_atoms, 50, 50, update_pair=False))
graph.add(dc.nn.Dense(n_feat, 50, activation='tanh'))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(
dc.nn.AlternateWeaveGather(
batch_size, n_input=n_feat, gaussian_expand=True))
model = dc.models.MultitaskGraphRegressor(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=40)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
def test_tensorgraph_weave_singletask_regression_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
model = dc.models.WeaveTensorGraph(
n_tasks,
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
n_graph_feat=n_feat,
batch_size=batch_size,
learning_rate=0.001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(dataset, nb_epoch=120)
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
def test_siamese_singletask_classification_overfit(self):
"""Test siamese singletask model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
n_train_trials = 80
support_batch_size = n_pos + n_neg
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .9
assert scores[0] > .75
##################################################### DEBUG
def test_attn_lstm_singletask_classification_overfit(self):
"""Test attn lstm singletask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
support_batch_size = n_pos + n_neg
n_train_trials = 80
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
# Apply an attention lstm layer
support_model.join(
dc.nn.AttnLSTMEmbedding(test_batch_size, support_batch_size, 64,
max_depth))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
# Measure performance on 0-th task.
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .85
assert scores[0] > .79
##################################################### DEBUG
def test_residual_lstm_singletask_classification_overfit(self):
"""Test resi-lstm multitask overfits tiny data."""
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
support_batch_size = n_pos + n_neg
n_train_trials = 80
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
# Apply a residual lstm layer
support_model.join(
dc.nn.ResiLSTMEmbedding(test_batch_size, support_batch_size, 64,
max_depth))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
# Measure performance on 0-th task.
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .9
assert scores[0] > .65
##################################################### DEBUG
def test_tf_progressive_regression_overfit(self):
"""Test tf progressive multitask overfits tiny data."""
np.random.seed(123)
n_tasks = 9
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
# cvstitch.py
# ---------------------------
# Contains the logic for stitching masks. See class doc for details.
import numpy as np
import cv2
import sys
import time
from math import ceil
import matplotlib.pyplot as plt
import warnings
import pandas as pd
class CVMaskStitcher():
"""
Implements basic stitching between mask subtiles of semi-uniform size (see constraints below).
Initialized with the pixel overlap between masks and the threshold over which an overlap is considered
to be one cell, and returns the full set of masks for the passed in rows and cols.
"""
def __init__(self, overlap=80, threshold=8):
self.overlap = overlap
self.threshold = threshold
self.memory_max = 15
#reindexes masks for final stitching so no two masks have same id number
def renumber_masks(self, masks):
prev_max = 0
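# Each non-empty crop is copied and its nonzero ids are presumably shifted by prev_max
# so mask ids never collide across tiles.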
for i, crop in enumerate(masks):
if np.any(crop.astype(bool)):
newcrop = np.copy(crop)
import numpy as np
from jax import numpy as jnp
from ff import nonbonded
from typing import Union, Optional
try:
from scipy.optimize import root_scalar
except ImportError as error:
import scipy
print(f"scipy version is {scipy.__version__}, but `scipy.optimize.root_scalar` was added in 1.2")
raise error
array = Union[np.array, jnp.array]
def _taylor_first_order(x: array, f_x: float, grad: array) -> callable:
"""
Notes:
TODO: is it preferable to use jax linearize? https://jax.readthedocs.io/en/latest/jax.html#jax.linearize
"""
def f_prime(y: array) -> float:
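# First-order Taylor expansion of f around x: f(y) ≈ f(x) + grad · (y - x)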
return f_x + np.dot(grad, y - x)
import time , os ,cv2
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import ntpath
import numpy as np
import skvideo.io
import time
output_video = False
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
print(dataset)
dataset_size = len(data_loader)
print('#training videos = %d' % dataset_size)
model = create_model(opt)
opt.results_dir = './results/'
total_steps = 0
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
def ck_array(i,o):
i_A = np.transpose(127.5*(i['A']+1.)[0],(1,2,3,0))
i_B = np.transpose(127.5*(i['B']+1.)[0],(1,2,3,0))
o_A = o['real_A']
o_B = o['real_B']
a_diff = i_A - o_A
b_diff = i_B - o_B
print('diffa',a_diff.max(),a_diff.min(),a_diff.mean())
print('diffb', b_diff.max(), b_diff.min(), b_diff.mean())
def save_videos(web_dir, visuals, vid_path, epoch):
vid_dir = os.path.join(web_dir, 'videos')
#name = ntpath.basename(vid_path).split('.')[0]
# add data generation time as name
name = time.strftime('%Y%m%d-%H%M%S')
#print("vid_dir: {}".format(vid_dir))
#print("name: {}".format(name))
A = visuals['real_A']
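# Build filler clips by repeating A's last frame and B's / fake's first frames A.shape[0] times each.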
last_A = np.tile(A[-1], (A.shape[0], 1, 1, 1))
#print("A_last shape: {}".format(A[-1].shape))
#print('last_A: {}'.format(last_A.shape))
B = visuals['real_B']
first_B = np.tile(B[0], (A.shape[0], 1, 1, 1))
fake = visuals['fake_B']
first_fake = np.tile(fake[0], (A.shape[0], 1, 1, 1))
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
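            # (added illustration) np.repeat with a per-element count:
            # np.repeat(['a', 'b', 'c'], [1, 2, 3]) -> ['a', 'b', 'b', 'c', 'c', 'c']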
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
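        # (added note) np.asarray(dr) yields datetime64 values whose underlying
        # int64 data TimedeltaIndex reinterprets as offsets from the epoch, so
        # 2016-01-15 becomes 16815 days and 2016-01-20 becomes 16820 days.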
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
        self.assertEqual(np.min(pr), pd.Period('2016-01-15', freq='D'))
# coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.layers.transformer_glow_layers.
1. Actnorm test (zero mean and unit variance).
2. Invertibility tests for:
* actnorm
* actnorm with weight normalization
* 1x1 invertible convolution
* multi-head 1x1 invertible convolution
* affine coupling
* split
* 1 step of flow
* k steps of flow
* entire pipeline (tested up to 3 levels, 32 steps: tca/tca/ca, 12/12/8)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import transformer_glow_layers as glow
from tensor2tensor.layers import transformer_glow_layers_ops as gops
from tensor2tensor.models import transformer
import tensorflow.compat.v1 as tf
BATCH_SIZE = 20
INPUT_LENGTH = 3
TARGET_LENGTH = 16
N_CHANNELS = 256
HIDDEN_SIZE = 64
N_1X1_HEADS = 4
DTYPE = tf.float32
def float32_bottleneck(x):
return tf.cast(tf.cast(x, tf.float32), tf.float64)
def get_diff(l1, l2):
l2 = l2[::-1]
for i1, i2 in zip(l1, l2):
print (i1 - i2)
for i1, i2 in zip(l1, l2):
print (np.max(np.abs(i1 - i2)))
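# --- Added illustration (not part of the original Tensor2Tensor test suite). ---
# The module docstring above says actnorm should (1) produce zero-mean,
# unit-variance activations at initialization and (2) be exactly invertible.
# The numpy-only sketch below demonstrates both properties for a per-channel
# scale/bias transform; the function name is hypothetical and is never called
# by the tests.
def _actnorm_numpy_sketch():
  rng = np.random.RandomState(0)
  x = rng.normal(loc=50.0, scale=10.0,
                 size=(BATCH_SIZE, TARGET_LENGTH, N_CHANNELS))
  flat = x.reshape(-1, N_CHANNELS)
  # data-dependent initialization: per-channel mean and std
  mean, std = flat.mean(axis=0), flat.std(axis=0)
  scale, bias = 1.0 / std, -mean / std
  y = x * scale + bias            # forward: normalized activations
  x_rec = (y - bias) / scale      # inverse: exact reconstruction
  y_flat = y.reshape(-1, N_CHANNELS)
  assert np.allclose(y_flat.mean(axis=0), 0.0, atol=1e-6)
  assert np.allclose(y_flat.std(axis=0), 1.0, atol=1e-6)
  assert np.allclose(x_rec, x, atol=1e-6)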
class TransformerGlowLayersTest(parameterized.TestCase, tf.test.TestCase):
def get_hparams(self):
hparams = transformer.transformer_small()
hparams.add_hparam("prior_type", "affine")
hparams.add_hparam("factor", 2) # squeezing factor
hparams.add_hparam("n_layers_transform_params", 1)
hparams.add_hparam("n_1x1_heads", N_1X1_HEADS)
hparams.add_hparam("flow_num_1x1_heads", 4)
hparams.add_hparam("flow_num_heads", 4)
hparams.add_hparam("flow_hidden_size", 64)
hparams.add_hparam("flow_filter_size", 128)
hparams.add_hparam("flow_layer_prepostprocess_dropout", 0.0)
hparams.add_hparam("flow_attention_dropout", 0.0)
hparams.add_hparam("flow_relu_dropout", 0.0)
hparams.add_hparam("latent_size", N_CHANNELS)
hparams.add_hparam("use_weightnorm", True)
hparams.add_hparam("kl_startup_steps", 2000)
hparams.add_hparam("affine_scale", "glow")
hparams.add_hparam("scale_width", 0.999)
hparams.add_hparam("step_fn", "glow") # glow / chunting
hparams.add_hparam("conv_fn", "np") # np / tf
hparams.add_hparam("posterior_type", "diagonal_normal")
hparams.causal_decoder_self_attention = False
hparams.hidden_size = HIDDEN_SIZE
hparams.weight_dtype = "float32"
hparams.add_hparam("pos_attn", False)
return hparams
def get_data(self):
x = tf.random_normal(
(BATCH_SIZE, TARGET_LENGTH, N_CHANNELS), dtype=DTYPE)
x_lengths = np.random.randint(
low=1, high=TARGET_LENGTH+1, size=BATCH_SIZE)
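    # (added note, an inference from the hparams rather than original text)
    # lengths are rounded up to multiples of 4 below so that squeezing by
    # hparams.factor = 2 at successive levels divides every masked length evenly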
x_lengths = np.ceil(x_lengths / 4.0) * 4.0
x_lengths = x_lengths.astype(int)
x_mask = tf.sequence_mask(x_lengths, maxlen=TARGET_LENGTH, dtype=DTYPE)
return x, x_mask, x_lengths
def get_kwargs(self, x_mask, hparams=None):
if hparams is None:
hparams = self.get_hparams()
encoder_output = tf.random.uniform(
(BATCH_SIZE, INPUT_LENGTH, HIDDEN_SIZE), dtype=DTYPE)
encoder_decoder_attention_bias = tf.zeros(
(BATCH_SIZE, 1, 1, INPUT_LENGTH), dtype=DTYPE)
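    # (added note) padded positions (x_mask == 0) receive a large negative bias
    # so the attention softmax effectively ignores them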
decoder_self_attention_bias = 1.0 - x_mask[:, tf.newaxis, tf.newaxis, :]
decoder_self_attention_bias *= -1e9
kwargs = {"hparams": hparams,
"encoder_output": encoder_output,
"encoder_decoder_attention_bias": encoder_decoder_attention_bias,
"decoder_self_attention_bias": decoder_self_attention_bias}
return kwargs
def test_actnorm(self):
_, x_mask, _ = self.get_data()
x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, N_CHANNELS),
mean=50.0, stddev=10.0, dtype=DTYPE)
x_act, logabsdet = glow.actnorm(
"actnorm", x, x_mask, inverse=False, init=True)
x_act_nopad = tf.boolean_mask(x_act, x_mask)
x_mean, x_var = tf.nn.moments(x_act_nopad, axes=[0])
self.evaluate(tf.global_variables_initializer())
x, x_act, logabsdet, x_mean, x_var = (
self.evaluate([x, x_act, logabsdet, x_mean, x_var]))
self.assertEqual(x_act.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS))
self.assertEqual(logabsdet.shape, (BATCH_SIZE,))
self.assertTrue(np.allclose(x_mean, 0.0, atol=1e-5))
self.assertTrue(np.allclose(x_var, 1.0, atol=1e-5))
def test_actnorm_invertibility(self):
name = "actnorm"
x, x_mask, _ = self.get_data()
x_inv, logabsdet = glow.actnorm(
name, x, x_mask, inverse=False, init=False)
x_inv_inv, logabsdet_inv = glow.actnorm(
name, x_inv, x_mask, inverse=True, init=False)
self.evaluate(tf.global_variables_initializer())
x, x_inv, x_inv_inv, x_mask, logabsdet, logabsdet_inv = (
self.evaluate(
[x, x_inv, x_inv_inv, x_mask, logabsdet, logabsdet_inv]))
diff = x - x_inv_inv
logabsdet_sum = logabsdet + logabsdet_inv
self.assertEqual(x.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS))
self.assertEqual(x_inv.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS))
self.assertEqual(x_inv_inv.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS))
self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=1e-5))
@parameterized.parameters(
(glow.multihead_invertible_1x1_conv_np, "a"),
(glow.multihead_invertible_1x1_conv_np, "c"),
)
def test_multi_1x1_invertibility(
self, func, multihead_split):
name = "multi_1x1"
x, x_mask, _ = self.get_data()
x_inv, logabsdet = func(
name, x, x_mask, multihead_split, inverse=False, dtype=DTYPE)
x_inv_inv, logabsdet_inv = func(
name, x_inv, x_mask, multihead_split, inverse=True, dtype=DTYPE)
self.evaluate(tf.global_variables_initializer())
x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv = (
self.evaluate(
[x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv]))
diff = x - x_inv_inv
logabsdet_sum = logabsdet + logabsdet_inv
logabsdet_ = logabsdet / np.sum(x_mask, -1)
self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
self.assertTrue(np.allclose(logabsdet_, 0.0, atol=1e-5))
self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=1e-5))
@parameterized.parameters(
(glow.additive_coupling, "c"),
(glow.additive_coupling, "t"),
(glow.additive_coupling, "a"),
(glow.affine_coupling, "c"),
(glow.affine_coupling, "t"),
(glow.affine_coupling, "a"),
)
def test_coupling_invertibility(self, func, split_dim):
name = "affine"
x, x_mask, _ = self.get_data()
kwargs = self.get_kwargs(x_mask)
x_inv, logabsdet = func(
name, x, x_mask, split_dim=split_dim,
identity_first=True, inverse=False, init=False, disable_dropout=True,
**kwargs)
x_inv_inv, logabsdet_inv = func(
name, x_inv, x_mask, split_dim=split_dim,
identity_first=True, inverse=True, init=False, disable_dropout=True,
**kwargs)
self.evaluate(tf.global_variables_initializer())
x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv = (
self.evaluate(
[x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv]))
diff = x - x_inv_inv
logabsdet_sum = logabsdet + logabsdet_inv
self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=1e-5))
def test_split(self):
x, x_mask, _ = self.get_data()
x_inv, z, log_p = glow.split(
"split", x, x_mask, inverse=False)
x_inv_inv, _, log_p_inv = glow.split(
"split", x_inv, x_mask, z=z, inverse=True)
self.evaluate(tf.global_variables_initializer())
x, x_inv, x_inv_inv, z, log_p, log_p_inv = self.evaluate(
[x, x_inv, x_inv_inv, z, log_p, log_p_inv])
diff = x - x_inv_inv
log_p_diff = log_p - log_p_inv
self.assertEqual(
x_inv.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS//2))
self.assertEqual(
z.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS//2))
self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
self.assertTrue(np.allclose(log_p_diff, 0.0, atol=1e-5))
def test_flow_invertibility(self):
name = "flow_step"
split_dims = "cat"
x, x_mask, _ = self.get_data()
kwargs = self.get_kwargs(x_mask)
x_inv, logabsdet = glow.flow_step_glow(
name, x, x_mask, split_dims, inverse=False, init=False, dtype=DTYPE,
disable_dropout=True, **kwargs)
x_inv_inv, logabsdet_inv = glow.flow_step_glow(
name, x_inv, x_mask, split_dims, inverse=True, init=False,
dtype=DTYPE, disable_dropout=True, **kwargs)
self.evaluate(tf.global_variables_initializer())
x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv = (
self.evaluate(
[x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv]))
diff = x - x_inv_inv
logabsdet_sum = logabsdet + logabsdet_inv
self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=1e-5))
@parameterized.parameters(
("1", "cat", "affine"),
("1/1", "cat/cat", "affine"),
("1/1/1", "cat/cat/ca", "affine"),
)
def test_aaa_glow_training(self, depths, split_plans, prior_type):
with tf.Graph().as_default():
_, x_mask, _ = self.get_data()
x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, N_CHANNELS),
mean=10.0, stddev=3.0, dtype=DTYPE)
bias = common_attention.attention_bias_ignore_padding(1.0 - x_mask)
hparams = self.get_hparams()
hparams.prior_type = prior_type
hparams.depths = depths
hparams.split_plans = split_plans
n_levels = len(hparams.depths.split("/"))
kwargs = self.get_kwargs(x_mask, hparams)
_ = kwargs.pop("decoder_self_attention_bias")
x_inv, _, _, _ = glow.glow(
"glow", x, x_mask, bias, inverse=False, init=True,
disable_dropout=True, **kwargs)
curr_dir = tempfile.mkdtemp()
model_path = os.path.join(curr_dir, "model")
with tf.Session() as session:
saver = tf.train.Saver()
session.run(tf.global_variables_initializer())
session.run(x_inv)
saver.save(session, model_path)
with tf.Graph().as_default():
_, x_mask, _ = self.get_data()
x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, N_CHANNELS),
mean=10.0, stddev=3.0, dtype=DTYPE)
bias = common_attention.attention_bias_ignore_padding(1.0 - x_mask)
hparams = self.get_hparams()
hparams.depths = depths
hparams.split_plans = split_plans
kwargs = self.get_kwargs(x_mask, hparams)
_ = kwargs.pop("decoder_self_attention_bias")
log_q_z = gops.standard_normal_density(x, x_mask)
log_q_z = tf.reduce_sum(log_q_z) / tf.reduce_sum(x_mask)
x_inv, logabsdets, log_ps, zs = glow.glow(
"glow", x, x_mask, bias, inverse=False, init=False,
disable_dropout=True, **kwargs)
x_inv_inv, logabsdets_inv, log_ps_inv, _ = glow.glow(
"glow", x_inv, x_mask, bias, inverse=True, split_zs=zs, init=False,
disable_dropout=True, **kwargs)
logabsdets = tf.reduce_sum(
logabsdets, axis=0) / tf.reduce_sum(x_mask)
logabsdets_inv = tf.reduce_sum(
logabsdets_inv, axis=0) / tf.reduce_sum(x_mask)
log_ps = tf.reduce_sum(log_ps, axis=0) / tf.reduce_sum(x_mask)
log_ps_inv = tf.reduce_sum(log_ps_inv, axis=0) / tf.reduce_sum(x_mask)
with tf.Session() as session:
saver = tf.train.Saver()
saver.restore(session, model_path)
(x, x_inv, x_inv_inv, log_q_z, logabsdets, log_ps,
logabsdets_inv, log_ps_inv) = session.run([
x, x_inv, x_inv_inv, log_q_z, logabsdets, log_ps,
logabsdets_inv, log_ps_inv])
diff = x - x_inv_inv
log_ps_diff = log_ps - log_ps_inv
logabsdets_sum = logabsdets + logabsdets_inv
self.assertEqual(
x_inv.shape,
(BATCH_SIZE, TARGET_LENGTH//(2**(n_levels-1)), N_CHANNELS))
print (np.max(np.abs(diff)))
print (np.max(np.abs(log_ps_diff)))
print (np.max(np.abs(logabsdets_sum)))
self.assertTrue(np.allclose(diff, 0.0, atol=1e-4),
msg=np.max( | np.abs(diff) | numpy.abs |
#!/usr/bin/env python
"""Tests for `mpipartition` package."""
import pytest
from mpipartition import Partition, distribute, overload
import numpy as np
def _overloading(dimensions, n, ol):
assert dimensions < 7
labels = "xyzuvw"[:dimensions]
partition = Partition(dimensions)
for i in range(dimensions):
assert ol < partition.extent[i]
rank = partition.rank
nranks = partition.nranks
| np.random.seed(rank) | numpy.random.seed |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
def __init__(self, idf, list=None, typ='') :
self.recordings = []
self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
M,K sequence of sleep states, sequence of
0's and 1's indicating non-annotated and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
if re.match('\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('\s*#', l) :
continue
if re.match('\d+\s+-?\d+', l) :
a = re.split('\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
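# Illustrative file format (hypothetical content, added for clarity): each
# data line of 'remidx_<name>.txt' holds a sleep-state code followed by an
# annotation flag, e.g.
#   2   1
#   2   1
#   3   0
# which load_stateidx() returns as M = array([2, 2, 3]) and K = array([1, 1, 0]).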
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
(list of controls, list of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('^\s*#', l) :
continue
a = re.split('\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
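# Illustrative usage (paths and recording names are hypothetical):
# a recordings file containing
#   C   J12_081520n1
#   E   J13_081520n1
#   # lines starting with '#' are ignored
# yields ctr_list = ['J12_081520n1'] and exp_list = ['J13_081520n1']:
# ctr_list, exp_list = load_recordings('/data/sleep', 'recordings.txt')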
def load_dose_recordings(ppath, rec_file):
"""
load recording list with the following syntax:
A line is either a control or an experiment; control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
if re.search('^\s+$', l):
continue
if re.search('^\s*#', l):
continue
a = re.split('\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
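# Illustrative usage (names and doses are hypothetical): a dose file with
#   C   J10_0815n1
#   E   J11_0815n1   0.25
#   E   J12_0815n1   0.5
# returns ctr_list = ['J10_0815n1'] and
# doses = {'0.25': ['J11_0815n1'], '0.5': ['J12_0815n1']}.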
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + 'SR' + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
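# Illustrative info.txt entry (hypothetical values): a line such as
#   SR:    1000
# makes get_snr(ppath, name) return 1000.0.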
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + field + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return values
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: parameter specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
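# Illustrative call (hypothetical values): add_infoparam(ifile, 'conversion', [0.195])
# appends the line "conversion:\t0.195" to the info file.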
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
if you was stimulated for 2min every 20 min with 20 Hz, return the
start and end index of the each 2min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
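# Illustrative check (added for clarity; not part of the original module):
# two pulse trains separated by more than intval seconds, with SR=1 Hz to
# keep the index arithmetic simple.
def _demo_laser_start_end():
    laser = np.array([0, 1, 1, 0, 0, 0, 1, 1, 1, 0])
    # returns (array([1, 6]), array([2, 8])): trains span indices 1-2 and 6-8
    return laser_start_end(laser, SR=1, intval=2)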
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
inter-stimulation intervals, avg. inter-stimulation interval, frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
The conversion is only executed if no conversion
factor is specified in the info file yet.
:param ppath: base folder
:param rec: recording
:param conv_factor: conversion factor
:return: None
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
iv - minimum time interval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
The nice thing about this filter is that it has zero phase distortion.
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
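# Worked example (hypothetical signal 'eeg'): a 20 Hz cutoff at a 1000 Hz
# sampling rate corresponds to w0 = 2 * 20 / 1000 = 0.04, i.e.
# eeg_low = my_lpfilter(eeg, 0.04)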
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
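# Worked example (hypothetical signal 'eeg'): a delta-band filter (0.5-4 Hz)
# at a 1000 Hz sampling rate uses w0 = 2*0.5/1000 = 0.001 and
# w1 = 2*4/1000 = 0.008, i.e. delta = my_bpfilter(eeg, 0.001, 0.008)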
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
downsample the vector x by replacing nbin consecutive
bins by their mean
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
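# Illustrative check (added for clarity; not part of the original module):
def _demo_downsample_vec():
    x = np.array([0., 1., 2., 3., 4., 5.])
    # blocks (0,1,2) and (3,4,5) are replaced by their means 1.0 and 4.0
    return downsample_vec(x, 3)   # -> array([1., 4.])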
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in | np.arange(-L, L+1.) | numpy.arange |
#
# BSD 3-Clause License
#
# Copyright (c) 2022 University of Wisconsin - Madison
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.#
import torch
import torchvision
# import torch.nn as nn
# import torch.optim as optim
import torchvision.transforms as transforms
import time
import os
import glob
import numpy as np
from PIL import Image, ImageEnhance, ImageFilter
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# import cv2
class SegImgLoader():
def __init__(self, data_root, max_samples=-1, img_format=".png", seg_format=".png"):
self.imgs = []
self.seg_imgs = []
self.data_root = data_root
self.img_dir = os.path.join(data_root, "imgs")
self.seg_dir = os.path.join(data_root, "seg_imgs")
self.max_samples = max_samples
if(not os.path.exists(self.img_dir)):
print("Error, img directory not found: {}".format(self.img_dir))
exit(1)
if(not os.path.exists(self.seg_dir)):
print("Error, segmented img directory not found: {}".format(self.seg_dir))
exit(1)
self.imgs = glob.glob(self.img_dir + "/*"+img_format)
if(len(self.imgs) == 0):
print("Error: no images found in {}".format(
os.path.join(self.data_root, "imgs/*"+img_format)))
exit(1)
for f in self.imgs:
basename = os.path.splitext(os.path.basename(f))[0]
self.seg_imgs.append(os.path.join(
self.seg_dir, basename+seg_format))
if(self.max_samples > 0 and self.max_samples < len(self.imgs)):
self.imgs = self.imgs[0:self.max_samples]
self.seg_imgs = self.seg_imgs[0:self.max_samples]
print("Data loaded. Imgs={}, Seg Imgs={}".format(
len(self.imgs), len(self.seg_imgs)))
def ConvertSegToBoxes(self, semantic_maps):
# boxes = np.asarray(
# [0, 0, 1, 1]*self.max_boxes).reshape((self.max_boxes, 4))
# labels = np.zeros(self.max_boxes).reshape(
# (self.max_boxes)).astype(np.int64)
boxes = []
labels = []
box_count = 0
for c in range(1, np.max(semantic_maps[:, :, 0])+1):
for i in range(1, np.max(semantic_maps[:, :, 1])+1):
indices = np.where(
np.logical_and(semantic_maps[:, :, 1] == i, semantic_maps[:, :, 0] == c))
if(indices[0].shape[0] > 1):
y0 = np.min(indices[0])
y1 = np.max(indices[0])
x0 = np.min(indices[1])
x1 = np.max(indices[1])
if(x1 > x0 and y1 > y0 ):
#change x0,y0,x1,y1 to normalized center (x,y) and width, height
x_center = .5 * (x0 + x1) / float(semantic_maps.shape[1])
y_center = .5 * (y0 + y1) / float(semantic_maps.shape[0])
x_size = (x1-x0) / float(semantic_maps.shape[1])
y_size = (y1-y0) / float(semantic_maps.shape[0])
# boxes.append(np.array([x0, y0, x1, y1]))
boxes.append(np.array([x_center, y_center, x_size, y_size]))
labels.append(c)
box_count += 1
boxes = np.asarray(boxes)#.astype(np.int32)
labels = np.asarray(labels).astype(np.int32)
return boxes, labels
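# Illustrative result (values are hypothetical): for a 100x200 segmentation
# map in which class 1, instance 1 covers rows 20..60 and columns 50..150,
# the method returns the normalized box
# [x_center, y_center, width, height] = [0.5, 0.4, 0.5, 0.4] with label 1.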
def GenerateAAVBBFromSeg(self,label_format=".txt"):
label_dir = os.path.join(self.data_root, "labels")
if(not os.path.exists(label_dir)):
os.mkdir(label_dir)
for i in range(len(self.imgs)):
#load segmentation img
seg_img = np.array(Image.open(self.seg_imgs[i])).view(np.uint16)[:, :, :]
#generate boxes and labels from segmentation img
boxes,classes = self.ConvertSegToBoxes(seg_img)
if(len(classes)>0):
classes -= np.ones(classes.shape).astype(np.int32)
classes = np.reshape(classes, (len(classes),1))
output = np.append(classes,boxes,axis=1)
basename = os.path.splitext(os.path.basename(self.imgs[i]))[0]
file_name = os.path.join(label_dir, basename+label_format)
np.savetxt(file_name,output,fmt='%.6f')
print("Generated AABB file {}/{}".format(i+1,len(self.imgs)))
class ObjectDetectionImgLoader(torch.utils.data.Dataset):
def __init__(self, data_root, max_boxes, apply_transforms=False, max_samples=-1, img_format=".png", box_format=".txt"):
self.name = "Object Detection Image Loader"
self.data_root = data_root
self.max_boxes = max_boxes
self.max_samples = max_samples
self.img_dir = os.path.join(self.data_root, "imgs")
self.label_dir = os.path.join(self.data_root, "labels")
self.imgs = []
self.labels = []
#transform parameters
self.use_transforms = apply_transforms
np.random.seed(1)
self.flip_prob = 0.5
self.max_translation = (0.2,0.2)
self.brightness = (0.75,1.33)
self.sharpness = (0.25,4.0)
self.saturation = (0.75,1.33)
self.contrast = (0.75,1.33)
self.max_zoom = 2.0
if(not os.path.exists(self.data_root)):
print("Error: directory not found. Data root = {}".format(self.data_root))
exit(1)
if(not os.path.exists(self.img_dir)):
print("Error: directory not found. Image directory = {}".format(self.img_dir))
exit(1)
if(not os.path.exists(self.label_dir)):
print("Error: directory not found. Label directory = {}".format(self.label_dir))
exit(1)
self.imgs = glob.glob(os.path.join(self.data_root, "imgs/*"))
if(len(self.imgs) == 0):
print("Error: no images found in {}".format(
os.path.join(self.data_root, "imgs/*")))
exit(1)
for f in self.imgs:
basename = os.path.splitext(os.path.basename(f))[0]
self.labels.append(os.path.join(self.label_dir, basename+box_format))
if(self.max_samples > 0 and self.max_samples < len(self.imgs)):
self.imgs = self.imgs[0:self.max_samples]
self.labels = self.labels[0:self.max_samples]
print("Data loaded. Imgs={}, Labels={}".format(
len(self.imgs), len(self.labels)))
def __len__(self):
return len(self.imgs)
def ApplyTransforms(self,img,boxes,classes):
#get height and width parameters
height = np.asarray(img).shape[0]
width = np.asarray(img).shape[1]
#=== random horizontal flip ===
if(np.random.rand() > self.flip_prob):
#flip image horizontally
img = img.transpose(Image.FLIP_LEFT_RIGHT)
#flip boxes horizontally
boxes_x_0 = width - 1 - boxes[:,0]
boxes_x_1 = width - 1 - boxes[:,2]
boxes[:,0] = boxes_x_1
boxes[:,2] = boxes_x_0
#=== random zoom and crop ===
zoom = np.random.uniform(1.0,self.max_zoom)
img = img.resize((int(zoom*width),int(zoom*height)),resample=Image.BILINEAR)
crop_pt = np.random.uniform(0.0,0.9,(2))
# crop_pt = [0,0]
crop_pt = (crop_pt * (zoom*width - width, zoom*height - height)).astype(int)
crop_pt[0] = np.clip(crop_pt[0],0,img.size[0] - width)
crop_pt[1] = np.clip(crop_pt[1],0,img.size[1] - height)
# img = img[crop_pt[1],crop_pt[1]+height,crop_pt[0],crop_pt[0]+width,:]
img = img.crop((crop_pt[0],crop_pt[1],crop_pt[0]+width,crop_pt[1]+height))
boxes = (boxes * zoom).astype(int)
boxes = boxes - (crop_pt[0],crop_pt[1],crop_pt[0],crop_pt[1])
boxes[:,0] = np.clip(boxes[:,0],0,width-1) #clip x0 value
boxes[:,2] = np.clip(boxes[:,2],0,width-1) #clip x1 value
boxes[:,1] = np.clip(boxes[:,1],0,height-1) #clip y0 value
boxes[:,3] = np.clip(boxes[:,3],0,height-1) #clip y1 value
for i in range(len(classes)):
#if box moved out of image, set label to 0
if(abs(boxes[i,0] - boxes[i,2]) < 0.5 or abs(boxes[i,1] - boxes[i,3]) < 0.5):
classes[i] = 0 #reset label
boxes[i,:] = np.array([0,1,0,1]) #reset to valid box coords
#=== random translation ===
#get translation parameters
t_x,t_y = np.random.uniform(-1,1,size=2) * self.max_translation * (height,width)
t_x = int(t_x)
t_y = int(t_y)
#apply translation to img
img = img.transform(img.size,Image.AFFINE,(1,0,t_x,0,1,t_y))
# img = img.rotate(1,translate=(t_x,t_y))
#apply translation to boxes, cutting any that fully leave the image
boxes = boxes - np.array([t_x,t_y,t_x,t_y])
boxes[:,0] = np.clip(boxes[:,0],0,width-1) #clip x0 value
boxes[:,2] = np.clip(boxes[:,2],0,width-1) #clip x1 value
boxes[:,1] = np.clip(boxes[:,1],0,height-1) #clip y0 value
boxes[:,3] = np.clip(boxes[:,3],0,height-1) #clip y1 value
for i in range(len(classes)):
#if box moved out of image, set label to 0
if(abs(boxes[i,0] - boxes[i,2]) < 0.5 or abs(boxes[i,1] - boxes[i,3]) < 0.5):
classes[i] = 0 #reset label
boxes[i,:] = np.array([0,1,0,1]) #reset to valid box coords
#=== random brightness, hue, saturation changes ===
brighten = ImageEnhance.Brightness(img)
img = brighten.enhance(np.random.uniform(self.brightness[0],self.brightness[1],size=1))
sharpen = ImageEnhance.Sharpness(img)
img = sharpen.enhance(np.random.uniform(self.sharpness[0],self.sharpness[1],size=1))
saturate = ImageEnhance.Color(img)
img = saturate.enhance(np.random.uniform(self.saturation[0],self.saturation[1],size=1))
contrast = ImageEnhance.Contrast(img)
img = contrast.enhance(np.random.uniform(self.contrast[0],self.contrast[1],size=1))
return img,boxes,classes
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# load files
img = Image.open(self.imgs[idx])
boxes = np.asarray([.5, .5, .2, .2]*self.max_boxes).reshape((self.max_boxes, 4))
classes = np.zeros(self.max_boxes)
#if the boxes file doesn't exist, it means there were no boxes in that image
if(not os.path.exists(self.labels[idx])):
#ensure correct datatypes
img = np.asarray(img)/255.0
img = | np.transpose(img, (2, 0, 1)) | numpy.transpose |
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List
import numpy as np
from scipy.linalg import sqrtm
from piquasso.api.config import Config
from piquasso.api.errors import InvalidState, InvalidParameter, PiquassoException
from piquasso.api.state import State
from piquasso._math.functions import gaussian_wigner_function
from piquasso._math.linalg import (
is_symmetric,
is_positive_semidefinite,
)
from piquasso._math.symplectic import symplectic_form, xp_symplectic_form
from piquasso._math.combinatorics import get_occupation_numbers
from piquasso._math.transformations import from_xxpp_to_xpxp_transformation_matrix
from .probabilities import (
DensityMatrixCalculation,
DisplacedDensityMatrixCalculation,
NondisplacedDensityMatrixCalculation,
)
class GaussianState(State):
r"""Class to represent a Gaussian state."""
def __init__(self, d: int, config: Config = None) -> None:
"""
Args:
d (int): The number of modes.
"""
super().__init__(config=config)
self._d = d
self.reset()
def __len__(self) -> int:
return self._d
@property
def d(self) -> int:
return len(self)
def reset(self) -> None:
r"""Resets the state to a vacuum."""
vector_shape = (self.d,)
matrix_shape = vector_shape * 2
self._m = np.zeros(vector_shape, dtype=complex)
self._G = np.zeros(matrix_shape, dtype=complex)
self._C = np.zeros(matrix_shape, dtype=complex)
@classmethod
def _from_representation(
cls,
*,
m: np.ndarray,
G: np.ndarray,
C: np.ndarray,
config: Config,
) -> "GaussianState":
obj = cls(d=len(m), config=config)
obj._m = m
obj._G = G
obj._C = C
return obj
def __eq__(self, other: object) -> bool:
if not isinstance(other, GaussianState):
return False
return (
| np.allclose(self._C, other._C) | numpy.allclose |
import pytest
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.testing as pdt
from scipy.stats import logistic
import zepid as ze
from zepid import (RiskRatio, RiskDifference, OddsRatio, NNT, IncidenceRateRatio, IncidenceRateDifference,
Sensitivity, Specificity, Diagnostics, interaction_contrast, interaction_contrast_ratio, spline,
table1_generator)
from zepid.calc import sensitivity, specificity
@pytest.fixture
def data_set():
df = pd.DataFrame()
df['exp'] = [1]*50 + [0]*50
df['dis'] = [1]*25 + [0]*25 + [1]*25 + [0]*25
return df
@pytest.fixture
def multi_exposures():
df = pd.DataFrame()
df['exp'] = [1]*50 + [0]*50 + [2]*50
df['dis'] = [1]*25 + [0]*25 + [1]*25 + [0]*25 + [1]*25 + [0]*25
return df
@pytest.fixture
def time_data():
df = pd.DataFrame()
df['exp'] = [1]*50 + [0]*50
df['dis'] = [1]*6 + [0]*44 + [1]*14 + [0]*36
df['t'] = [2]*50 + [8]*50
return df
class TestRiskRatio:
def test_risk_ratio_reference_equal_to_1(self, data_set):
rr = RiskRatio()
rr.fit(data_set, exposure='exp', outcome='dis')
assert rr.risk_ratio[0] == 1
def test_risk_ratio_equal_to_1(self, data_set):
rr = RiskRatio()
rr.fit(data_set, exposure='exp', outcome='dis')
assert rr.risk_ratio[1] == 1
def test_multiple_exposures(self, multi_exposures):
rr = RiskRatio()
rr.fit(multi_exposures, exposure='exp', outcome='dis')
assert rr.results.shape[0] == 3
assert list(rr.results.index) == ['Ref:0', '1', '2']
def test_match_sas_ci(self, data_set):
sas_ci = 0.6757, 1.4799
rr = RiskRatio()
rr.fit(data_set, exposure='exp', outcome='dis')
df = rr.results
npt.assert_allclose(np.round(df.loc[df.index == '1'][['RR_LCL', 'RR_UCL']], 4), [sas_ci])
def test_match_sas_sampledata(self):
sas_rd = 0.742118331
sas_se = 0.312612740
sas_ci = 0.402139480, 1.369523870
df = ze.load_sample_data(False)
rr = RiskRatio()
rr.fit(df, exposure='art', outcome='dead')
npt.assert_allclose(rr.risk_ratio[1], sas_rd, rtol=1e-5)
rf = rr.results
npt.assert_allclose(rf.loc[rf.index == '1'][['RR_LCL', 'RR_UCL']], [sas_ci], rtol=1e-5)
npt.assert_allclose(rf.loc[rf.index == '1'][['SD(RR)']], sas_se, rtol=1e-5)
class TestRiskDifference:
def test_risk_difference_reference_equal_to_0(self, data_set):
rd = RiskDifference()
rd.fit(data_set, exposure='exp', outcome='dis')
assert rd.risk_difference[0] == 0
def test_risk_difference_equal_to_0(self, data_set):
rd = RiskDifference()
rd.fit(data_set, exposure='exp', outcome='dis')
assert rd.risk_difference[1] == 0
def test_multiple_exposures(self, multi_exposures):
rd = RiskDifference()
rd.fit(multi_exposures, exposure='exp', outcome='dis')
assert rd.results.shape[0] == 3
assert list(rd.results.index) == ['Ref:0', '1', '2']
def test_match_sas_ci(self, data_set):
sas_ci = -0.195996398, 0.195996398
rd = RiskDifference()
rd.fit(data_set, exposure='exp', outcome='dis')
df = rd.results
npt.assert_allclose(df.loc[df.index == '1'][['RD_LCL', 'RD_UCL']], [sas_ci])
def test_match_sas_se(self, data_set):
sas_se = 0.1
rd = RiskDifference()
rd.fit(data_set, exposure='exp', outcome='dis')
df = rd.results
npt.assert_allclose(df.loc[df.index == '1'][['SD(RD)']], sas_se)
def test_match_sas_sampledata(self):
sas_rr = -0.045129870
sas_se = 0.042375793
sas_ci = -0.128184899, 0.037925158
df = ze.load_sample_data(False)
rd = RiskDifference()
rd.fit(df, exposure='art', outcome='dead')
npt.assert_allclose(rd.risk_difference[1], sas_rr)
rf = rd.results
npt.assert_allclose(rf.loc[rf.index == '1'][['RD_LCL', 'RD_UCL']], [sas_ci])
npt.assert_allclose(rf.loc[rf.index == '1'][['SD(RD)']], sas_se)
def test_frechet_bounds(self):
df = ze.load_sample_data(False)
rd = RiskDifference()
rd.fit(df, exposure='art', outcome='dead')
npt.assert_allclose(rd.results['UpperBound'][1] - rd.results['LowerBound'][1], 1.0000)
def test_frechet_bounds2(self, multi_exposures):
rd = RiskDifference()
rd.fit(multi_exposures, exposure='exp', outcome='dis')
npt.assert_allclose(rd.results['UpperBound'][1:] - rd.results['LowerBound'][1:], [1.0000, 1.0000])
class TestOddsRatio:
def test_odds_ratio_reference_equal_to_1(self, data_set):
ord = OddsRatio()
ord.fit(data_set, exposure='exp', outcome='dis')
assert ord.odds_ratio[0] == 1
def test_odds_ratio_equal_to_1(self, data_set):
ord = OddsRatio()
ord.fit(data_set, exposure='exp', outcome='dis')
assert ord.odds_ratio[1] == 1
def test_multiple_exposures(self, multi_exposures):
ord = OddsRatio()
ord.fit(multi_exposures, exposure='exp', outcome='dis')
assert ord.results.shape[0] == 3
assert list(ord.results.index) == ['Ref:0', '1', '2']
def test_match_sas_ci(self, data_set):
sas_ci = 0.4566, 2.1902
ord = OddsRatio()
ord.fit(data_set, exposure='exp', outcome='dis')
df = ord.results
npt.assert_allclose(df.loc[df.index == '1'][['OR_LCL', 'OR_UCL']], [sas_ci], rtol=1e-4)
def test_match_sas_sampledata(self):
sas_or = 0.7036
sas_se = 0.361479191
sas_ci = 0.3465, 1.4290
df = ze.load_sample_data(False)
ord = OddsRatio()
ord.fit(df, exposure='art', outcome='dead')
npt.assert_allclose(ord.odds_ratio[1], sas_or, rtol=1e-4)
rf = ord.results
npt.assert_allclose(rf.loc[rf.index == '1'][['OR_LCL', 'OR_UCL']], [sas_ci], rtol=1e-3)
npt.assert_allclose(rf.loc[rf.index == '1'][['SD(OR)']], sas_se, rtol=1e-4)
class TestNNT:
def test_return_infinity(self, data_set):
nnt = NNT()
nnt.fit(data_set, exposure='exp', outcome='dis')
assert np.isinf(nnt.number_needed_to_treat[1])
def test_match_inverse_of_risk_difference(self):
df = ze.load_sample_data(False)
rd = RiskDifference()
rd.fit(df, exposure='art', outcome='dead')
nnt = NNT()
nnt.fit(df, exposure='art', outcome='dead')
npt.assert_allclose(nnt.number_needed_to_treat[1], 1/rd.risk_difference[1])
rf = rd.results
nf = nnt.results
npt.assert_allclose(nf.loc[nf.index == '1'][['NNT_LCL', 'NNT_UCL']],
1 / rf.loc[rf.index == '1'][['RD_LCL', 'RD_UCL']])
npt.assert_allclose(nf.loc[nf.index == '1'][['SD(RD)']], rf.loc[rf.index == '1'][['SD(RD)']])
def test_multiple_exposures(self, multi_exposures):
nnt = NNT()
nnt.fit(multi_exposures, exposure='exp', outcome='dis')
assert nnt.results.shape[0] == 3
assert list(nnt.results.index) == ['Ref:0', '1', '2']
class TestIncidenceRateRatio:
def test_incidence_rate_ratio_reference_equal_to_1(self, time_data):
irr = IncidenceRateRatio()
irr.fit(time_data, exposure='exp', outcome='dis', time='t')
assert irr.incidence_rate_ratio[0] == 1
def test_incidence_rate_ratio_equal_to_expected(self, time_data):
sas_irr = 1.714285714
sas_se = 0.487950036
sas_ci = 0.658778447, 4.460946657
irr = IncidenceRateRatio()
irr.fit(time_data, exposure='exp', outcome='dis', time='t')
npt.assert_allclose(irr.incidence_rate_ratio[1], sas_irr, rtol=1e-4)
rf = irr.results
npt.assert_allclose(rf.loc[rf.index == '1'][['IRR_LCL', 'IRR_UCL']], [sas_ci], rtol=1e-4)
npt.assert_allclose(rf.loc[rf.index == '1'][['SD(IRR)']], sas_se, rtol=1e-4)
def test_multiple_exposures(self):
df = pd.DataFrame()
df['exp'] = [1]*50 + [0]*50 + [2]*50
df['dis'] = [1]*25 + [0]*25 + [1]*25 + [0]*25 + [1]*25 + [0]*25
df['t'] = 2
irr = IncidenceRateRatio()
irr.fit(df, exposure='exp', outcome='dis', time='t')
assert irr.results.shape[0] == 3
assert list(irr.results.index) == ['Ref:0', '1', '2']
def test_match_sas_sampledata(self):
sas_irr = 0.753956
sas_se = 0.336135409
sas_ci = 0.390146, 1.457017
df = ze.load_sample_data(False)
irr = IncidenceRateRatio()
irr.fit(df, exposure='art', outcome='dead', time='t')
npt.assert_allclose(irr.incidence_rate_ratio[1], sas_irr, rtol=1e-5)
rf = irr.results
npt.assert_allclose(rf.loc[rf.index == '1'][['IRR_LCL', 'IRR_UCL']], [sas_ci], rtol=1e-5)
npt.assert_allclose(rf.loc[rf.index == '1'][['SD(IRR)']], sas_se, rtol=1e-5)
class TestIncidenceRateDifference:
def test_incidence_rate_difference_reference_equal_to_0(self, time_data):
ird = IncidenceRateDifference()
ird.fit(time_data, exposure='exp', outcome='dis', time='t')
assert ird.incidence_rate_difference[0] == 0
def test_multiple_exposures(self):
df = pd.DataFrame()
df['exp'] = [1]*50 + [0]*50 + [2]*50
df['dis'] = [1]*25 + [0]*25 + [1]*25 + [0]*25 + [1]*25 + [0]*25
df['t'] = 2
ird = IncidenceRateDifference()
ird.fit(df, exposure='exp', outcome='dis', time='t')
assert ird.results.shape[0] == 3
assert list(ird.results.index) == ['Ref:0', '1', '2']
def test_match_openepi_sampledata(self):
oe_irr = -0.001055
oe_ci = -0.003275, 0.001166
df = ze.load_sample_data(False)
ird = IncidenceRateDifference()
ird.fit(df, exposure='art', outcome='dead', time='t')
npt.assert_allclose(ird.incidence_rate_difference[1], oe_irr, atol=1e-5)
rf = ird.results
npt.assert_allclose(rf.loc[rf.index == '1'][['IRD_LCL', 'IRD_UCL']], [oe_ci], atol=1e-5)
class TestDiagnostics:
@pytest.fixture
def test_data(self):
df = pd.DataFrame()
df['test'] = [1]*50 + [0]*50
df['case'] = [1]*40 + [0]*10 + [1]*15 + [0]*35
return df
def test_sensitivity_same_as_calc(self, test_data):
se = Sensitivity()
se.fit(test_data, test='test', disease='case')
sens = sensitivity(40, 50)
npt.assert_allclose(se.sensitivity, sens[0])
def test_specificity_same_as_calc(self, test_data):
sp = Specificity()
sp.fit(test_data, test='test', disease='case')
spec = specificity(15, 50)
npt.assert_allclose(sp.specificity, spec[0])
def test_diagnostic_same_as_compositions(self, test_data):
se = Sensitivity()
se.fit(test_data, test='test', disease='case')
sp = Specificity()
sp.fit(test_data, test='test', disease='case')
diag = Diagnostics()
diag.fit(test_data, test='test', disease='case')
npt.assert_allclose(diag.sensitivity.sensitivity, se.sensitivity)
npt.assert_allclose(diag.specificity.specificity, sp.specificity)
def test_match_sas_sensitivity_ci(self, test_data):
sas_ci = [0.689127694, 0.910872306]
diag = Diagnostics()
diag.fit(test_data, test='test', disease='case')
| npt.assert_allclose(diag.sensitivity.results[['Se_LCL', 'Se_UCL']], [sas_ci]) | numpy.testing.assert_allclose |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#################
## Import modules
#################
import sys
# walk directories
import glob
# access to OS functionality
import os
# copy things
import copy
# numpy
import numpy as np
# open3d
import open3d
# matplotlib for colormaps
import matplotlib.cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# struct for reading binary ply files
import struct
# the main class that loads raw 3D scans
class Kitti360Viewer3DRaw(object):
# Constructor
def __init__(self, seq=0, mode='velodyne'):
if 'KITTI360_DATASET' in os.environ:
kitti360Path = os.environ['KITTI360_DATASET']
else:
kitti360Path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..', '..')
if mode=='velodyne':
self.sensor_dir='velodyne_points'
elif mode=='sick':
self.sensor_dir='sick_points'
else:
raise RuntimeError('Unknown sensor type!')
sequence = '2013_05_28_drive_%04d_sync' % seq
self.raw3DPcdPath = os.path.join(kitti360Path, 'data_3d_raw', sequence, self.sensor_dir, 'data')
def loadVelodyneData(self, frame=0):
pcdFile = os.path.join(self.raw3DPcdPath, '%010d.bin' % frame)
if not os.path.isfile(pcdFile):
raise RuntimeError('%s does not exist!' % pcdFile)
pcd = np.fromfile(pcdFile, dtype=np.float32)
pcd = | np.reshape(pcd,[-1,4]) | numpy.reshape |
import numpy as np
from collections import Counter
import sklearn.metrics as metrics
class DataHandler:
def __init__(self, config, load_data=True):
""" The initialiser for the DataHandler class.
:param config: A ArgumentParser object.
"""
# Creates the lists to store data.
self.train_x, self.train_y = np.array([]), np.array([])
self.test_x, self.test_y = np.array([]), np.array([])
self.val_x, self.val_y = np.array([]), np.array([])
self.data_x, self.data_y = np.array([]), np.array([])
# Sets the class members.
self.val_per = config.val_per
self.verbose = config.verbose
self.config = config
self.pseudo_indices = []
# Loads the training data into the unannotated data stores.
if load_data:
self.load_training_data(config.data_dir)
self.load_testing_data(config.data_dir)
def log(self, message):
""" Method to handle printing and logging of messages.
:param message: String of message to be printed and logged.
"""
if self.config.verbose:
print(message)
if self.config.log_file != '':
print(message, file=open(self.config.log_file, 'a'))
def load_training_data(self, data_dir):
""" Loads the training data to the unannotated lists.
:param data_dir: The data directory.
"""
values = np.load(data_dir + "Training/values.npy")
self.data_x = np.array(values[:, 0])
self.data_x = np.array(["Training/" + i for i in self.data_x])
self.data_y = values[:, 1].astype(int)
self.log("Loaded " + str(int(len(self.data_y) / self.config.cell_patches)) + " Unannotated Cells")
def load_testing_data(self, data_dir):
""" Loads the testing data to the testing data lists.
:param data_dir: The data directory.
"""
values = np.load(data_dir + "Testing/values.npy")
self.test_x = np.array(values[:, 0])
self.test_x = np.array(["Testing/" + i for i in self.test_x])
self.test_y = values[:,1].astype(int)
self.log("Loaded " + str(int(len(self.test_y) / self.config.cell_patches)) + " Testing Cells")
def balance(self, x_list, y_list):
""" A method to balance a set of data.
:param x_list: A list of data.
:param y_list: A list of labels.
:return: balanced x and y lists.
"""
# TODO - make this work with cell patches
balance = Counter(y_list)
min_values = min(list(balance.values()))
indices = []
for c in range(self.config.num_classes):
class_values = balance[c]
indices.append(np.random.permutation([j for j, i in enumerate(y_list) if i == c])
[:class_values - min_values])
indices = set(np.concatenate(indices).astype(int).tolist()) if len(indices) > 0 else set()
x_list = np.array([i for j, i in enumerate(x_list) if j not in indices])
y_list = np.array([i for j, i in enumerate(y_list) if j not in indices])
return x_list, y_list
def set_validation_set(self, x, y):
""" Sets the validation set from the training data.
"""
num_val = int((len(y) / self.config.cell_patches) * self.val_per)
indices = []
cell_indices = np.random.choice(list(range(len(y) // self.config.cell_patches)), num_val, False)
for i in cell_indices:
index = i * self.config.cell_patches
indices += list(range(index, index + self.config.cell_patches))
val_x = np.take(x, indices)
val_y = np.take(y, indices)
x = np.delete(x, indices)
y = | np.delete(y, indices) | numpy.delete |
from __future__ import print_function
import argparse
import brain
import h5py
import math
import numpy
import sys
def compute_frames_per_block(cells_to_frames_ratio, report, block_values,
gids=None):
# The number of frames per block is fixed using the median number of
# compartments per cell, the expected block size and the cells to frames
# ratio. Chunks are later guaranteed to not exceed the size of a block.
if gids is None:
gids = report.gids
view = report.create_view(gids)
mapping = view.mapping
counts = numpy.zeros((len(gids)), dtype="u4")
for i in range(len(gids)):
counts[i] = mapping.num_compartments(i)
total_compartments = | numpy.sum(counts) | numpy.sum |
from collections import OrderedDict
import numpy as np
import pytest
from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete, Tuple, utils
@pytest.mark.parametrize(["space", "flatdim"], [
(Discrete(3), 3),
(Box(low=0., high=np.inf, shape=(2, 2)), 4),
(Tuple([Discrete(5), Discrete(10)]), 15),
(Tuple([Discrete(5), Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)]), 7),
(Tuple((Discrete(5), Discrete(2), Discrete(2))), 9),
(MultiDiscrete([2, 2, 100]), 3),
(MultiBinary(10), 10),
(Dict({"position": Discrete(5),
"velocity": Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)}), 7),
])
def test_flatdim(space, flatdim):
dim = utils.flatdim(space)
assert dim == flatdim, "Expected {} to equal {}".format(dim, flatdim)
@pytest.mark.parametrize("space", [
Discrete(3),
Box(low=0., high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5), Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)]),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict({"position": Discrete(5),
"velocity": Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)}),
])
def test_flatten_space_boxes(space):
flat_space = utils.flatten_space(space)
assert isinstance(flat_space, Box), "Expected {} to equal {}".format(type(flat_space), Box)
flatdim = utils.flatdim(space)
(single_dim, ) = flat_space.shape
assert single_dim == flatdim, "Expected {} to equal {}".format(single_dim, flatdim)
@pytest.mark.parametrize("space", [
Discrete(3),
Box(low=0., high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5), Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)]),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict({"position": Discrete(5),
"velocity": Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)}),
])
def test_flat_space_contains_flat_points(space):
some_samples = [space.sample() for _ in range(10)]
flattened_samples = [utils.flatten(space, sample) for sample in some_samples]
flat_space = utils.flatten_space(space)
for i, flat_sample in enumerate(flattened_samples):
assert flat_sample in flat_space,\
'Expected sample #{} {} to be in {}'.format(i, flat_sample, flat_space)
@pytest.mark.parametrize("space", [
Discrete(3),
Box(low=0., high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5), Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32)]),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict({"position": Discrete(5),
"velocity": Box(low=np.array([0, 0]), high= | np.array([1, 5]) | numpy.array |
import numpy as np
import math
import cv2
import os
from skimage.measure import compare_ssim as ssim
def psnr(img1, img2):
mse = | np.mean((img1 - img2) ** 2) | numpy.mean |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from fatiando.vis import mpl
from fatiando.gravmag import polyprism
import scipy.stats as sp
def plot_prisms(prisms, scale=1.):
'''
Returns a list of ordered vertices to build the model
on matplotlib 3D
input
prisms: list - objects of fatiando.mesher.polyprisms
scale: float - factor used to scale the coordinate values
output
verts: list - ordered vertices
'''
assert np.isscalar(scale), 'scale must be a scalar'
assert scale > 0., 'scale must be positive'
verts = []
for o in prisms:
top = []
bottom = []
for x, y in zip(o.x, o.y):
top.append(scale*np.array([y,x,o.z1]))
bottom.append(scale*np.array([y,x,o.z2]))
verts.append(top)
verts.append(bottom)
for i in range(o.x.size-1):
sides = []
sides.append(scale*np.array([o.y[i], o.x[i], o.z1]))
sides.append(scale*np.array([o.y[i+1], o.x[i+1], o.z1]))
sides.append(scale*np.array([o.y[i+1], o.x[i+1], o.z2]))
sides.append(scale*np.array([o.y[i], o.x[i], o.z2]))
verts.append(sides)
sides = []
sides.append(scale*np.array([o.y[-1], o.x[-1], o.z1]))
sides.append(scale*np.array([o.y[0], o.x[0], o.z1]))
sides.append(scale*np.array([o.y[0], o.x[0], o.z2]))
sides.append(scale*np.array([o.y[-1], o.x[-1], o.z2]))
verts.append(sides)
return verts
def plot_simple_model_data(x, y, obs, initial, model, filename):
'''
Returns a plot of synthetic total-field anomaly
data produced by the simple model and the true model
input
x, y: 1D array - Cartesian coordinates of the upward
continued total-field anomaly data
xa, ya: 1D array - Cartesian coordinates of the observations
obs: 1D array - synthetic total-field anomaly data
initial: list - fatiando.mesher.PolygonalPrism
of the initial approximation
model: list - list of fatiando.mesher.PolygonalPrism
of the simple model
filename: string - directory and filename of the figure
output
fig: figure - plot
'''
plt.figure(figsize=(11,5))
# sinthetic data
ax=plt.subplot(1,2,1)
plt.tricontour(y, x, obs, 20, linewidths=0.5, colors='k')
plt.tricontourf(y, x, obs, 20,
cmap='RdBu_r', vmin=np.min(obs),
vmax=-np.min(obs)).ax.tick_params(labelsize=12)
plt.plot(y, x, 'ko', markersize=.25)
mpl.polygon(initial, '.-r', xy2ne=True)
plt.xlabel('$y$(km)', fontsize=14)
plt.ylabel('$x$(km)', fontsize=14)
clb = plt.colorbar(pad=0.01, aspect=20, shrink=1)
clb.ax.set_title('nT', pad=-305)
mpl.m2km()
clb.ax.tick_params(labelsize=14)
plt.text(-6700, 3800, '(a)', fontsize=20)
verts_true = plot_prisms(model, scale=0.001)
# true model
ax = plt.subplot(1,2,2, projection='3d')
ax.add_collection3d(Poly3DCollection(verts_true, alpha=0.3,
facecolor='b', linewidths=0.5, edgecolors='k'))
ax.set_xlim(-2.5, 2.5, 100)
ax.set_ylim(-2.5, 2.5, 100)
ax.set_zlim(2, -0.1, 100)
ax.tick_params(labelsize=14)
ax.set_ylabel('y (km)', fontsize= 14)
ax.set_xlabel('x (km)', fontsize= 14)
ax.set_zlabel('z (km)', fontsize= 14)
ax.view_init(10, 50)
ax.text2D(-0.1, 0.07, '(b)', fontsize=20)
plt.tight_layout()
plt.savefig(filename, dpi=300, bbox_inches='tight')
return plt.show()
def plot_matrix(z0, intensity, matrix, vmin,
vmax, solutions, xtitle, ytitle, unity,
figsize, dpi=300,
truevalues=[], filename=''):
'''
Returns a plot of the goal function values for each inversion
organized in a matrix
input
z0: 1D array - range of depth to the top values in meters
intensity: 1D array - range of total-magnetization
intensity values in nT
matrix: 2D array - values for the goal or misfit function
produced by the solutions of the multiple
inversions
vmin: float - minimum value for the colorbar
vmin: float - maximum value for the colorbar
solutions: list - list of position on the map of the chosen
solutions for the plots [[x1, y1],[x2, y2]]
xtitle: string - x axis title
ytitle: string - y axis title
unity: string - unity of the function
figsize: tuple - size of the figure
dpi: integer - resolution of the figure
truevalues: list - list of position [x, y] on the map of the
true values for the parameters z0 and intensity
filename: string - directory and filename of the figure
output
fig: figure - plot of the result
'''
n = z0.size
m = intensity.size
fig, ax = fig, ax = plt.subplots(1,1)
fig.set_size_inches(6,5)
w = 3
img = ax.imshow(matrix, vmin=vmin, vmax=vmax, origin='lower',extent=[0,w,0,w])
img.axes.tick_params(labelsize=14)
plt.ylabel(ytitle, fontsize=14)
plt.xlabel(xtitle, fontsize=14)
if truevalues == []:
pass
else:
plt.plot((2.*truevalues[1]+1.)*w/(2.*m), (2.*truevalues[0]+1.)*w/(2.*n), '^r', markersize=12)
colors = ['Dw', 'Dm']
for s, c in zip(solutions, colors):
plt.plot((2.*s[1]+1.)*w/(2.*m), (2.*s[0]+1.)*w/(2.*n), c, markersize=12)
x_label_list = []
y_label_list = []
for xl, yl in zip(intensity,z0):
x_label_list.append(str(xl)[:-2])
y_label_list.append(str(yl)[:-2])
ax.set_xticks(np.linspace(w/(2.*n), w - w/(2.*n), n))
ax.set_yticks(np.linspace(w/(2.*m), w - w/(2.*m), m))
ax.set_xticklabels(x_label_list)
ax.set_yticklabels(y_label_list)
# Minor ticks
ax.set_xticks(np.linspace(0, w, n+1), minor=True)
ax.set_yticks(np.linspace(0, w, m+1), minor=True)
ax.grid(which='minor', color='k', linewidth=2)
clb = plt.colorbar(img, pad=0.01, aspect=20, shrink=1)
clb.ax.set_title(unity, pad=-288)
clb.ax.tick_params(labelsize=14)
if filename == '':
pass
else:
plt.savefig(filename, dpi=300, bbox_inches='tight')
return plt.show()
def plot_complex_model_data(x, y, obs, alt, initial, model,
figsize, dpi=300, filename=''):
'''
Returns a plot of synthetic total-field anomaly
data produced by the complex model and the true model
input
x, y: 1D array - Cartesian coordinates of the upward
continued total-field anomaly data
xa, ya: 1D array - Cartesian coordinates of the observations
obs: 1D array - synthetic total-field anomaly data
alt: 1D array - geometric heigt of the observations
initial: list - fatiando.mesher.PolygonalPrism
of the initial approximation
model: list - list of fatiando.mesher.PolygonalPrism
of the simple model
figsize: tuple - size of the figure
dpi: integer - resolution of the figure
filename: string - directory and filename of the figure
output
fig: figure - plot
'''
verts_true = plot_prisms(model, scale=0.001)
plt.figure(figsize=figsize)
# sinthetic data
ax=plt.subplot(2,2,1)
plt.tricontour(y, x, obs, 20, linewidths=0.5, colors='k')
plt.tricontourf(y, x, obs, 20,
cmap='RdBu_r', vmin=np.min(obs),
vmax=- | np.min(obs) | numpy.min |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import copy
from collections import defaultdict
import sys
import warnings
try:
from torchreid.eval_cylib.eval_metrics_cy import evaluate_cy
IS_CYTHON_AVAI = True
print("Using Cython evaluation code as the backend")
except ImportError:
IS_CYTHON_AVAI = False
warnings.warn("Cython evaluation is UNAVAILABLE, which is highly recommended")
def eval_cuhk03(distmat, q_pids, g_pids, q_camids, g_camids, max_rank):
"""Evaluation with cuhk03 metric
Key: one image for each gallery identity is randomly sampled for each query identity.
Random sampling is performed num_repeats times.
"""
num_repeats = 10
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print("Note: number of gallery samples is quite small, got {}".format(num_g))
indices = | np.argsort(distmat, axis=1) | numpy.argsort |
# Copyright (c) 2021, <NAME>
# May 2021. Started on May 17 for Localized Conflict Modeling.
# First, read GPW population count data as GeoTIFF.
# Write-up and theoretical results in late May.
#-------------------------------------------------------------------
#
# conda activate tf36
# python
# from topoflow.utils import conflict
# pop_grid = conflict.read_geotiff()
#
#-------------------------------------------------------------------
#
# test1()
# test2()
#
# class conflict()
# initialize()
# initialize_U()
# initialize_C1()
# initialize_C2()
#-----------------------
# update()
# update_U()
# update_C1()
# update_C2()
#-----------------------
# update_p()
# update_S()
# update_S1()
# update_S2() # (obsolete soon?)
# update_S_old() # (obsolete soon?)
# update_time()
#--------------------------------------
# get_neighbor_cols_and_rows()
# get_neighbor_values()
# spread_conflicts1()
# spread_conflicts2()
# finalize()
# run_model()
#
# get_raster_cellsize()
# get_raster_bounds()
# bounds_disjoint()
# read_geotiff() # can also create RTG and RTI files
# regrid_geotiff()
#
# read_acled_data()
#
#-------------------------------------------------------------------
import numpy as np
import numpy.random as rn
#### import random as rn
import pandas as pd
import time
# For read_geotiff(), etc.
try:
from osgeo import gdal
except ImportError:
import gdal
import glob, sys
import os, os.path
from . import rti_files
from . import rtg_files
#-------------------------------------------------------------------
def test1():
cfg_file = 'conflict.cfg'
c = conflict()
c.run_model( cfg_file )
# test1()
#-------------------------------------------------------------------
def test2():
pop_grid = read_geotiff()
# test2()
#-------------------------------------------------------------------
def test3( SUBSAMPLE=False ):
#--------------------------------
# Use Horn of Africa as a test.
#--------------------------------
in_dir = '/Users/peckhams/Conflict/Data/GPW-v4/'
in_file = 'gpw_v4_population_count_rev11_2020_30_sec.tif'
in_file = in_dir + in_file
# Bounds = [ minlon, minlat, maxlon, maxlat ]
out_bounds = [ 25.0, -5.0, 55.0, 25.0]
if not(SUBSAMPLE):
#--------------------------------------
# 3600 cols x 3600 rows, 30 arcseconds
#--------------------------------------
out_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_30sec.tif'
out_file = in_dir + out_file
out_xres_sec = None # will default to in_xres_sec
out_yres_sec = None # will default to in_yres_sec
print('Reading & clipping GeoTIFF file...')
else:
#--------------------------------------
# 360 cols x 360 rows, 300 arcseconds
#--------------------------------------
out_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_450sec.tif'
out_file = in_dir + out_file
out_xres_sec = 450.0 # (15 times lower resolution)
out_yres_sec = 450.0 # (15 times lower resolution)
print('Reading, clipping & subsampling GeoTIFF file...')
#--------------------------------------
# 360 cols x 360 rows, 300 arcseconds
#--------------------------------------
# out_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_300sec.tif'
# out_file = in_dir + out_file
# out_xres_sec = 300.0 # (10 times lower resolution)
# out_yres_sec = 300.0 # (10 times lower resolution)
# print('Reading, clipping & subsampling GeoTIFF file...')
regrid_geotiff(in_file=in_file, out_file=out_file,
out_bounds=out_bounds,
out_xres_sec=out_xres_sec,
out_yres_sec=out_yres_sec,
RESAMPLE_ALGO='bilinear', REPORT=True)
# test3()
#-------------------------------------------------------------------
class conflict():
#---------------------------------------------------------------
def initialize( self, cfg_file=None ):
home_dir = os.path.expanduser('~') + os.sep
if (cfg_file is None):
self.in_dir = home_dir + 'Conflict/Data/GPW-v4/'
self.out_dir = home_dir + 'Conflict/Output/'
cfg_file = self.in_dir + 'conflict.cfg'
self.out_file = self.out_dir + 'conflicts.rts'
self.IDs_file = self.out_dir + 'conflict_IDs.rts'
self.C1_file = ''
self.C2_file = ''
#---------------------------
# Was good for pop count U
#---------------------------
# self.U_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_300sec.tif'
# self.nx = 360
# self.ny = 360
# self.c_emerge = 0.01 # (must be in (0,1])
# self.c_spread = 0.1
# ## self.c_spread = 0.03
# ## self.c_spread = 0.05
# ## self.p_geom = 0.2
# self.p_geom = 0.4
#--------------------------
# Is good for pop count U
#--------------------------
# self.U_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_450sec.tif'
# self.nx = 240
# self.ny = 240
# self.c_emerge = 0.5 # (must be in (0,1])
# ## self.c_emerge = 0.1 # (must be in (0,1])
# ## self.c_emerge = 0.01 # (must be in (0,1])
# self.c_spread = 0.5
# ## self.c_spread = 0.1
# self.p_resolve = 0.4
# self.p_geom = 0.4 # (not used now)
#-------------------------
# Was good for uniform U
#-------------------------
self.U_file = '' # (To use uniform U)
self.nx = 240
self.ny = 240
self.c_emerge = 0.001 ####
## self.c_emerge = 0.2
## self.c_emerge = 0.001 # (must be in (0,1])
## self.c_spread = 0.1 ####
self.c_spread = 0.4 ####
## self.c_spread = 0.03
## self.c_spread = 0.05
## self.p_geom = 0.2
self.p_resolve = 0.4
self.p_geom = 0.4
self.spread_method = 1
#--------------------------
self.time_lag = 1 # (not used yet)
self.n_steps = 100
self.REPORT = True
else:
#-----------------------------------
# Read params from the config file
#-----------------------------------
dum = 0
self.cfg_file = cfg_file
self.time_index = 0
self.n_conflict_cells = 0
self.grid_shape = (self.ny, self.nx)
## self.start_time = time.time()
self.start_ID = 1
self.start_index = 0
#----------------------------
# Change to input directory
#----------------------------
os.chdir( self.in_dir )
#-----------------------------
# Open output files to write
#-----------------------------
self.out_unit = open( self.out_file, 'wb')
self.IDs_unit = open( self.IDs_file, 'wb')
#--------------------------------------
# Make grids with col and row numbers
#--------------------------------------
cols = np.arange( self.nx )
rows = np.arange( self.ny )
cg, rg = np.meshgrid( cols, rows )
self.col_grid = cg
self.row_grid = rg
self.initialize_U()
self.initialize_C1()
self.initialize_C2()
#------------------------------------------------------
# Initialize to no conflicts
# S will later contain 1s in grid cells with conflict
# Initialize durations to zero also.
# IDs will contain a unique ID for each conflict.
# Using 'float32' for IDs now for viewing the RTS.
#------------------------------------------------------
self.S = np.zeros( self.grid_shape, dtype='uint8' )
self.durs = np.zeros( self.grid_shape, dtype='uint32')
self.IDs = np.zeros( self.grid_shape, dtype='float32')
#----------------------------------------------------------
# Create a set of random integer IDs, without replacement
# so when we colorize, it will look better.
#----------------------------------------------------------
self.ran_IDs = rn.choice( 10000000, 10000000, replace=False)
# This next method used the built-in random module & had problems.
### self.ran_IDs = rn.sample( range(1000000), 500000)
self.start_time = time.time()
# initialize()
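#---------------------------------------------------------------
# Hypothetical usage sketch (not part of the original code):
# the model is driven by calling initialize() once and then
# update() once per time step, e.g.
#
#     c = conflict()
#     c.initialize()              # built-in defaults above
#     for _ in range(c.n_steps):
#         c.update()
#     c.out_unit.close()          # assumes the caller closes
#     c.IDs_unit.close()          # the RTS output files
#---------------------------------------------------------------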
#---------------------------------------------------------------
def initialize_U( self ):
#-----------------------------------
# Start with U = a population grid
#-----------------------------------
if (self.U_file != ''):
self.U = read_geotiff(in_file=self.U_file,
REPORT=True)
# In case of negative nodata value
np.maximum(self.U, 0.0, self.U) # (in place)
else:
#---------------------
# Use a grid of ones
#---------------------
self.U = np.ones( self.grid_shape, dtype='float32' )
#-----------------------------------
# Disallow conflict on the 4 edges
#-----------------------------------
self.U[0,:] = 0.0
self.U[self.ny - 1,:] = 0.0
self.U[:,0] = 0.0
self.U[:,self.nx - 1] = 0.0
# initialize_U()
#---------------------------------------------------------------
def initialize_C1( self ):
if (self.C1_file != ''):
self.C1 = read_geotiff(in_file=self.C1_file,
REPORT=True)
else:
#---------------------
# Use a grid of ones
#---------------------
self.C1 = np.ones( self.grid_shape, dtype='float32' )
# initialize_C1()
#---------------------------------------------------------------
def initialize_C2( self ):
if (self.C2_file != ''):
self.C2 = read_geotiff(in_file=self.C2_file,
REPORT=True)
else:
#---------------------
# Use a grid of ones
#---------------------
self.C2 = np.ones( self.grid_shape, dtype='float32' )
# initialize_C2()
#---------------------------------------------------------------
def update( self ):
self.update_U()
self.update_C1()
self.update_C2()
#-------------------
self.update_p()
self.update_S() # also updates IDs
## self.update_S1() # same speed as update_S()
## self.update_S2()
self.update_time()
# update()
#---------------------------------------------------------------
def update_U( self ):
pass
# update_U()
#---------------------------------------------------------------
def update_C1( self ):
pass
# update_C1()
#---------------------------------------------------------------
def update_C2( self ):
pass
# update_C2()
#---------------------------------------------------------------
def update_p( self ):
#----------------------------------------------------------
# Note: p is the probability that a conflict emerges in
# a grid cell, and is a function of the unrest, U.
# In order for p to be a probability, in (0,1],
# we need 0 < c_emerge <= 1.
#----------------------------------------------------------
self.p_emerge = (self.c_emerge / self.U.max()) * self.U
# update_p()
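#---------------------------------------------------------------
# Worked example (hypothetical numbers): with c_emerge = 0.001
# and a uniform U (U.max() = 1), every interior cell gets
# p_emerge = 0.001 per time step.  With a population grid,
# p_emerge scales linearly with U, so a cell holding half of
# the maximum population gets p_emerge = 0.0005.
#---------------------------------------------------------------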
#---------------------------------------------------------------
def update_S( self ):
#-----------------------------------------------------------
# Note: The previous version of this method generated
# Geometric random variables to model conflict
# durations. This new version does not track
# durations explicitly, but should produce exactly
# the same result. Here, any conflict ends in the
# kth time interval with fixed probability, p.
# This is modeled with a Bernoulli random variable.
#-----------------------------------------------------------
# Note: A Bernoulli random variable takes the value 1
# with probability p and 0 with probability (1-p).
# It is a special case of a Binomial r.v. (n=1).
# np.random.binomial() allows p to be an array.
#-----------------------------------------------------------
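#-----------------------------------------------------------
# Worked example: if a conflict ends each step with fixed
# probability p_resolve = 0.4, its total duration is a
# Geometric random variable with mean 1/0.4 = 2.5 steps,
# which is why durations no longer need to be tracked.
#-----------------------------------------------------------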
#--------------------------------------------------
# Initiate new conflicts in cells with no conflict
#-----------------------------------------------------
# Generate Bernoulli random variables with parameter
# p_emerge, and initiate conflicts where B1=1.
#-----------------------------------------------------
# Convert b from dtype='int64' to dtype='uint8' ?
# This requires 8 times less memory.
#-----------------------------------------------------
B1 = np.random.binomial(1, self.p_emerge)
w2 = np.logical_and( self.S == 0, B1 == 1 )
n2 = w2.sum()
#------------------------------------------------
# Resolve some conflicts in cells with conflict
#-----------------------------------------------------
# Generate Bernoulli random variables with parameter
# p_resolve and terminate conflicts that get B2=1.
# Conflict durations will then turn out to be
# Geometric random variables, same parameter.
#-----------------------------------------------------
B2 = np.random.binomial(1, self.p_resolve, size=self.grid_shape)
w3 = np.logical_and( self.S == 1, B2 == 1 )
n3 = w3.sum()
#------------------------------------
# Perform the required updates to S
#------------------------------------
i = self.start_index
self.S[ w2 ] = B1[ w2 ]
self.IDs[ w2 ] = self.ran_IDs[i:i + n2]
self.start_index += n2
self.n_conflict_cells += n2
#---------------------------------------------
self.S[ w3 ] = (1 - B2[ w3 ])
self.n_conflict_cells -= n3
# Reset IDs to zero where resolved (i.e. B2 = 1).
self.IDs[ w3 ] = self.IDs[w3] * (1 - B2[w3])
#---------------------------------------------
if (self.REPORT):
print('time_index =', self.time_index)
print('Number of new conflict cells =', n2)
print('Number of resolved conflicts =', n3)
if (self.spread_method == 0):
print()
#----------------------------------
# Attempt to spread the conflicts
#------------------------------------------------
# Set spread_method == 0 to turn off spreading,
# e.g. to test against theoretical results.
#------------------------------------------------
if (self.spread_method == 1):
self.spread_conflicts1()
# elif (self.spread_method == 2):
# self.spread_conflicts2()
# elif (self.spread_method == 3):
# self.spread_conflicts3()
# else:
# pass
SAVE_S = True
if (SAVE_S):
#---------------------------------
# Write grid as binary to file
# (could use .astype('float32'))
#---------------------------------
S2 = np.float32(self.S)
S2.tofile( self.out_unit )
SAVE_IDs = True
if (SAVE_IDs):
self.IDs.tofile( self.IDs_unit )
# update_S()
#---------------------------------------------------------------
def update_S1( self ):
#-----------------------------------------------------------
# Note: The previous version of this method generated
# Geometric random variables to model conflict
# durations. This new version does not track
# durations explicitly, but should produce exactly
# the same result. Here, any conflict ends in the
# kth time interval with fixed probability, p.
# This is modeled with a Bernoulli random variable.
#-----------------------------------------------------------
# Note: A Bernoulli random variable takes the value 1
# with probability p and 0 with probability (1-p).
# It is a special case of a Binomial r.v. (n=1).
# np.random.binomial() allows p to be an array.
#-----------------------------------------------------------
#----------------------------------------------
# Make a copy of self.S, i.e. S(k) vs. S(k+1)
#----------------------------------------------
# Otherwise, conflicts may be resolved in the
# same time step as they were initiated.
# Could also apply self.S updates at end.
#----------------------------------------------
S = self.S.copy() # This may be costly
#--------------------------------------------------
# Initiate new conflicts in cells with no conflict
#-----------------------------------------------------
# Generate Bernoulli random variables with parameter
# p_emerge, and initiate conflicts where B1=1.
#-----------------------------------------------------
# Convert b from dtype='int64' to dtype='uint8' ?
# This requires 8 times less memory.
#-----------------------------------------------------
B1 = np.random.binomial(1, self.p_emerge)
w2 = np.logical_and( S == 0, B1 == 1 )
n2 = w2.sum()
i = self.start_index
self.S[ w2 ] = B1[ w2 ]
self.IDs[ w2 ] = self.ran_IDs[i:i + n2]
self.start_index += n2
self.n_conflict_cells += n2
#------------------------------------------------
# Resolve some conflicts in cells with conflict
#-----------------------------------------------------
# Generate Bernoulli random variables with parameter
# p_resolve and terminate conflicts that get B2=1.
# Conflict durations will then turn out to be
# Geometric random variables, same parameter.
#-----------------------------------------------------
B2 = np.random.binomial(1, self.p_resolve, size=self.grid_shape)
w3 = np.logical_and( S == 1, B2 == 1 )
n3 = w3.sum()
self.S[ w3 ] = (1 - B2[ w3 ])
self.n_conflict_cells -= n3
# Reset IDs to zero where resolved (i.e. B2 = 1).
self.IDs[ w3 ] = self.IDs[w3] * (1 - B2[w3])
if (self.REPORT):
print('time_index =', self.time_index)
print('Number of new conflicts =', n2)
print('Number of resolved conflicts =', n3)
print()
#----------------------------------
# Attempt to spread the conflicts
#------------------------------------------------
# Set spread_method == 0 to turn off spreading,
# e.g. to test against theoretical results.
#------------------------------------------------
if (self.spread_method == 1):
self.spread_conflicts1()
# elif (self.spread_method == 2):
# self.spread_conflicts2()
# elif (self.spread_method == 3):
# self.spread_conflicts3()
# else:
# pass
SAVE_S = True
if (SAVE_S):
#---------------------------------
# Write grid as binary to file
# (could use .astype('float32'))
#---------------------------------
S2 = np.float32(self.S)
S2.tofile( self.out_unit )
SAVE_IDs = True
if (SAVE_IDs):
self.IDs.tofile( self.IDs_unit )
# update_S1()
#---------------------------------------------------------------
def update_S2( self ):
#-----------------------------------------------------------
# Note: The previous version of this method generated
# Geometric random variables to model conflict
# durations. This new version does not track
# durations explicitly, but should produce exactly
# the same result. Here, any conflict ends in the
# kth time interval with fixed probability, p.
# This is modeled with a Bernoulli random variable.
#-----------------------------------------------------------
# Note: A Bernoulli random variable takes the value 1
# with probability p and 0 with probability (1-p).
# It is a special case of a Binomial r.v. (n=1).
# np.random.binomial() allows p to be an array.
#-----------------------------------------------------------
#---------------------------------------
# Find cells with and without conflict
#---------------------------------------
w1 = (self.S == 1)
w0 = np.invert( w1 )
n1 = w1.sum()
n0 = w0.sum()
## S = self.S.copy() ########
#--------------------------------------------------
# Initiate new conflicts in cells with no conflict
#--------------------------------------------------
# Convert b from dtype='int64' to dtype='uint8' ?
# This requires 8 times less memory.
#--------------------------------------------------
B1 = np.random.binomial(1, self.p_emerge[w0])
w2 = (B1 == 1)
n2 = w2.sum()
i = self.start_index
# w2 is defined relative to the w0 subset, so map the hits
# back to flat grid indices before assigning.
new_idx = np.flatnonzero(w0)[w2]
self.S.flat[ new_idx ] = 1
self.IDs.flat[ new_idx ] = self.ran_IDs[i:i + n2]
self.start_index += n2
self.n_conflict_cells += n2
if (self.REPORT):
print('Number of new conflicts =', n2)
#------------------------------
# Update S with new conflicts
#------------------------------
# if (n3 > 0):
# i = self.start_index
# self.S[ w3 ] = 1 # (for method 1)
# self.IDs[ w3 ] = self.ran_IDs[i:i + n3]
# #---------------------------------------------
# #### self.S[ w0 ] = B1 # (for method 2)
# #### self.IDs[ w0 ] = self.ran_IDs[i:i + n3]
# #---------------------------------------------
# self.start_index += n3
# self.n_conflict_cells += n3
#------------------------------------------------
# Resolve some conflicts in cells with conflict
#-----------------------------------------------------
# Generate Bernoulli random variables, and terminate
# conflicts that get B2=1. Conflict durations will
# then turn out to be Geometric random variables.
#-----------------------------------------------------
B2 = np.random.binomial(1, self.p_resolve, size=n1)
w3 = (B2 == 1)
n3 = w3.sum()
# w3 is defined relative to the w1 subset, so map the hits
# back to flat grid indices before assigning.
res_idx = np.flatnonzero(w1)[w3]
self.S.flat[ res_idx ] = 0
self.n_conflict_cells -= n3
# Reset IDs to zero where resolved (i.e. B2 = 1).
self.IDs.flat[ res_idx ] = 0
#-----------------------------------
# Update S with resolved conflicts
#-----------------------------------
if (self.REPORT):
print('time_index =', self.time_index)
print('Number of resolved conflicts =', n3)
#----------------------------------
# Attempt to spread the conflicts
#------------------------------------------------
# Set spread_method == 0 to turn off spreading,
# e.g. to test against theoretical results.
#------------------------------------------------
if (self.spread_method == 1):
self.spread_conflicts1()
# elif (self.spread_method == 2):
# self.spread_conflicts2()
# elif (self.spread_method == 3):
# self.spread_conflicts3()
# else:
# pass
SAVE_S = True
if (SAVE_S):
#---------------------------------
# Write grid as binary to file
# (could use .astype('float32'))
#---------------------------------
S2 = np.float32(self.S)
S2.tofile( self.out_unit )
SAVE_IDs = True
if (SAVE_IDs):
self.IDs.tofile( self.IDs_unit )
# update_S2()
#---------------------------------------------------------------
def update_S_old( self ):
#-----------------------------------------------------------
# Notes: A Bernoulli random variable takes the value 1
# with probability p and 0 with probability (1-p).
# It is a special case of a Binomial r.v. (n=1).
# np.random.binomial() allows p to be an array.
#-----------------------------------------------------------
#------------------------------------------
# Reduce the existing durations by 1
# Durations are integers (# of timesteps)
#------------------------------------------
w1 = (self.S == 1)
# n1 = w1.sum()
self.durs[ w1 ] -= 1
#---------------------------------------------
# Have any conflicts reached their duration?
# If so, set their S value to 0.
#---------------------------------------------
# S[w1][w2] works for retrieving values, but
# it doesn't work for assignments.
#---------------------------------------------
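#---------------------------------------------
# Example of why (hypothetical array): for
#     s = np.zeros(5); m = (s == 0)
# the chained expression s[m][m] = 1 assigns into
# the temporary copy returned by the first fancy
# index, so s itself is left unchanged.
#---------------------------------------------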
# w2 = (self.durs[ w1 ] == 0) # can be empty
# self.S[ w1 ][ w2 ] = 0 # conflict is over
#-----------------------------------------------------
# METHOD 1: Works, but seems no faster than METHOD 2
#-----------------------------------------------------
# w2 = (self.durs[ w1 ] == 0) # can be empty
# self.S[ w1 ] = (1 - w2.astype('uint8'))
#----------------------------------------------------
# METHOD 2
#----------------------------------------------------
w2 = np.logical_and( (self.S == 1),(self.durs == 0) )
self.S[ w2 ] = 0
self.IDs[ w2 ] = 0 # reset the IDs
n2 = w2.sum()
self.n_conflict_cells -= n2
if (self.REPORT):
print('time_index =', self.time_index)
print('Number of resolved conflicts =', n2)
#--------------------------------------------------
# Initiate new conflicts; inherit size from p
#--------------------------------------------------
# Convert b from dtype='int64' to dtype='uint8' ?
# This requires 8 times less memory.
#--------------------------------------------------
dS = np.random.binomial(1, self.p_emerge)
dS = dS.astype('uint8')
w3 = np.logical_and(self.S == 0, dS == 1)
#--------------------------------------------
# This would allow existing conflicts to be
# "reseeded" and causes error in counting.
#--------------------------------------------
### w3 = (dS == 1)
n3 = w3.sum()
if (self.REPORT):
print('Number of new conflicts =', n3)
if (n3 > 0):
#------------------------------
# Update S with new conflicts
#------------------------------
self.S[ w3 ] = 1
## self.IDs[ w3 ] = np.arange( n3 ) + self.start_ID
## self.start_ID += n3
i = self.start_index
self.IDs[ w3 ] = self.ran_IDs[i:i + n3]
self.start_index += n3
### np.maximum( self.S, dS, self.S) # in place
#------------------------------------------
# New durations are Geometric random vars
#------------------------------------------
g = np.random.geometric( self.p_geom, size=n3 )
self.durs[ w3 ] = g
self.n_conflict_cells += n3
#----------------------------------
# Attempt to spread the conflicts
#------------------------------------------------
# Set spread_method == 0 to turn off spreading,
# e.g. to test against theoretical results.
#------------------------------------------------
if (self.spread_method == 1):
self.spread_conflicts1()
# elif (self.spread_method == 2):
# self.spread_conflicts2()
# elif (self.spread_method == 3):
# self.spread_conflicts3()
# else:
# pass
SAVE_S = True
if (SAVE_S):
#---------------------------------
# Write grid as binary to file
# (could use .astype('float32'))
#---------------------------------
S2 = np.float32(self.S)
S2.tofile( self.out_unit )
SAVE_IDs = True
if (SAVE_IDs):
self.IDs.tofile( self.IDs_unit )
# update_S_old()
#---------------------------------------------------------------
def update_time( self ):
self.time_index += 1
# update_time()
#---------------------------------------------------------------
def get_neighbor_cols_and_rows( self, w1, n1 ):
cols = self.col_grid[ w1 ]
rows = self.row_grid[ w1 ]
#--------------------------------------------------
# 1st index is over grid cells that have conflict.
# 2nd index is over the 8 nearest neighbors.
#--------------------------------------------------
cn = np.zeros( (n1, 8), dtype='int32')
cn[:,0] = cols-1
cn[:,1] = cols
cn[:,2] = cols+1
cn[:,3] = cols-1
cn[:,4] = cols+1
cn[:,5] = cols-1
cn[:,6] = cols
cn[:,7] = cols+1
#---------------------------------------
rn = np.zeros( (n1, 8), dtype='int32')
rn[:,0] = rows-1
rn[:,1] = rows-1
rn[:,2] = rows-1
rn[:,3] = rows
rn[:,4] = rows
rn[:,5] = rows+1
rn[:,6] = rows+1
rn[:,7] = rows+1
#------------------
self.cn = cn
self.rn = rn
# get_neighbor_cols_and_rows()
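#---------------------------------------------------------------
# Neighbor ordering used above: for a conflict cell at
# (row, col) = (r, c), columns 0..7 of (rn, cn) are
# (r-1,c-1), (r-1,c), (r-1,c+1), (r,c-1), (r,c+1),
# (r+1,c-1), (r+1,c), (r+1,c+1), i.e. the 8 neighbors in
# row-major order with the center cell skipped.
#---------------------------------------------------------------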
#---------------------------------------------------------------
def get_neighbor_values( self, var, n1 ):
#----------------------------------------
# Get values of 8 nearest neighbors
# vals[k,:] = neighbor values of cell k
#----------------------------------------
cn = self.cn
rn = self.rn
vals = np.zeros( (n1, 8), dtype='float32')
vals[:,0] = var[rn[:,0], cn[:,0]] # (top left)
vals[:,1] = var[rn[:,1], cn[:,1]] # (top center)
vals[:,2] = var[rn[:,2], cn[:,2]] # (top right)
vals[:,3] = var[rn[:,3], cn[:,3]] # (left center)
vals[:,4] = var[rn[:,4], cn[:,4]] # (right center)
vals[:,5] = var[rn[:,5], cn[:,5]] # (bottom left)
vals[:,6] = var[rn[:,6], cn[:,6]] # (bottom center)
vals[:,7] = var[rn[:,7], cn[:,7]] # (bottom right)
# vals[:,8] = var[rn[:,8], cn[:,8]] # (center)
return vals
# get_neighbor_values()
#---------------------------------------------------------------
def spread_conflicts1( self, USE_LOOP=False ):
#-------------------------------------------------
# Note: Can only spread to cells that have S=0.
#-------------------------------------------------
w1 = (self.S == 1)
n1 = w1.sum()
if (n1 == 0):
print('No conflicts to spread at time:', self.time_index)
return
if (USE_LOOP):
ID_vals = self.IDs[ w1 ] #(for the for loop version)
else:
ID_vals = np.tile( np.array([self.IDs[w1]]).transpose(), (1,8))
#------------------
# This also works
#------------------
# ID_vals = np.zeros((n1,8), dtype='int64')
# ID_vals[:,0] = self.IDs[w1]
# ID_vals[:,1] = self.IDs[w1]
# ID_vals[:,2] = self.IDs[w1]
# ID_vals[:,3] = self.IDs[w1]
# ID_vals[:,4] = self.IDs[w1]
# ID_vals[:,5] = self.IDs[w1]
# ID_vals[:,6] = self.IDs[w1]
# ID_vals[:,7] = self.IDs[w1]
#---------------------------------------------
# Get nearest neighbor values for U, S, & C1
#---------------------------------------------
self.get_neighbor_cols_and_rows( w1, n1 )
#---------------------------------------------
## Sn = self.get_neighbor_values( self.S, n1)
Un = self.get_neighbor_values( self.U, n1)
## C1n = self.get_neighbor_values( self.C1, n1)
#------------------------------------------------
# Compute probability of spreading to neighbors
#------------------------------------------------
# The "None trick" shown here allows us to do
# the following for all k at once:
# pn[k,:] = Un[k,:] * (c2 / Un[k,:].max() )
# Need c2 = c_spread to be in (0,1].
# np.amax lets us take the max along an axis.
#------------------------------------------------
# NOTE: Un and pn have shape = (n1, 8)
# NOTE: pn is initialized & defaults to 0.
#------------------------------------------------
Un_max = np.amax( Un, axis=1 ) # a 1D array
wg = (Un_max > 0)
pn = np.zeros(Un.shape, dtype='float32')
pn[ wg,: ] = self.c_spread * Un[wg,:] / (Un_max[wg,None])
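#------------------------------------------------
# Broadcasting check: Un[wg,:] has shape (m, 8) and
# Un_max[wg,None] has shape (m, 1), so the division
# broadcasts each row's maximum across its 8 neighbor
# columns; the largest entry in every row of pn is
# therefore exactly c_spread.
#------------------------------------------------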
#--------------------------------------
# Alternate method that uses U and C1
#--------------------------------------
# Rn = Un * C1n
# Rn_max = np.amax( Rn, axis=1 ) # a 1D array
# wg = (Rn_max > 0)
# pn = np.zeros(Rn.shape, dtype='float32')
# pn[ wg,: ] = self.c_spread * Rn[wg,:] / (Rn_max[wg,None])
#---------------------------------------------
# Use Bernoulli r.v.s to determine spreading
#---------------------------------------------
cn = self.cn
rn = self.rn
n_start = self.n_conflict_cells
if (USE_LOOP):
for k in range(n1):
B = np.random.binomial(1, pn[k,:])
#!/usr/bin/env python3
import sys
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import multiprocessing as mp
import getopt
import imageio
import os
import pylab as pb
def create_movie(filenames, fps, output_file):
with imageio.get_writer(output_file, fps=fps) as writer:
for filename in filenames:
writer.append_data(imageio.imread(filename))
writer.close()
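# Hypothetical usage sketch (the file names are placeholders for
# the frames written by createImage below):
#     create_movie(['pot0.png', 'pot1.png', 'pot2.png'],
#                  fps=30, output_file='pot_movie.mp4')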
def createImage(index_list, xyscale, plot_format, log_flag, **kwargs):
"""
index_list: file index list
xyscale: plot x, y scale
plot_format: plot format
log_flag: if true, color is in logscale
kwargs: vmin, vmax
"""
fig, axes=plt.subplots(1,3,figsize=(8,3), gridspec_kw={'width_ratios': [20, 20, 1]})
plt.subplots_adjust(wspace=0.4)
for k in index_list:
print('process index ',k)
data=dict()
labels=['xy','xz']
xylabels=[['X','Y'],['X','Z']]
for i in range(len(labels)):
axes[i].clear()
key=labels[i]
data[key]=dict()
fname = key+str(k)
fp = open(fname, 'r')
header = fp.readline()
fp.close()
time, nx, ny = header.split()
nx=int(nx)
ny=int(ny)
x, y, z, ax, ay, az, pot = np.loadtxt(fname, unpack=True, usecols=(1,2,3,7,8,9,10),skiprows=1)
data[key]['x']=x.reshape(nx,ny)
data[key]['y']=y.reshape(nx,ny)
data[key]['z']=z.reshape(nx,ny)
data[key]['pot']=pot.reshape(nx,ny)
count = np.log10(-data[key]['pot']) if log_flag else -data[key]['pot']
cset = axes[i].contour(count,linewidths=2,extent=xyscale[i], **kwargs)
axes[i].clabel(cset,inline=True,fmt='%1.1f',fontsize=10)
axes[i].set_xlabel(xylabels[i][0])
axes[i].set_ylabel(xylabels[i][1])
im = axes[i].imshow(count,cmap=pb.cm.RdBu,
aspect=(xyscale[i][1]-xyscale[i][0])/(xyscale[i][3]-xyscale[i][2]),
interpolation='bilinear', origin='lower',
extent=xyscale[i], **kwargs)
axes[0].set_title('Time = %s' % time)
cbar = plt.colorbar(im, cax = axes[2])
if log_flag: cbar.set_label(r'$\log{(-P)}$')
else: cbar.set_label(r'-P')
fig.savefig('pot'+str(k)+'.png', bbox_inches = "tight")
if __name__ == '__main__':
fps = 30
output_file = 'pot_movie'
plot_format='mp4'
n_cpu = 0
log_flag = False
kwargs=dict()
def usage():
print("A tool to generate a movie for the evolution of galpy potential")
print("Need to use petar.galpy first to generate snapshots of potential map")
print("Usage: petar.galpy.pot.movie [options] [petar.galpy parameter file]")
print("Options:")
print(" -h(--help): help")
print(" -f [F]: output frame FPS: ",fps)
print(" -o [S]: output movie filename: ",output_file)
print(" --vmin [F]: (positive) potential minimum for color map, if not provided, use first snapshot for reference")
print(" --vmax [F]: (positive) potential maximum for color map")
print(" --log: color map is in logscale")
print(" --n-cpu [I]: number of CPU processors to use: all CPU cores")
print(" --format [S]: video format, require imageio installed, for some formats (e.g. avi, mp4) may require ffmpeg and imageio-ffmpeg installed: ", plot_format)
try:
shortargs = 'f:o:h'
longargs = ['help','vmax=','vmin=','format=','n-cpu=','log']
opts, remainder = getopt.getopt(sys.argv[1:], shortargs, longargs)
for opt,arg in opts:
if opt in ('-h','--help'):
usage()
sys.exit(1)
elif opt in ('-f'):
fps = float(arg)
elif opt in ('-o'):
output_file = arg
elif opt in ('--vmin'):
kwargs['vmin'] = float(arg)
elif opt in ('--vmax'):
kwargs['vmax'] = float(arg)
elif opt in ('--format'):
plot_format = arg
elif opt in ('--n-cpu'):
n_cpu = int(arg)
elif opt in ('--log'):
log_flag = True
else:
assert False, "unhandeld option"
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
fpar = remainder[0]
fp = open(fpar, 'r')
header = fp.readline()
fp.close()
t0, dt, nstep, dt_out, xmin, xmax, nx, ymin, ymax, ny, zmin, zmax, nz = header.split()
xmin=float(xmin)*1e-3
xmax=float(xmax)*1e-3
ymin=float(ymin)*1e-3
ymax=float(ymax)*1e-3
zmin=float(zmin)*1e-3
zmax=float(zmax)*1e-3
nstep= int(nstep)
xyscale=[[xmin,xmax,ymin,ymax],[xmin,xmax,zmin,zmax]]
if ('vmin' not in kwargs) or ('vmax' not in kwargs):
pot = np.loadtxt('xy0', unpack=True, usecols=(10),skiprows=1)
pot_min = pot.min()
pot_max = pot.max()
pot = np.loadtxt('xz0', unpack=True, usecols=(10),skiprows=1)
pot_min = np.minimum(pot_min, pot.min())
pot_max = np.maximum(pot_max, pot.max())
if (not 'vmin' in kwargs.keys()):
if (log_flag): kwargs['vmin'] = np.log10(-pot_max)
else: kwargs['vmin'] = -pot_max
if (not 'vmax' in kwargs.keys()):
if (log_flag): kwargs['vmax'] = np.log10(-pot_min)
else: kwargs['vmax'] = -pot_min
# -*- coding: utf-8 -*-
"""
v9s model
* Input: v5_im
Author: Kohei <<EMAIL>>
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from pathlib import Path
import subprocess
import argparse
import math
import glob
import sys
import json
import re
import warnings
import scipy
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
import skimage.transform
import skimage.morphology
import skimage.draw
import rasterio.features
import shapely.wkt
import shapely.ops
import shapely.geometry
MODEL_NAME = 'v9s'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
BASE_DIR = "/data/train"
WORKING_DIR = "/data/working"
IMAGE_DIR = "/data/working/images/{}".format('v5')
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME)
# Parameters
MIN_POLYGON_AREA = 30
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# Preprocessing result
FMT_BANDCUT_TH_PATH = IMAGE_DIR + "/bandcut{}.csv"
FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut{}.csv"
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5"
FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5"
FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5"
FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5"
FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5"
FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5"
FMT_IMMEAN = IMAGE_DIR + "/{}_immean.h5"
FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5"
# Model files
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# Logger
warnings.simplefilter("ignore", UserWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)
if __name__ == '__main__':
logger.addHandler(handler)
logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
"""
Directory name to AOI number
Usage:
>>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
2
"""
dir_name = Path(datapath).name
if dir_name.startswith('AOI_2_Vegas'):
return 2
elif dir_name.startswith('AOI_3_Paris'):
return 3
elif dir_name.startswith('AOI_4_Shanghai'):
return 4
elif dir_name.startswith('AOI_5_Khartoum'):
return 5
else:
raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
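# Example (hypothetical row): a CSV line ending in
#     ...,"POLYGON ((0 0, 3 0, 3 3, 0 0), (1 1, 2 1, 2 2, 1 1))",0
# is reduced to
#     ...,"POLYGON ((0 0, 3 0, 3 3, 0 0))",0
# i.e. only the first (exterior) ring is kept and the interior
# rings are dropped.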
def __load_band_cut_th(band_fn, bandsz=3):
df = pd.read_csv(band_fn, index_col='area_id')
all_band_cut_th = {area_id: {} for area_id in range(2, 6)}
for area_id, row in df.iterrows():
for chan_i in range(bandsz):
all_band_cut_th[area_id][chan_i] = dict(
min=row['chan{}_min'.format(chan_i)],
max=row['chan{}_max'.format(chan_i)],
)
return all_band_cut_th
def _calc_fscore_per_aoi(area_id):
prefix = area_id_to_prefix(area_id)
truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
cmd = [
'java',
'-jar',
'/root/visualizer-2.0/visualizer.jar',
'-truth',
truth_file,
'-solution',
poly_file,
'-no-gui',
'-band-triplets',
'/root/visualizer-2.0/data/band-triplets.txt',
'-image-dir',
'pass',
]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = proc.communicate()
lines = stdout_data.decode('utf8').split('\n')[-10:]
"""
Overall F-score : 0.85029
AOI_2_Vegas:
TP : 27827
FP : 4999
FN : 4800
Precision: 0.847712
Recall : 0.852883
F-score : 0.85029
"""
if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
overall_fscore = 0
tp = 0
fp = 0
fn = 0
precision = 0
recall = 0
fscore = 0
elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
assert lines[0].startswith("Overall F-score : ")
assert lines[2].startswith("AOI_")
assert lines[3].strip().startswith("TP")
assert lines[4].strip().startswith("FP")
assert lines[5].strip().startswith("FN")
assert lines[6].strip().startswith("Precision")
assert lines[7].strip().startswith("Recall")
assert lines[8].strip().startswith("F-score")
overall_fscore = float(re.findall("([\d\.]+)", lines[0])[0])
tp = int(re.findall("(\d+)", lines[3])[0])
fp = int(re.findall("(\d+)", lines[4])[0])
fn = int(re.findall("(\d+)", lines[5])[0])
precision = float(re.findall("([\d\.]+)", lines[6])[0])
recall = float(re.findall("([\d\.]+)", lines[7])[0])
fscore = float(re.findall("([\d\.]+)", lines[8])[0])
else:
logger.warning("Unexpected data >>> " + stdout_data.decode('utf8'))
raise RuntimeError("Unsupported format")
return {
'overall_fscore': overall_fscore,
'tp': tp,
'fp': fp,
'fn': fn,
'precision': precision,
'recall': recall,
'fscore': fscore,
}
def prefix_to_area_id(prefix):
area_dict = {
'AOI_2_Vegas': 2,
'AOI_3_Paris': 3,
'AOI_4_Shanghai': 4,
'AOI_5_Khartoum': 5,
}
return area_dict[prefix]
def area_id_to_prefix(area_id):
area_dict = {
2: 'AOI_2_Vegas',
3: 'AOI_3_Paris',
4: 'AOI_4_Shanghai',
5: 'AOI_5_Khartoum',
}
return area_dict[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
prefix = area_id_to_prefix(area_id)
fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
best_row = pd.read_csv(fn_hist).sort_values(
by='fscore',
ascending=False,
).iloc[0]
param = dict(
fn_epoch=int(best_row['zero_base_epoch']),
min_poly_area=int(best_row['min_area_th']),
)
return param
def get_resized_raster_3chan_image(image_id, band_cut_th=None):
fn = train_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
return values
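# Worked example of the per-channel scaling above (hypothetical
# thresholds): with min_val = 100 and max_val = 1100, a raw pixel
# value of 600 is first clipped (a no-op here) and then mapped to
# (600 - 100) / (1100 - 100) = 0.5; values outside the band-cut
# range saturate at 0.0 or 1.0.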
def get_resized_raster_3chan_image_test(image_id, band_cut_th=None):
fn = test_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
return values
def image_mask_resized_from_summary(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = skimage.transform.resize(im_mask, (INPUT_SIZE, INPUT_SIZE))
im_mask = (im_mask > 0.5).astype(np.uint8)
return im_mask
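# Note on the final threshold: resizing the 650x650 binary mask to
# INPUT_SIZE x INPUT_SIZE produces fractional values at building
# edges, so the (> 0.5) test re-binarizes the mask after the
# interpolation.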
def train_test_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(
FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_cut_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_TRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_3chan_image_test(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def valtrain_test_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("valtrain_test_image_prep for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_cut_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_VALTRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
fn = FMT_VALTEST_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def train_test_mul_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(
FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_rgb_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
band_mul_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_TRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_8chan_image_test(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
def valtrain_test_mul_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("valtrain_test_image_prep for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_rgb_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
band_mul_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_VALTRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
def _load_train_summary_data(area_id):
prefix = area_id_to_prefix(area_id)
fn = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df = pd.read_csv(fn)
return df
def split_val_train_test(area_id):
prefix = area_id_to_prefix(area_id)
df = _load_train_summary_data(area_id)
df_agg = df.groupby('ImageId').agg('first')
image_id_list = df_agg.index.tolist()
np.random.shuffle(image_id_list)
sz_valtrain = int(len(image_id_list) * 0.7)
sz_valtest = len(image_id_list) - sz_valtrain
pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index=False)
pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index=False)
def train_image_id_to_mspec_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_mspec_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def train_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def image_id_to_prefix(image_id):
prefix = image_id.split('img')[0][:-1]
return prefix
def calc_multiband_cut_threshold(area_id):
rows = []
band_cut_th = __calc_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(FMT_BANDCUT_TH_PATH.format(prefix), index=False)
def __calc_multiband_cut_threshold(area_id):
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(3)}
band_cut_th = {k: dict(max=0, min=0) for k in range(3)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
for i_chan in range(3):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
band_cut_th[i_chan]['max'] = scipy.percentile(
band_values[i_chan], 98)
band_cut_th[i_chan]['min'] = scipy.percentile(
band_values[i_chan], 2)
return band_cut_th
def calc_mul_multiband_cut_threshold(area_id):
rows = []
band_cut_th = __calc_mul_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(
FMT_MUL_BANDCUT_TH_PATH.format(prefix),
index=False)
def __calc_mul_multiband_cut_threshold(area_id):
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(8)}
band_cut_th = {k: dict(max=0, min=0) for k in range(8)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
for i_chan in range(8):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
band_cut_th[i_chan]['max'] = scipy.percentile(
band_values[i_chan], 98)
band_cut_th[i_chan]['min'] = scipy.percentile(
band_values[i_chan], 2)
return band_cut_th
def get_unet():
conv_params = dict(activation='relu', border_mode='same')
merge_params = dict(mode='concat', concat_axis=1)
inputs = Input((8, 256, 256))
conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs)
conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5)
up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
conv6 = Convolution2D(256, 3, 3, **conv_params)(up6)
conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6)
up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
conv7 = Convolution2D(128, 3, 3, **conv_params)(up7)
conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7)
up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
conv8 = Convolution2D(64, 3, 3, **conv_params)(up8)
conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8)
up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
conv9 = Convolution2D(32, 3, 3, **conv_params)(up9)
conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
adam = Adam()
model = Model(input=inputs, output=conv10)
model.compile(optimizer=adam,
loss='binary_crossentropy',
metrics=['accuracy', jaccard_coef, jaccard_coef_int])
return model
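# Shape check for the U-Net above: with INPUT_SIZE = 256, the four
# 2x2 max-pooling layers shrink the feature maps to 16x16 at the
# bottleneck (256 / 2**4), and the four upsampling/merge steps
# restore 256x256 before the final 1-channel sigmoid output.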
def jaccard_coef(y_true, y_pred):
smooth = 1e-12
intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
smooth = 1e-12
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
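# Both metrics implement the (smoothed) Jaccard index
#     J = (TP + eps) / (|A| + |B| - TP + eps),
# where TP is the number of overlapping pixels.  Worked example
# (hypothetical counts): if the true and predicted masks overlap on
# 50 pixels and each covers 100 pixels, J = 50 / (100 + 100 - 50) = 1/3.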
def generate_test_batch(area_id,
batch_size=64,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_TEST_MUL_STORE.format(prefix)
image_id_list = df_test.ImageId.tolist()
if enable_tqdm:
pbar = tqdm.tqdm(total=len(image_id_list))
while 1:
total_sz = len(image_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im:
for i_batch in range(n_batch):
target_image_ids = image_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_image_ids) == 0:
continue
X_test = []
y_test = []
for image_id in target_image_ids:
im = np.array(f_im.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
y_test.append(mask)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_test = X_test - immean
if enable_tqdm:
pbar.update(y_test.shape[0])
yield (X_test, y_test)
if enable_tqdm:
pbar.close()
def get_resized_raster_8chan_image_test(image_id, band_rgb_th, band_mul_th):
"""
RGB + multispectral (total: 8 channels)
"""
im = []
fn = test_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_rgb_th[chan_i]['min']
max_val = band_rgb_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
fn = test_image_id_to_mspec_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
usechannels = [1, 2, 5, 6, 7]
for chan_i in usechannels:
min_val = band_mul_th[chan_i]['min']
max_val = band_mul_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
im = np.array(im) # (ch, w, h)
im = np.swapaxes(im, 0, 2) # -> (h, w, ch)
im = np.swapaxes(im, 0, 1) # -> (w, h, ch)
return im
def get_resized_raster_8chan_image(image_id, band_rgb_th, band_mul_th):
"""
RGB + multispectral (total: 8 channels)
"""
im = []
fn = train_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_rgb_th[chan_i]['min']
max_val = band_rgb_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
usechannels = [1, 2, 5, 6, 7]
for chan_i in usechannels:
min_val = band_mul_th[chan_i]['min']
max_val = band_mul_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
im = np.array(im) # (ch, w, h)
im = np.swapaxes(im, 0, 2) # -> (h, w, ch)
im = np.swapaxes(im, 0, 1) # -> (w, h, ch)
return im
def _get_train_mul_data(area_id):
"""
RGB + multispectral (total: 8 channels)
"""
prefix = area_id_to_prefix(area_id)
fn_train = FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_train = []
fn_im = FMT_TRAIN_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_train.append(im)
X_train = np.array(X_train)
y_train = []
fn_mask = FMT_TRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_train.append(mask)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_train, y_train
def _get_test_mul_data(area_id):
"""
RGB + multispectral (total: 8 channels)
"""
prefix = area_id_to_prefix(area_id)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_test = []
fn_im = FMT_TEST_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
X_test = np.array(X_test)
return X_test
def _get_valtest_mul_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_val = []
fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def _get_valtrain_mul_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_val = []
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def get_mul_mean_image(area_id):
prefix = area_id_to_prefix(area_id)
with tb.open_file(FMT_MULMEAN.format(prefix), 'r') as f:
im_mean = np.array(f.get_node('/mulmean'))
return im_mean
def preproc_stage3(area_id):
prefix = area_id_to_prefix(area_id)
if not Path(FMT_VALTEST_MUL_STORE.format(prefix)).exists():
valtrain_test_mul_image_prep(area_id)
if not Path(FMT_TEST_MUL_STORE.format(prefix)).exists():
train_test_mul_image_prep(area_id)
# mean image for subtract preprocessing
X1, _ = _get_train_mul_data(area_id)
X2 = _get_test_mul_data(area_id)
X = np.vstack([X1, X2])
from __future__ import division
import unittest
import shutil
import os
import time
import warnings
import copy
import pytest
import netCDF4
import numpy as np
from numpy.testing import assert_allclose
from salem.tests import (requires_travis, requires_geopandas, requires_dask,
requires_matplotlib, requires_cartopy)
from salem import utils, transform_geopandas, GeoTiff, read_shapefile, sio
from salem import read_shapefile_to_grid
from salem.utils import get_demo_file
current_dir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(current_dir, 'tmp')
def is_cartopy_rotated_working():
from salem.gis import proj_to_cartopy
from cartopy.crs import PlateCarree
import pyproj
cp = pyproj.Proj('+ellps=WGS84 +proj=ob_tran +o_proj=latlon '
'+to_meter=0.0174532925199433 +o_lon_p=0.0 +o_lat_p=80.5 '
'+lon_0=357.5 +no_defs')
cp = proj_to_cartopy(cp)
out = PlateCarree().transform_points(cp, np.array([-20]), np.array([-9]))
if not (np.allclose(out[0, 0], -22.243473889042903, atol=1e-5) and
np.allclose(out[0, 1], -0.06328365194179102, atol=1e-5)):
# Cartopy also had issues
return False
return True
@requires_geopandas
def create_dummy_shp(fname):
import shapely.geometry as shpg
import geopandas as gpd
e_line = shpg.LinearRing([(1.5, 1), (2., 1.5), (1.5, 2.), (1, 1.5)])
i_line = shpg.LinearRing([(1.4, 1.4), (1.6, 1.4), (1.6, 1.6), (1.4, 1.6)])
p1 = shpg.Polygon(e_line, [i_line])
p2 = shpg.Polygon([(2.5, 1.3), (3., 1.8), (2.5, 2.3), (2, 1.8)])
p3 = shpg.Point(0.5, 0.5)
p4 = shpg.Point(1, 1)
df = gpd.GeoDataFrame()
df['name'] = ['Polygon', 'Line']
df['geometry'] = gpd.GeoSeries([p1, p2])
of = os.path.join(testdir, fname)
df.to_file(of)
return of
def delete_test_dir():
if os.path.exists(testdir):
shutil.rmtree(testdir)
class TestUtils(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_travis
def test_empty_cache(self):
utils.empty_cache()
def test_hash_cache_dir(self):
h1 = utils._hash_cache_dir()
h2 = utils._hash_cache_dir()
self.assertEqual(h1, h2)
def test_demofiles(self):
self.assertTrue(os.path.exists(utils.get_demo_file('dem_wgs84.nc')))
self.assertTrue(utils.get_demo_file('dummy') is None)
def test_read_colormap(self):
cl = utils.read_colormap('topo') * 256
assert_allclose(cl[4, :], (177, 242, 196))
assert_allclose(cl[-1, :], (235, 233, 235))
cl = utils.read_colormap('dem') * 256
assert_allclose(cl[4, :], (153,100, 43))
assert_allclose(cl[-1, :], (255,255,255))
def test_reduce(self):
arr = [[1, 1, 2, 2], [1, 1, 2, 2]]
assert_allclose(utils.reduce(arr, 1), arr)
assert_allclose(utils.reduce(arr, 2), [[1, 2]])
assert_allclose(utils.reduce(arr, 2, how=np.sum), [[4, 8]])
arr = np.stack([arr, arr, arr])
assert_allclose(arr.shape, (3, 2, 4))
assert_allclose(utils.reduce(arr, 1), arr)
assert_allclose(utils.reduce(arr, 2), [[[1, 2]], [[1, 2]], [[1, 2]]])
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[4, 8]], [[4, 8]], [[4, 8]]])
arr[0, ...] = 0
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[0, 0]], [[4, 8]], [[4, 8]]])
arr[1, ...] = 1
assert_allclose(utils.reduce(arr, 2, how=np.sum),
[[[0, 0]], [[4, 4]], [[4, 8]]])
class TestIO(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_geopandas
def test_cache_working(self):
f1 = 'f1.shp'
f1 = create_dummy_shp(f1)
cf1 = utils.cached_shapefile_path(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1, cached=True)
self.assertTrue(os.path.exists(cf1))
# nested calls
self.assertTrue(cf1 == utils.cached_shapefile_path(cf1))
# wait a bit
time.sleep(0.1)
f1 = create_dummy_shp(f1)
cf2 = utils.cached_shapefile_path(f1)
self.assertFalse(os.path.exists(cf1))
_ = read_shapefile(f1, cached=True)
self.assertFalse(os.path.exists(cf1))
self.assertTrue(os.path.exists(cf2))
df = read_shapefile(f1, cached=True)
np.testing.assert_allclose(df.min_x, [1., 2.])
np.testing.assert_allclose(df.max_x, [2., 3.])
np.testing.assert_allclose(df.min_y, [1., 1.3])
np.testing.assert_allclose(df.max_y, [2., 2.3])
self.assertRaises(ValueError, read_shapefile, 'f1.sph')
self.assertRaises(ValueError, utils.cached_shapefile_path, 'f1.splash')
@requires_geopandas
def test_read_to_grid(self):
g = GeoTiff(utils.get_demo_file('hef_srtm.tif'))
sf = utils.get_demo_file('Hintereisferner_UTM.shp')
df1 = read_shapefile_to_grid(sf, g.grid)
df2 = transform_geopandas(read_shapefile(sf), to_crs=g.grid)
assert_allclose(df1.geometry[0].exterior.coords,
df2.geometry[0].exterior.coords)
# test for caching
d = g.grid.to_dict()
# change key ordering by chance
d2 = dict((k, v) for k, v in d.items())
from salem.sio import _memory_shapefile_to_grid, cached_shapefile_path
shape_cpath = cached_shapefile_path(sf)
res = _memory_shapefile_to_grid.call_and_shelve(shape_cpath,
grid=g.grid,
**d)
try:
h1 = res.timestamp
except AttributeError:
h1 = res.argument_hash
res = _memory_shapefile_to_grid.call_and_shelve(shape_cpath,
grid=g.grid,
**d2)
try:
h2 = res.timestamp
except AttributeError:
h2 = res.argument_hash
self.assertEqual(h1, h2)
def test_notimevar(self):
import xarray as xr
da = xr.DataArray(np.arange(12).reshape(3, 4), dims=['lat', 'lon'])
ds = da.to_dataset(name='var')
t = sio.netcdf_time(ds)
assert t is None
class TestSkyIsFalling(unittest.TestCase):
@requires_matplotlib
def test_projplot(self):
# this caused many problems on fabien's laptop.
# this is just to be sure that on your system, everything is fine
import pyproj
import matplotlib.pyplot as plt
from salem.gis import transform_proj, check_crs
wgs84 = pyproj.Proj(proj='latlong', datum='WGS84')
fig = plt.figure()
plt.close()
srs = '+units=m +proj=lcc +lat_1=29.0 +lat_2=29.0 +lat_0=29.0 +lon_0=89.8'
proj_out = check_crs('EPSG:4326')
proj_in = pyproj.Proj(srs, preserve_units=True)
lon, lat = transform_proj(proj_in, proj_out, -2235000, -2235000)
np.testing.assert_allclose(lon, 70.75731, atol=1e-5)
def test_gh_152(self):
# https://github.com/fmaussion/salem/issues/152
import xarray as xr
da = xr.DataArray(np.arange(20).reshape(4, 5), dims=['lat', 'lon'],
coords={'lat': np.linspace(0, 30, 4),
'lon': np.linspace(-20, 20, 5)})
da.salem.roi()
class TestXarray(unittest.TestCase):
def setUp(self):
if not os.path.exists(testdir):
os.makedirs(testdir)
def tearDown(self):
delete_test_dir()
@requires_dask
def test_era(self):
ds = sio.open_xr_dataset(get_demo_file('era_interim_tibet.nc')).chunk()
self.assertEqual(ds.salem.x_dim, 'longitude')
self.assertEqual(ds.salem.y_dim, 'latitude')
dss = ds.salem.subset(ds=ds)
self.assertEqual(dss.salem.grid, ds.salem.grid)
lon = 91.1
lat = 31.1
dss = ds.salem.subset(corners=((lon, lat), (lon, lat)), margin=1)
self.assertEqual(len(dss.latitude), 3)
self.assertEqual(len(dss.longitude), 3)
np.testing.assert_almost_equal(dss.longitude, [90.0, 90.75, 91.5])
def test_roi(self):
import xarray as xr
# Check that all attrs are preserved
with sio.open_xr_dataset(get_demo_file('era_interim_tibet.nc')) as ds:
ds.encoding = {'_FillValue': np.NaN}
ds['t2m'].encoding = {'_FillValue': np.NaN}
ds_ = ds.salem.roi(roi=np.ones_like(ds.t2m.values[0, ...]))
xr.testing.assert_identical(ds, ds_)
assert ds.encoding == ds_.encoding
assert ds.t2m.encoding == ds_.t2m.encoding
@requires_geopandas # because of the grid tests, more robust with GDAL
def test_basic_wrf(self):
import xarray as xr
ds = sio.open_xr_dataset(get_demo_file('wrf_tip_d1.nc')).chunk()
# this is because read_dataset changes some stuff, let's see if
# georef still ok
dsxr = xr.open_dataset(get_demo_file('wrf_tip_d1.nc'))
assert ds.salem.grid == dsxr.salem.grid
lon, lat = ds.salem.grid.ll_coordinates
assert_allclose(lon, ds['XLONG'], atol=1e-4)
assert_allclose(lat, ds['XLAT'], atol=1e-4)
# then something strange happened
assert ds.isel(Time=0).salem.grid == ds.salem.grid
assert ds.isel(Time=0).T2.salem.grid == ds.salem.grid
nlon, nlat = ds.isel(Time=0).T2.salem.grid.ll_coordinates
assert_allclose(nlon, ds['XLONG'], atol=1e-4)
assert_allclose(nlat, ds['XLAT'], atol=1e-4)
# the grid should not be missunderstood as lonlat
t2 = ds.T2.isel(Time=0) - 273.15
with pytest.raises(RuntimeError):
g = t2.salem.grid
@requires_dask
def test_geo_em(self):
for i in [1, 2, 3]:
fg = get_demo_file('geo_em_d0{}_lambert.nc'.format(i))
ds = sio.open_wrf_dataset(fg).chunk()
self.assertFalse('Time' in ds.dims)
self.assertTrue('time' in ds.dims)
self.assertTrue('south_north' in ds.dims)
self.assertTrue('south_north' in ds.coords)
@requires_geopandas # because of the grid tests, more robust with GDAL
def test_wrf(self):
import xarray as xr
ds = sio.open_wrf_dataset(get_demo_file('wrf_tip_d1.nc')).chunk()
# this is because read_dataset changes some stuff, let's see if
# georef still ok
dsxr = xr.open_dataset(get_demo_file('wrf_tip_d1.nc'))
assert ds.salem.grid == dsxr.salem.grid
lon, lat = ds.salem.grid.ll_coordinates
assert_allclose(lon, ds['lon'], atol=1e-4)
assert_allclose(lat, ds['lat'], atol=1e-4)
# then something strange happened
assert ds.isel(time=0).salem.grid == ds.salem.grid
assert ds.isel(time=0).T2.salem.grid == ds.salem.grid
nlon, nlat = ds.isel(time=0).T2.salem.grid.ll_coordinates
assert_allclose(nlon, ds['lon'], atol=1e-4)
assert_allclose(nlat, ds['lat'], atol=1e-4)
# the grid should not be missunderstood as lonlat
t2 = ds.T2.isel(time=0) - 273.15
with pytest.raises(RuntimeError):
g = t2.salem.grid
@requires_dask
def test_ncl_diagvars(self):
import xarray as xr
wf = get_demo_file('wrf_cropped.nc')
ncl_out = get_demo_file('wrf_cropped_ncl.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = xr.open_dataset(ncl_out)
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=1e-6)
ref = nc['SLP']
tot = w['SLP']
tot = tot.values
assert_allclose(ref, tot, rtol=1e-6)
w = w.isel(time=1, south_north=slice(12, 16), west_east=slice(9, 16))
nc = nc.isel(Time=1, south_north=slice(12, 16), west_east=slice(9, 16))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=1e-6)
ref = nc['SLP']
tot = w['SLP']
tot = tot.values
assert_allclose(ref, tot, rtol=1e-6)
w = w.isel(bottom_top=slice(3, 5))
nc = nc.isel(bottom_top=slice(3, 5))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=1e-6)
ref = nc['SLP']
tot = w['SLP']
tot = tot.values
assert_allclose(ref, tot, rtol=1e-6)
@requires_dask
def test_ncl_diagvars_compressed(self):
rtol = 2e-5
import xarray as xr
wf = get_demo_file('wrf_cropped_compressed.nc')
ncl_out = get_demo_file('wrf_cropped_ncl.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = xr.open_dataset(ncl_out)
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=rtol)
ref = nc['SLP']
tot = w['SLP'].data
assert_allclose(ref, tot, rtol=rtol)
w = w.isel(time=1, south_north=slice(12, 16), west_east=slice(9, 16))
nc = nc.isel(Time=1, south_north=slice(12, 16), west_east=slice(9, 16))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=rtol)
ref = nc['SLP']
tot = w['SLP']
assert_allclose(ref, tot, rtol=rtol)
w = w.isel(bottom_top=slice(3, 5))
nc = nc.isel(bottom_top=slice(3, 5))
ref = nc['TK']
tot = w['TK']
assert_allclose(ref, tot, rtol=rtol)
ref = nc['SLP']
tot = w['SLP']
assert_allclose(ref, tot, rtol=rtol)
@requires_dask
def test_unstagger(self):
wf = get_demo_file('wrf_cropped.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = sio.open_xr_dataset(wf).chunk()
nc['PH_UNSTAGG'] = nc['P']*0.
uns = nc['PH'].isel(bottom_top_stag=slice(0, -1)).values + \
nc['PH'].isel(bottom_top_stag=slice(1, len(nc.bottom_top_stag))).values
nc['PH_UNSTAGG'].values = uns * 0.5
assert_allclose(w['PH'], nc['PH_UNSTAGG'])
# chunk
v = w['PH'].chunk((1, 6, 13, 13))
assert_allclose(v.mean(), nc['PH_UNSTAGG'].mean(), atol=1e-2)
wn = w.isel(west_east=slice(4, 8))
ncn = nc.isel(west_east=slice(4, 8))
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(south_north=slice(4, 8), time=1)
ncn = nc.isel(south_north=slice(4, 8), Time=1)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(west_east=4)
ncn = nc.isel(west_east=4)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(bottom_top=4)
ncn = nc.isel(bottom_top=4)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(bottom_top=0)
ncn = nc.isel(bottom_top=0)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
wn = w.isel(bottom_top=-1)
ncn = nc.isel(bottom_top=-1)
assert_allclose(wn['PH'], ncn['PH_UNSTAGG'])
w['PH'].chunk()
@requires_dask
def test_unstagger_compressed(self):
wf = get_demo_file('wrf_cropped.nc')
wfc = get_demo_file('wrf_cropped_compressed.nc')
w = sio.open_wrf_dataset(wf).chunk()
wc = sio.open_wrf_dataset(wfc).chunk()
assert_allclose(w['PH'], wc['PH'], rtol=0.003)
@requires_dask
def test_diagvars(self):
wf = get_demo_file('wrf_d01_allvars_cropped.nc')
w = sio.open_wrf_dataset(wf).chunk()
# ws
w['ws_ref'] = np.sqrt(w['U']**2 + w['V']**2)
assert_allclose(w['ws_ref'], w['WS'])
wcrop = w.isel(west_east=slice(4, 8), bottom_top=4)
assert_allclose(wcrop['ws_ref'], wcrop['WS'])
@requires_dask
def test_diagvars_compressed(self):
wf = get_demo_file('wrf_d01_allvars_cropped_compressed.nc')
w = sio.open_wrf_dataset(wf).chunk()
# ws
w['ws_ref'] = np.sqrt(w['U']**2 + w['V']**2)
assert_allclose(w['ws_ref'], w['WS'])
wcrop = w.isel(west_east=slice(4, 8), bottom_top=4)
assert_allclose(wcrop['ws_ref'], wcrop['WS'])
@requires_dask
def test_prcp(self):
wf = get_demo_file('wrfout_d01.nc')
w = sio.open_wrf_dataset(wf).chunk()
nc = sio.open_xr_dataset(wf)
nc['REF_PRCP_NC'] = nc['RAINNC']*0.
uns = nc['RAINNC'].isel(Time=slice(1, len(nc.bottom_top_stag))).values - \
nc['RAINNC'].isel(Time=slice(0, -1)).values
nc['REF_PRCP_NC'].values[1:, ...] = uns * 60 / 180. # for three hours
nc['REF_PRCP_NC'].values[0, ...] = np.NaN
nc['REF_PRCP_C'] = nc['RAINC']*0.
uns = nc['RAINC'].isel(Time=slice(1, len(nc.bottom_top_stag))).values - \
nc['RAINC'].isel(Time=slice(0, -1)).values
nc['REF_PRCP_C'].values[1:, ...] = uns * 60 / 180. # for three hours
nc['REF_PRCP_C'].values[0, ...] = np.NaN
nc['REF_PRCP'] = nc['REF_PRCP_C'] + nc['REF_PRCP_NC']
for suf in ['_NC', '_C', '']:
assert_allclose(w['PRCP' + suf], nc['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=slice(1, 3))
ncn = nc.isel(Time=slice(1, 3))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=2)
ncn = nc.isel(Time=2)
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=1)
ncn = nc.isel(Time=1)
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=0)
self.assertTrue(~np.any(np.isfinite(wn['PRCP' + suf].values)))
wn = w.isel(time=slice(1, 3), south_north=slice(50, -1))
ncn = nc.isel(Time=slice(1, 3), south_north=slice(50, -1))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=2, south_north=slice(50, -1))
ncn = nc.isel(Time=2, south_north=slice(50, -1))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=1, south_north=slice(50, -1))
ncn = nc.isel(Time=1, south_north=slice(50, -1))
assert_allclose(wn['PRCP' + suf], ncn['REF_PRCP' + suf], rtol=1e-5)
wn = w.isel(time=0, south_north=slice(50, -1))
self.assertTrue(~np.any(np.isfinite(wn['PRCP' + suf].values)))
@requires_dask
def test_prcp_compressed(self):
wf = get_demo_file('wrfout_d01.nc')
wfc = get_demo_file('wrfout_d01_compressed.nc')
w = sio.open_wrf_dataset(wf).chunk().isel(time=slice(1, -1))
wc = sio.open_wrf_dataset(wfc).chunk().isel(time=slice(1, -1))
for suf in ['_NC', '_C', '']:
assert_allclose(w['PRCP' + suf], wc['PRCP' + suf], atol=0.0003)
@requires_geopandas # because of the grid tests, more robust with GDAL
def test_transform_logic(self):
# This is just for the naming and dim logic, the rest is tested elsewh
ds1 = sio.open_wrf_dataset(get_demo_file('wrfout_d01.nc')).chunk()
ds2 = sio.open_wrf_dataset(get_demo_file('wrfout_d01.nc')).chunk()
# 2darray case
t2 = ds2.T2.isel(time=1)
with pytest.raises(ValueError):
ds1.salem.transform_and_add(t2.values, grid=t2.salem.grid)
ds1.salem.transform_and_add(t2.values, grid=t2.salem.grid, name='t2_2darr')
assert 't2_2darr' in ds1
assert_allclose(ds1.t2_2darr.coords['south_north'],
t2.coords['south_north'])
assert_allclose(ds1.t2_2darr.coords['west_east'],
t2.coords['west_east'])
assert ds1.salem.grid == ds1.t2_2darr.salem.grid
# 3darray case
t2 = ds2.T2
ds1.salem.transform_and_add(t2.values, grid=t2.salem.grid, name='t2_3darr')
assert 't2_3darr' in ds1
assert_allclose(ds1.t2_3darr.coords['south_north'],
t2.coords['south_north'])
assert_allclose(ds1.t2_3darr.coords['west_east'],
t2.coords['west_east'])
assert 'time' in ds1.t2_3darr.coords
# dataarray case
ds1.salem.transform_and_add(t2, name='NEWT2')
assert 'NEWT2' in ds1
assert_allclose(ds1.NEWT2, ds1.T2)
assert_allclose(ds1.t2_3darr.coords['south_north'],
t2.coords['south_north'])
assert_allclose(ds1.t2_3darr.coords['west_east'],
t2.coords['west_east'])
assert 'time' in ds1.t2_3darr.coords
# dataset case
ds1.salem.transform_and_add(ds2[['RAINC', 'RAINNC']],
name={'RAINC':'PRCPC',
'RAINNC': 'PRCPNC'})
assert 'PRCPC' in ds1
assert_allclose(ds1.PRCPC, ds1.RAINC)
assert 'time' in ds1.PRCPNC.coords
# what happens with external data?
dse = sio.open_xr_dataset(get_demo_file('era_interim_tibet.nc'))
out = ds1.salem.transform(dse.t2m, interp='linear')
assert_allclose(out.coords['south_north'],
t2.coords['south_north'])
assert_allclose(out.coords['west_east'],
t2.coords['west_east'])
@requires_geopandas
def test_lookup_transform(self):
dsw = sio.open_wrf_dataset(get_demo_file('wrfout_d01.nc')).chunk()
dse = sio.open_xr_dataset(get_demo_file('era_interim_tibet.nc')).chunk()
out = dse.salem.lookup_transform(dsw.T2C.isel(time=0), method=len)
# qualitative tests (quantitative testing done elsewhere)
assert out[0, 0] == 0
assert out.mean() > 1
dsw = sio.open_wrf_dataset(get_demo_file('wrfout_d01.nc'))
dse = sio.open_xr_dataset(get_demo_file('era_interim_tibet.nc'))
_, lut = dse.salem.lookup_transform(dsw.T2C.isel(time=0), method=len,
return_lut=True)
out2 = dse.salem.lookup_transform(dsw.T2C.isel(time=0), method=len,
lut=lut)
# qualitative tests (quantitative testing done elsewhere)
        assert_allclose(out, out2)
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
''' eval PoseEstNet '''
import csv
import math
import os
import argparse
import numpy as np
from utils.inference import SdkInfer, get_posenet_preds
from utils.transforms import image_proc, flip_back, flip_pairs
def calc_dists(preds, target, normalize):
''' calc dists '''
preds = preds.astype(np.float32)
target = target.astype(np.float32)
dists = np.zeros((preds.shape[1], preds.shape[0]))
for n in range(preds.shape[0]):
for c in range(preds.shape[1]):
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
normed_preds = preds[n, c, :] / normalize[n]
normed_targets = target[n, c, :] / normalize[n]
dists[c, n] = np.linalg.norm(normed_preds - normed_targets)
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
dist_cal = np.not_equal(dists, -1)
num_dist_cal = dist_cal.sum()
if num_dist_cal > 0:
return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal
return -1
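# Illustrative sketch (not part of the original evaluation script): a tiny
# PCK-style check wiring calc_dists and dist_acc together on two synthetic
# keypoints. The numbers are made up; only the array shapes matter.
def _example_pck_check():
    preds = np.array([[[10.0, 10.0], [20.0, 20.0]]])   # (batch=1, joints=2, xy)
    target = np.array([[[11.0, 10.0], [40.0, 40.0]]])  # ground-truth locations
    normalize = np.ones((1, 2)) * 6.4                  # e.g. heatmap size / 10
    dists = calc_dists(preds, target, normalize)       # shape (joints, batch)
    # joint 0 lies within 0.5 * normalize of its target, joint 1 does not,
    # so this returns [1.0, 0.0]
    return [dist_acc(dists[c]) for c in range(dists.shape[0])]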
def accuracy(output, target, hm_type='gaussian'):
'''
Calculate accuracy according to PCK,
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies
'''
idx = list(range(output.shape[1]))
norm = 1.0
if hm_type == 'gaussian':
pred, _ = get_posenet_preds(output, trans_back=False)
target, _ = get_posenet_preds(target, trans_back=False)
h = output.shape[2]
w = output.shape[3]
norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10
dists = calc_dists(pred, target, norm)
acc = np.zeros((len(idx) + 1))
avg_acc = 0
cnt = 0
for i in range(len(idx)):
acc[i + 1] = dist_acc(dists[idx[i]])
if acc[i + 1] >= 0:
avg_acc = avg_acc + acc[i + 1]
cnt += 1
avg_acc = avg_acc / cnt if cnt != 0 else 0
if cnt != 0:
acc[0] = avg_acc
return acc, avg_acc, cnt, pred
def eval_posenet(label_path, imgs_path, pipline_path, FLIP_TEST=True, SHIFT_HEATMAP=True):
''' start eval '''
stream = SdkInfer(pipline_path)
stream.init_stream()
label_csv = open(label_path)
reader = csv.reader(label_csv, delimiter=',')
hash_annot = {}
data = []
for row in reader:
img_name = row[0]
width = int(row[1])
height = int(row[2])
joints = []
for j in range(36):
joint = [int(row[j * 3 + 3]), int(row[j * 3 + 4]),
int(row[j * 3 + 5])]
joints.append(joint)
hash_annot[img_name] = (width, height, joints)
center_joints = []
scale_joints = []
all_preds = np.zeros(
(len(hash_annot), 36, 3),
dtype=np.float32
)
batch_count = 0
idx = 0
for k in sorted(hash_annot.keys()):
image_name = k
        if not image_name.lower().endswith((".jpg", ".jpeg")):
continue
img_path = os.path.join(imgs_path, image_name)
_, pe_input, center, scale = image_proc(img_path)
pn_id = stream.send_package_buf(b'PoseEstNet0', np.expand_dims(
pe_input.astype(np.float32), axis=0), 0)
infer_result = stream.get_result(b'PoseEstNet0', pn_id)
if FLIP_TEST:
input_flipped = np.flip(np.expand_dims(
pe_input.astype(np.float32), axis=0), 3)
pn_flipped_id = stream.send_package_buf(
b'PoseEstNet0', input_flipped, 0)
outputs_flipped = stream.get_result(b'PoseEstNet0', pn_flipped_id)
if isinstance(outputs_flipped, list):
output_flipped = outputs_flipped[-1]
else:
output_flipped = outputs_flipped
output_flipped = flip_back(np.array(output_flipped), flip_pairs)
# feature is not aligned, shift flipped heatmap for higher accuracy
if SHIFT_HEATMAP: # true
output_flipped_copy = output_flipped
output_flipped[:, :, :,
1:] = output_flipped_copy[:, :, :, 0:-1]
infer_result = (infer_result + output_flipped) * 0.5
data.append(infer_result)
center_joints.append(center)
scale_joints.append(scale)
if batch_count == 31:
output = np.array(data, np.float32)
output = np.stack(output, axis=1).squeeze(axis=0)
preds = get_posenet_preds(
output, center=center_joints, scale=scale_joints)
all_preds[idx:idx + batch_count + 1, :, 0:3] = preds[:, :, 0:3]
print(f'-------- Test: [{int((idx+1)/32 + 1)}/{int(len(hash_annot)/32)}] ---------')
name_values = _evaluate(all_preds, label_path)
if isinstance(name_values, list):
for name_value in name_values:
_print_name_value(name_value, 'PoseEstNet')
else:
_print_name_value(name_values, 'PoseEstNet')
data = []
center_joints = []
scale_joints = []
idx += batch_count + 1
batch_count = 0
else:
batch_count += 1
stream.destroy()
def _print_name_value(name_value, full_arch_name):
''' print accuracy '''
names = name_value.keys()
values = name_value.values()
num_values = len(name_value)
print(
'| Arch ' +
' '.join(['| {}'.format(name) for name in names]) +
' |'
)
print('| --- ' * (num_values+1) + '|')
if len(full_arch_name) > 15:
full_arch_name = full_arch_name[:8] + '...'
print(
'| ' + full_arch_name + ' ' +
' '.join(['| {:.3f}'.format(value) for value in values]) +
' |'
)
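# Illustrative note (not in the original script): for example,
# _print_name_value({'Wheel': 0.912, 'Light': 0.874}, 'PoseEstNet') prints
# | Arch | Wheel | Light |
# | --- | --- | --- |
# | PoseEstNet | 0.912 | 0.874 |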
def _evaluate(preds, label_path):
''' get accuracy set'''
SC_BIAS = 0.25
threshold = 0.5
gts = []
viss = []
area_sqrts = []
with open(label_path) as annot_file:
reader = csv.reader(annot_file, delimiter=',')
for row in reader:
joints = []
vis = []
            # use two separate lists so updating top_lft does not also move btm_rgt
            top_lft = [int(row[3]), int(row[4])]
            btm_rgt = [int(row[3]), int(row[4])]
for j in range(36):
joint = [int(row[j * 3 + 3]), int(row[j * 3 + 4]),
int(row[j * 3 + 5])]
joints.append(joint)
vis.append(joint[2])
if joint[0] < top_lft[0]:
top_lft[0] = joint[0]
if joint[1] < top_lft[1]:
top_lft[1] = joint[1]
if joint[0] > btm_rgt[0]:
btm_rgt[0] = joint[0]
if joint[1] > btm_rgt[1]:
btm_rgt[1] = joint[1]
gts.append(joints)
viss.append(vis)
area_sqrts.append(
math.sqrt((btm_rgt[0] - top_lft[0] + 1) * (btm_rgt[1] - top_lft[1] + 1)))
jnt_visible = np.array(viss, dtype=np.int)
jnt_visible = np.transpose(jnt_visible)
pos_pred_src = np.transpose(preds, [1, 2, 0])
pos_gt_src = np.transpose(gts, [1, 2, 0])
uv_error = pos_pred_src - pos_gt_src
uv_err = np.linalg.norm(uv_error, axis=1)
area_sqrts = np.linalg.norm(area_sqrts, axis=0)
area_sqrts *= SC_BIAS
scale = np.multiply(area_sqrts, np.ones((len(uv_err), 1)))
scaled_uv_err = np.divide(uv_err, scale)
scaled_uv_err = np.multiply(scaled_uv_err, jnt_visible)
    jnt_count = np.sum(jnt_visible, axis=1)
# program for the little one to practice spelling (and Spanish!)
import cv2
import enum
import tensorflow as tf
import pandas as pd
import numpy as np
import os
from PIL import Image as im
from translate import Translator
from threading import Thread
from datetime import datetime
# only play the spanish word if found and only once
found = False
numFound = 0
time_found = datetime.now()
#play welcome message
os.system('mpg123 sounds/welcome.mp3')
#video stream class for multithreading
class vStream:
def __init__(self,src,width,height):
self._running = True
self.width=width
self.height=height
self.capture=cv2.VideoCapture(src)
self.thread=Thread(target=self.update,args=())
self.thread.daemon=True
self.thread.start()
def update(self):
while self._running:
success,self.frame=self.capture.read()
if success:
self.frame2=cv2.resize(self.frame,(self.width,self.height))
def getFrame(self):
return self.frame2
#kill the thread
def kill(self):
self.capture.release()
self._running = False
#play the spanish word if the letter is found
class spanishAudio:
isFound = False
fileName = ""
def __init__(self):
self._running = True
self.thread=Thread(target=self.update,args=())
self.thread.daemon=True
self.thread.start()
def update(self):
while self._running:
if self.isFound:
print("Found1")
cmd = 'mpg123 sounds/' + self.fileName
os.system(cmd)
self.isFound = False
def setFound(self,found, file_path):
print("Found2")
self.isFound=found
self.fileName=file_path
def kill(self):
self._running = False
# enumeration of objects to display on the screen
class Object(enum.Enum):
cat = 1
dog = 2
cow = 3
ball = 4
duck = 5
goat = 6
#increment to the next object
def inc(self):
v = self.value + 1
#if we reached the end, start over
if v > 6:
v = 1
return Object(v)
#return the missing letter and its position
#given that the kiddo is just learning letters, only using the first letter
#set up to have the missing letter be anywhere though
def letterPos(self):
l = 1
if self.value == 1:
#l = 1
val = "C"
if self.value == 2:
#l = 3
val = "D"
if self.value == 3:
#l = 2
val = "C"
if self.value == 4:
#l = 2
val = "B"
if self.value == 5:
#l = 4
val = "D"
if self.value == 6:
#l = 3
val = "G"
return (l,val)
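# Illustrative sketch (not in the original game script): how the Object enum
# cycles through the pictures and what letterPos() reports for the first one.
def _example_object_cycle():
    assert Object.cat.inc() == Object.dog      # simple increment
    assert Object.goat.inc() == Object.cat     # wraps around after the last value
    assert Object.cat.letterPos() == (1, "C")  # missing-letter slot and its answer
    return True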
# put cat letters on the screen
def drawCatText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
cv2.putText(image, "A", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "T", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
# put duck letters on the screen
def drawDuckText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "D", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "U", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (230, 175), (345, 305), (255, 0, 0), 3)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "C", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
cv2.putText(image, "K", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put goat letters on the screen
def drawGoatText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "G", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "A", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (345, 175), (435, 305), (255, 0, 0), 3)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
cv2.putText(image, "T", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put ball letters on the screen
def drawBallText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "B", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "A", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "L", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
#image = cv2.rectangle(image, (430, 175), (545, 305), (255, 0, 0), 3)
cv2.putText(image, "L", (450, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (450, 300), (530, 300), (255, 0, 0), 4)
return image
# put cow letters on the screen
def drawCowText(image):
# show the letters and the one to fill in
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "C", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (230, 175), (345, 305), (255, 0, 0), 3)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "W", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
# put dog letters on the screen
def drawDogText(image):
# show the letters and the one to fill in
image = cv2.rectangle(image, (130, 175), (245, 305), (255, 0, 0), 3)
#cv2.putText(image, "D", (150, 290),
# cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (150, 300), (230, 300), (255, 0, 0), 4)
cv2.putText(image, "O", (250, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
image = cv2.line(image, (250, 300), (330, 300), (255, 0, 0), 4)
cv2.putText(image, "G", (350, 290),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 0, 0), 5)
#image = cv2.rectangle(image, (345, 175), (440, 305), (255, 0, 0), 3)
image = cv2.line(image, (350, 300), (430, 300), (255, 0, 0), 4)
return image
#put the letters on the screen depending on which object it is
def addLetters(curObject, image):
if curObject.name == "cat":
image = drawCatText(image)
elif curObject.name == "dog":
image = drawDogText(image)
elif curObject.name == "cow":
image = drawCowText(image)
elif curObject.name == "ball":
image = drawBallText(image)
elif curObject.name == "duck":
image = drawDuckText(image)
elif curObject.name == "goat":
image = drawGoatText(image)
return image
# draw the object picture and letters to the screen
def drawScreen(filename, image, curObject):
game_pic = cv2.imread(filename, 1)
game_pic = cv2.resize(game_pic, (200, 150), interpolation=cv2.INTER_LINEAR)
added_image = cv2.addWeighted(
image[10:160, 200:400, :], 0.1, game_pic[0:150, 0:200, :], 0.9, 0)
# Change the region with the result
image[10:160, 200:400] = added_image
# add the letters for the given object to the screen
image = addLetters(curObject, image)
#draw a border around the letters
image = cv2.rectangle(image, (0, 0), (100, 480), (185, 185, 185), -1)
image = cv2.rectangle(image, (0, 325), (640, 480), (185, 185, 185), -1)
image = cv2.rectangle(image, (540, 0), (640, 480), (185, 185, 185), -1)
return image
# get the input from the screen where the letter goes
def getLetter(image, location):
get_letter = []
#only doing the first letter, but can eventually have
#missing letter anywhere in the word
get_letter = image[180:298, 130:240]
#if location == 1:
# get_letter = image[180:298, 130:240]
#if location == 2:
# get_letter = image[180:298, 245:335]
#if location == 3:
# get_letter = image[180:298, 345:435]
#if location == 4:
# get_letter = image[180:298, 445:535]
get_letter = cv2.cvtColor(get_letter, cv2.COLOR_RGB2GRAY)
get_letter = cv2.resize(get_letter, (28, 28),
interpolation=cv2.INTER_LINEAR)
# invert the black and white colows
img = cv2.bitwise_not(get_letter)
# turn the background black
# if the pixel value is less than 160, that means it's background,
# so turn it all the way black
img[img < 160] = 0
#have dimensions match what goes into the model
img = np.expand_dims(img, -1)
img = np.expand_dims(img, axis=0)
    img = np.array(img, dtype="float32")
from __future__ import division
import torch
import numpy as np
import os.path as osp
import torch.nn.functional as F
from mmcv.runner import load_checkpoint
from vegcn.datasets import build_dataset
from vegcn.confidence import confidence_to_peaks
from vegcn.deduce import peaks_to_labels
from utils import (sparse_mx_to_torch_sparse_tensor, list2dict, write_meta,
write_feat, mkdir_if_no_exists, rm_suffix, build_knns,
knns2ordered_nbrs, BasicDataset, Timer)
from evaluation import evaluate, accuracy
def test(model, dataset, cfg, logger):
if cfg.load_from:
logger.info('load from {}'.format(cfg.load_from))
load_checkpoint(model, cfg.load_from, strict=True, logger=logger)
features = torch.FloatTensor(dataset.features)
adj = sparse_mx_to_torch_sparse_tensor(dataset.adj)
if not dataset.ignore_label:
labels = torch.FloatTensor(dataset.labels)
if cfg.cuda:
model.cuda()
features = features.cuda()
adj = adj.cuda()
if not dataset.ignore_label:
labels = labels.cuda()
model.eval()
output, gcn_feat = model((features, adj), output_feat=True)
if not dataset.ignore_label:
loss = F.mse_loss(output, labels)
loss_test = float(loss)
logger.info('[Test] loss = {:.4f}'.format(loss_test))
pred_confs = output.detach().cpu().numpy()
gcn_feat = gcn_feat.detach().cpu().numpy()
return pred_confs, gcn_feat
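# Illustrative sketch (not the project's utils implementation): the general
# pattern behind a scipy-sparse-to-torch conversion such as
# sparse_mx_to_torch_sparse_tensor. The details here are assumptions, shown
# only to clarify what the adjacency tensor above contains.
def _example_sparse_to_torch(sparse_mx):
    import scipy.sparse as sp
    coo = sp.coo_matrix(sparse_mx).astype(np.float32)
    indices = torch.from_numpy(np.vstack((coo.row, coo.col)).astype(np.int64))
    values = torch.from_numpy(coo.data)
    return torch.sparse_coo_tensor(indices, values, torch.Size(coo.shape))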
def test_gcn_v(model, cfg, logger):
for k, v in cfg.model['kwargs'].items():
setattr(cfg.test_data, k, v)
dataset = build_dataset(cfg.model['type'], cfg.test_data)
folder = '{}_gcnv_k_{}_th_{}'.format(cfg.test_name, cfg.knn, cfg.th_sim)
oprefix = osp.join(cfg.work_dir, folder)
oname = osp.basename(rm_suffix(cfg.load_from))
opath_pred_confs = osp.join(oprefix, 'pred_confs', '{}.npz'.format(oname))
if osp.isfile(opath_pred_confs) and not cfg.force:
        data = np.load(opath_pred_confs)
import numpy as np
import pandas as pd
import theano.tensor as T
from random import shuffle
from theano import shared, function
from patsy import dmatrix
from collections import defaultdict
class MainClauseModel(object):
def __init__(self, nlatfeats=8, alpha=1., discount=None, beta=0.5, gamma=0.9,
delta=2., orthogonality_penalty=0., nonparametric=False):
'''
Parameters
----------
nlatfeats : int
Number of latent features for each verb; the default of 8 is
the number of unique subcat frames in the data
alpha : float (positive)
Beta process hyperparameter as specified in Teh et al. 2007
"Stick-breaking Construction for the Indian Buffet Process";
changes meaning based on Pitman-Yor discount hyperparameter
(see Teh et al. 2007, p.3)
discount : float (unit) or None
If discount is a float, it must satisfy alpha > -discount
beta : float (positive)
If parametric=True, concetration parameter for verb-specific
beta draws based on beta process sample; if nonparametric=False,
hyperparameter of a Beta(beta, beta); in the latter case, beta
should be on (0,1), otherwise the verb representations are
unidentifiable, since their is a flat prior on the selection
probability
gamma : float (positive)
Hyperparameter of a beta distribution on the projection matrix
delta : float (positive)
Hyperparameter of a beta distribution on the verb feature
probability matrix
orthogonality_penalty : float (positive)
How much to penalize for singularity
nonparametric : bool
Whether to use a nonparametric prior
divergence_weight : float (0 to negative infinity) (ADDED)
How much to weight the either-or bias. If 0, no either-or bias.
'''
self.nlatfeats = nlatfeats
self.alpha = alpha
self.discount = discount
self.beta = beta
self.gamma = gamma
self.delta = delta
self.orthogonality_penalty = orthogonality_penalty
self.nonparametric = nonparametric
self.divergence_weight = -1
self._validate_params()
self._ident = ''.join(np.random.choice(9, size=10).astype(str))
def _validate_params(self):
if self.discount is not None:
self._pitman_yor = True
try:
assert self.alpha > -self.discount
except AssertionError:
raise ValueError('alpha must be greater than -discount')
else:
self._pitman_yor = False
def _initialize_model(self, data, stochastic):
self.data = data
self._initialize_counter()
self._initialize_reps()
self._initialize_loss()
self._initialize_updaters(stochastic)
def _initialize_counter(self):
self._verbcount = T.zeros(self.data.n('verb'))
self._verbeye = T.eye(self.data.n('verb'))
def _initialize_reps(self):
self._reps = {}
if self.nonparametric:
# nu_aux = np.array([2.]+[-1.]*(self.nlatfeats-1))
            nu_aux = np.array([0.]*self.nlatfeats)
"""Tests for neighbor caching.
"""
import numpy as np
import unittest
from pysph.base.nnps import NeighborCache, LinkedListNNPS
from pysph.base.utils import get_particle_array
from cyarray.carray import UIntArray
class TestNeighborCache(unittest.TestCase):
def _make_random_parray(self, name, nx=5):
x, y, z = np.random.random((3, nx, nx, nx))
x = np.ravel(x)
        y = np.ravel(y)
import pytest
from kbbq import compare_reads
from kbbq.gatk import applybqsr
import numpy as np
import filecmp
from pandas.util.testing import assert_frame_equal
import pysam
######################
# FASTQ Recalibration
######################
class FakeRead:
def __init__(self, name, quality, sequence):
self.name = name #string
self.quality = quality #string
self.sequence = sequence #string
def get_quality_array(self, offset = 33):
q = np.array(list(self.quality), dtype = np.unicode)
quals = np.array(q.view(np.uint32) - offset, dtype = np.uint32)
return list(quals)
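# Illustrative sketch (not part of the original tests): FakeRead converts an
# ASCII quality string into Phred scores using a fixed offset of 33.
def _example_quality_array():
    read = FakeRead(name='r1/1', quality='II#', sequence='ACG')
    # 'I' = chr(73) -> Q40, '#' = chr(35) -> Q2
    return read.get_quality_array()  # [40, 40, 2]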
def bamread_to_fakeread(read):
complement = {'A' : 'T', 'T' : 'A', 'G' : 'C', 'C' : 'G'}
seq = read.query_sequence
q = read.get_tag('OQ')
if read.is_reverse:
seq = ''.join([complement.get(x,'N') for x in reversed(seq)])
q = q[::-1]
suffix = ("/2" if read.is_read2 else "/1")
name = read.query_name + suffix + '_RG:Z:' + read.get_tag('RG')
return FakeRead(name = name, quality = q, sequence = seq)
@pytest.mark.slow
def test_fastq_calibration(report, recalibratedbam):
rg_to_pu = compare_reads.get_rg_to_pu(recalibratedbam)
rg_to_int = {r:i for i,r in enumerate(rg_to_pu)}
meanq, *vectors = applybqsr.table_to_vectors(report, list(rg_to_pu.values()))
dqs = applybqsr.get_delta_qs(meanq, *vectors)
for read in recalibratedbam:
fastqread = bamread_to_fakeread(read)
gatk_calibrated_quals = np.array(read.query_qualities, dtype = np.int)
if read.is_reverse:
gatk_calibrated_quals = np.flip(gatk_calibrated_quals)
rg = rg_to_int[compare_reads.fastq_infer_rg(fastqread)]
recalibrated_quals = compare_reads.recalibrate_fastq(fastqread, meanq, *dqs, rg = rg, dinuc_to_int = compare_reads.Dinucleotide.dinuc_to_int, secondinpair = compare_reads.fastq_infer_secondinpair(fastqread))
assert np.array_equal(recalibrated_quals, gatk_calibrated_quals)
def test_tstamp():
import datetime
correct = datetime.datetime.today()
correct = correct.replace(microsecond = 0)
test = datetime.datetime.fromisoformat(compare_reads.tstamp()[2:-2])
assert correct == test
def test_load_positions(simple_bed):
correct = {'ref':list(range(8,46))}
assert compare_reads.load_positions(simple_bed) == correct
def test_get_var_sites(simple_vcf):
correct = {'ref':[9]}
assert compare_reads.get_var_sites(simple_vcf) == correct
def test_train_regression():
q = np.array([28, 28, 24, 24, 28, 30, 27, 9, 10, 14, 20, 25, 31, 32, 24, 24, 25] #0:16
+ [29, 27, 29, 30, 30, 30, 29, 29, 29], dtype = np.int) #17:
e = np.zeros(len(q))
e[21] = True
lr = compare_reads.train_regression(q, e)
q = q[:,np.newaxis]
predicted = lr.predict_proba(q)
assert predicted.shape[0] == q.shape[0]
def test_regression_recalibrate():
q = np.array([28, 28, 24, 24, 28, 30, 27, 9, 10, 14, 20, 25, 31, 32, 24, 24, 25] #0:16
+ [29, 27, 29, 30, 30, 30, 29, 29, 29], dtype = np.int) #17:
e = np.zeros(len(q))
e[21] = True
lr = compare_reads.train_regression(q,e)
newq = compare_reads.regression_recalibrate(lr, q)
assert newq.shape == q.shape
assert not np.all(newq == q)
def test_find_read_errors(simple_bam, simple_refdict, simple_fullskips):
"""
This will need more testing for some edge cases probably
"""
import collections
r1skips = np.zeros(17, dtype = np.bool)
r1skips[3] = True #from vcf
r1skips[0:2] = True #from BED
r2errs = np.zeros(9, dtype = np.bool)
r2errs[5] = True
bam = pysam.AlignmentFile(simple_bam,'rb')
reads = list(bam)
e, s = compare_reads.find_read_errors(reads[0], simple_refdict, simple_fullskips)
assert np.array_equal(e,np.zeros(17, dtype = np.bool))
assert np.array_equal(s,r1skips)
e, s = compare_reads.find_read_errors(reads[1], simple_refdict, simple_fullskips)
assert np.array_equal(e,r2errs)
assert np.array_equal(s,np.zeros(9, dtype = np.bool))
#test hard clip + n block
readstr = 'clipped\t0\tref\t9\t255\t1M9H\t*\t0\t0\tA\t)'
clippedread = pysam.AlignedSegment.fromstring(readstr,bam.header)
e, s = compare_reads.find_read_errors(clippedread, simple_refdict, simple_fullskips)
assert np.array_equal(e, np.array([False]))
assert np.array_equal(s, np.array([False], dtype = np.bool))
# test exception for invalid CIGAR
# though the amount of effort required to do this means it's unlikely
# to ever happen to a user...
FakeRead = collections.namedtuple('FakeRead', clippedread.__dir__(), rename = True)
shallowvalues = {k:getattr(clippedread,k) for k in clippedread.__dir__()}
shallowvalues['cigartuples'] = [('L',9)]
clippedread = FakeRead(*shallowvalues)
with pytest.raises(ValueError):
e, s = compare_reads.find_read_errors(clippedread, simple_refdict, simple_fullskips)
def test_RescaledNormal_prior():
assert compare_reads.RescaledNormal.prior(0) == np.log(.9)
assert np.array_equal(compare_reads.RescaledNormal.prior(np.arange(43)),
compare_reads.RescaledNormal.prior_dist)
    assert np.all(compare_reads.RescaledNormal.prior_dist < 0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 16:41:27 2018
@author: xionghaipeng
"""
__author__ = 'xhp'
'''load the dataset'''
# from __future__ import print_function, division
import os
import torch
# import pandas as pd #load csv file
from skimage import io, transform #
import numpy as np
# import matplotlib.pyplot as plt
from torch.utils.data import Dataset # , DataLoader#
# from torchvision import transforms, utils#
import glob # use glob.glob to get special flielist
import scipy.io as sio # use to import mat as dic,data is ndarray
import torch
import torch.nn.functional as F
import glob
import torchvision.transforms as trans
import cv2
from PIL import Image
from torch.utils.data import Dataset
import os
import numpy as np
import torch
from torchvision import transforms
import random
import h5py
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
class myDataset(Dataset):
"""dataset. also can be used for annotation like density map"""
def __init__(self, img_dir, tar_dir, rgb_dir, transform=None, if_test=False, \
IF_loadmem=False):
"""
Args:
img_dir (string ): Directory with all the images.
tar_dir (string ): Path to the annotations.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.IF_loadmem = IF_loadmem # whether to load data in memory
self.IF_loadFinished = False
self.image_mem = []
self.target_mem = []
self.img_dir = img_dir
self.tar_dir = tar_dir
self.transform = transform
mat = sio.loadmat(rgb_dir)
self.rgb = mat['rgbMean'].reshape(1, 1, 3) # rgbMean is computed after norm to [0-1]
img_name = os.path.join(self.img_dir, '*.jpg')
self.filelist = glob.glob(img_name)
self.dataset_len = len(self.filelist)
# for test process, load data is different
self.if_test = if_test
def __len__(self):
return self.dataset_len
def __getitem__(self, idx):
# ------------------------------------
# 1. see if load from disk or memory
# ------------------------------------
if (not self.IF_loadmem) or (not self.IF_loadFinished):
img_name = self.filelist[idx]
image = io.imread(img_name) # load as numpy ndarray
image = image / 255. - self.rgb # to normalization,auto to change dtype
(filepath, tempfilename) = os.path.split(img_name)
(name, extension) = os.path.splitext(tempfilename)
mat_dir = os.path.join(self.tar_dir, '%s.mat' % (name))
mat = sio.loadmat(mat_dir)
# if need to save in memory
if self.IF_loadmem:
self.image_mem.append(image)
self.target_mem.append(mat)
# updata if load finished
if len(self.image_mem) == self.dataset_len:
self.IF_loadFinished = True
else:
image = self.image_mem[idx]
mat = self.target_mem[idx]
# target = mat['target']
# for train may need pre load
if not self.if_test:
target = mat['crop_gtdens']
sample = {'image': image, 'target': target}
if self.transform:
sample = self.transform(sample)
# pad the image
sample['image'], sample['target'] = get_pad(sample['image'], DIV=64), get_pad(sample['target'], DIV=64)
else:
target = mat['all_num']
sample = {'image': image, 'target': target}
if self.transform:
sample = self.transform(sample)
sample['density_map'] = torch.from_numpy(mat['density_map'])
# pad the image
sample['image'], sample['density_map'] = get_pad(sample['image'], DIV=64), get_pad(sample['density_map'],
DIV=64)
return sample
######################################################################
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, target = sample['image'], sample['target']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
return {'image': torch.from_numpy(image),
'target': torch.from_numpy(target)}
######################################################################
def get_pad(inputs, DIV=64):
h, w = inputs.size()[-2:]
ph, pw = (DIV - h % DIV), (DIV - w % DIV)
# print(ph,pw)
if (ph != DIV) or (pw != DIV):
tmp_pad = [pw // 2, pw - pw // 2, ph // 2, ph - ph // 2]
# print(tmp_pad)
inputs = F.pad(inputs, tmp_pad)
return inputs
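# Illustrative sketch (not in the original dataset code): what get_pad does to a
# tensor whose spatial size is not a multiple of DIV.
def _example_get_pad():
    x = torch.zeros(1, 1, 100, 130)
    y = get_pad(x, DIV=64)
    # height 100 -> pad 14 + 14 = 28 -> 128; width 130 -> pad 31 + 31 = 62 -> 192
    return tuple(y.size())  # (1, 1, 128, 192)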
class SHTA(Dataset):
def __init__(self, img_root, gt_dmap_root=None, bs=1, scale=[1], crop=1, randomcrop=False, down=1, raw=False,
preload=False, phase='train',maintrans=None, imgtrans=None, gttrans=None):
'''
img_root: the root path of img.
gt_dmap_root: the root path of ground-truth density-map.
gt_downsample: default is 0, denote that the output of deep-model is the same size as input image.
phase: train or test
'''
self.img_root = img_root
self.gt_dmap_root = gt_dmap_root
self.crop = crop
self.raw = raw
self.phase = phase
self.preload = preload
self.bs = bs
self.img_files = glob.glob(img_root + '/*.jpg')
self.samples = len(self.img_files)
self.maintrans = maintrans
self.imgtrans = imgtrans
self.gttrans = gttrans
self.down = down
self.randomcrop = randomcrop
self.scale_list = scale
if self.preload:
            self.data = read_image_and_gt(self.img_files, preload=True,
                                          mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
def __len__(self):
return self.samples
def __getitem__(self, index):
imgs = []
gts = []
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if self.preload:
imgname = self.data[index][0]
img = self.data[index][1]
gt = self.data[index][2]
else:
imgname, img, gt = read_image_and_gt(self.img_files[index], preload=False, mean=mean, std=std)
# if self.maintrans is not None:
# img, den = self.maintrans(img, gt)
# if self.imgtrans is not None:
# img = self.imgtrans(img)
# if self.gttrans is not None:
# gt = self.gttrans(gt)
if len(self.scale_list) > 1 and self.phase == 'train':
temp = random.randint(0, len(self.scale_list)-1)
scale_para = self.scale_list[temp]
if scale_para!=1:
img = cv2.resize(img, (scale_para * img.shape[1], scale_para * img.shape[0]))
gt = cv2.resize(gt, (scale_para * gt.shape[1], scale_para * gt.shape[0])) / scale_para / scale_para
if random.random()>0.75 and self.phase == 'train':
            img = img[:, ::-1].copy()  # horizontal flip
            gt = gt[:, ::-1].copy()  # horizontal flip
rh, rw,_ = img.shape
ch, cw = rh // 2, rw // 2
img = img.transpose([2, 0, 1])
img = img[np.newaxis,:,:,:]
gt = gt[np.newaxis,np.newaxis, :, :]
gt = torch.tensor(gt, dtype=torch.float32)
img = torch.tensor(img, dtype=torch.float32)
if self.crop > 1 and self.randomcrop and self.phase == 'train':
gts = []
imgs = []
count = 0
while count < self.crop:
sh = random.randint(0, ch)
sw = random.randint(0, cw)
                imgs.append(img[:, :, sh:sh + ch, sw:sw + cw])
                gts.append(gt[:, :, sh:sh + ch, sw:sw + cw])
count +=1
imgs = [get_pad(i) for i in imgs]
gts = [get_pad(i) for i in gts]
img = torch.cat(imgs, 0)
gt = torch.cat(gts, 0)
elif self.crop > 1 and not self.randomcrop and self.phase == 'train':
gts = []
imgs = []
imgs.append(img[:, :, 0:ch, 0:cw])
gts.append(gt[:, :,0:ch, 0:cw])
imgs.append(img[:, :,ch:, 0:cw])
gts.append(gt[:, :, ch:, 0:cw])
imgs.append(img[:, :, 0:ch, cw:])
gts.append(gt[:, :, 0:ch, cw:])
imgs.append(img[:, :, ch:, cw:])
gts.append(gt[:, :,ch:, cw:])
count = 4
while count < self.crop:
sh = random.randint(0, ch)
sw = random.randint(0, cw)
imgs.append(img[:, :, sh:sh + ch, sw:sw + cw])
gts.append(gt[:, :, sh:sh + ch, sw:sw + cw])
count +=1
imgs = [get_pad(i) for i in imgs]
gts = [get_pad(i) for i in gts]
img = torch.cat(imgs, 0)
gt = torch.cat(gts, 0)
# img = get_pad(img)
# gt = get_pad(gt)
return imgname, img, gt
def read_image_and_gt(imgfiles, preload, mean, std):
if preload:
data = []
print('Loading data into ram......')
for idx, imgfile in tqdm(enumerate(imgfiles)):
img = cv2.imread(imgfile)
imgname = imgfile.split('/')[-1]
img = img[:, :, ::-1].copy()
img = img.astype(np.float32, copy=False)
            img = (img / 255 - mean) / std
gtfile = imgfile.replace('original', 'knn_gt').replace('.jpg', '.npy').replace('images',
'ground-truth').replace(
'IMG_',
'GT_IMG_')
gt = np.load(gtfile)
# gt = torch.tensor(gt,dtype=torch.float32)
# img = torch.tensor(img,dtype=torch.float32)
#
# img = get_pad(img)
# gt = get_pad(gt)
#
# # rh, rw, c = img.shape
# # dw = int(rw / 16) * 16
# # dh = int(rh / 16) * 16
# # img = cv2.resize(img, (dw, dh))
# # zoom = rw * rh / dw / dh
# # gt = cv2.resize(gt, (dw, dh), interpolation=cv2.INTER_CUBIC) * zoom
#
# img = img.transpose([2, 0, 1])
# gt = torch.unsqueeze(gt,0)
data.append([imgname, img, gt])
return data
else:
img = cv2.imread(imgfiles)
imgname = imgfiles.split('/')[-1]
img = img[:, :, ::-1].copy()
img = img.astype(np.float32, copy=False)
img = (img / 255 - mean) / std
gtfile = imgfiles.replace('original', 'knn_gt').replace('.jpg', '.npy').replace('images',
'ground-truth').replace('IMG_',
'GT_IMG_')
        gt = np.load(gtfile)
# This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p)
#
# Copyright (c) 2020-2021, Technical University of Darmstadt, Germany
#
# This software may be modified and distributed under the terms of a BSD-style license.
# See the LICENSE file in the base directory for details.
from __future__ import annotations
import math
import typing
import numpy
from PySide2.QtCore import * # @UnusedWildImport
from PySide2.QtGui import * # @UnusedWildImport
from PySide2.QtWidgets import * # @UnusedWildImport
from extrap.gui.Utils import formatFormula
from extrap.gui.Utils import formatNumber
if typing.TYPE_CHECKING:
from extrap.gui.MainWidget import MainWidget
#####################################################################
class GraphWidget(QWidget):
"""This class represents a Widget that is used to show the graph on
the extrap window.
"""
#####################################################################
def __init__(self, main_widget: MainWidget, parent):
super(GraphWidget, self).__init__(parent)
self.main_widget = main_widget
self.initUI()
self.set_initial_value()
self.setMouseTracking(True)
def initUI(self):
self.setMinimumWidth(300)
self.setMinimumHeight(300)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showContextMenu)
self.show()
def setMax(self, axis, maxX):
""" This function sets the highest value of x that is being shown on x axis.
"""
if axis == 0:
self.main_widget.data_display.setMaxValue(0, maxX)
self.max_x = maxX
else:
print("[EXTRAP:] Error: Set maximum for axis other than X-axis.")
def logicalXtoPixel(self, lValue):
"""
This function converts an X-value from logical into pixel coordinates.
"""
return self.left_margin + lValue * self.graph_width / self.max_x
def pixelXtoLogical(self, pValue):
"""
This function converts an X-value from pixel into logical coordinates.
"""
return (pValue - self.left_margin) * self.max_x / self.graph_width
def logicalYtoPixel(self, lValue):
"""
This function converts an Y-value from logical into pixel coordinates.
"""
return self.top_margin + (self.max_y - lValue) * self.graph_height / self.max_y
def pixelYtoLogical(self, pValue):
"""
This function converts an Y-value from pixel into logical coordinates.
"""
return (self.graph_height + self.top_margin - pValue) * self.max_y / self.graph_height
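    # Worked example (illustrative; assumes left_margin=80 and max_x=40 as set
    # in set_initial_value, and a graph_width of 480): logicalXtoPixel(10) gives
    # 80 + 10 * 480 / 40 = 200, and pixelXtoLogical(200) gives
    # (200 - 80) * 40 / 480 = 10, so the two mappings are exact inverses.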
def paintEvent(self, event):
paint = QPainter()
paint.begin(self)
paint.setRenderHints(QPainter.Antialiasing | QPainter.TextAntialiasing)
self.drawGraph(paint)
paint.end()
# noinspection PyAttributeOutsideInit
def set_initial_value(self):
"""
This function sets the initial value for different parameters required for graph.
"""
# Basic geometry constants
self.max_x = 40
self.left_margin = 80
self.bottom_margin = 80
self.top_margin = 20
self.right_margin = 20
self.legend_x = 100 # X-coordinate of the upper left corner of the legend
self.legend_y = 20 # Y-coordinate of the upper left corner of the legend
# the actual value for below 3 variables will be set later in the code
self.max_y = 0
self.graph_height = 0
self.graph_width = 0
self.legend_width = 0
self.legend_height = 0
self.clicked_x_pos = None
self.clicked_y_pos = None
# colors
self.BACKGROUND_COLOR = QColor("white")
self.TEXT_COLOR = QColor("black")
self.AXES_COLOR = QColor("black")
self.AGGREGATE_MODEL_COLOR = QColor(self.main_widget.graph_color_list[0])
self.DATA_POINT_COLOR = QColor(self.main_widget.graph_color_list[0]).darker(200)
self.DATA_RANGE_COLOR = QColor(self.main_widget.graph_color_list[0]).darker(150)
self.minimum_number_points_marked = 2
self.aggregate_callpath = False
self.datapoints_type = ""
self.datapointType_Int_Map = {
'min': 1, 'mean': 2, 'max': 3, 'median': 4, 'standardDeviation': 5, 'outlier': 6}
@property
def show_datapoints(self):
return not self.combine_all_callpath and self.datapoints_type != ''
@property
def combine_all_callpath(self):
return self.aggregate_callpath and len(self.main_widget.getSelectedCallpath()) > 1
@Slot(QPoint)
def showContextMenu(self, point):
"""
This function takes care of different options and their visibility in the context menu.
"""
# selected_metric = self.main_widget.getSelectedMetric()
selected_callpaths = self.main_widget.getSelectedCallpath()
if not selected_callpaths:
return
menu = QMenu()
points_group = QActionGroup(self)
points_group.setEnabled(not self.combine_all_callpath)
data_point_selection = [
# To show data points for mean values
("Show Mean Points", 'mean'),
# To show data points for min values
("Show Minimum Points", 'min'),
# To show data points for max values
("Show Maximum Points", 'max'),
# To show data points for median values
("Show Median Points", 'median'),
# To show data points for standard deviation values
("Show Standard Deviation Points", 'standardDeviation'),
# To show outlier points
("Show all data points", 'outlier'),
# Hiding any datapoints
("Hide Points", ''),
]
for name, type in data_point_selection:
action = points_group.addAction(name)
action.setCheckable(True)
action.setData(type)
action.setChecked(self.datapoints_type == type)
action.triggered.connect(self.selectDataPoints)
menu.addAction(action)
# Combining and disjoing Multiple callpaths
menu.addSeparator()
group = QActionGroup(self)
group.setEnabled(len(selected_callpaths) > 1)
group.setExclusive(True)
action = group.addAction("Combine callpaths")
action.setCheckable(True)
action.setChecked(self.aggregate_callpath)
action.triggered.connect(self.combineCallPaths)
menu.addAction(action)
action1 = group.addAction("Show all callpaths")
action1.setCheckable(True)
action1.setChecked(not self.aggregate_callpath)
action1.triggered.connect(self.showAllCallPaths)
menu.addAction(action1)
# Export
menu.addSeparator()
exportDataAction = menu.addAction("Export Data")
exportDataAction.triggered.connect(self.exportData)
screenshotAction = menu.addAction("Screenshot")
screenshotAction.triggered.connect(self.screenshot)
menu.exec_(self.mapToGlobal(point))
@Slot()
def selectDataPoints(self):
"""
This function sets which type of data points is shown on the graph, based on the selected menu action.
"""
self.datapoints_type = QObject.sender(self).data()
self.update()
@Slot()
def combineCallPaths(self):
"""
This function combines all callpaths shown on the graph into a single aggregated curve.
"""
self.aggregate_callpath = True
self.update()
@Slot()
def showAllCallPaths(self):
"""
This function shows all callpaths.
"""
self.aggregate_callpath = False
self.update()
@Slot()
def screenshot(self):
selected_callpaths = self.main_widget.getSelectedCallpath()
selected_metric = self.main_widget.getSelectedMetric()
name_addition = "-"
if selected_metric:
name_addition = f"-{selected_metric}-"
if selected_callpaths:
name_addition += ','.join((c.name for c in selected_callpaths))
self.main_widget.screenshot(target=self, name_addition=name_addition)
@Slot()
def exportData(self):
"""
This function exports the currently shown points and functions in a text format.
"""
selected_metric = self.main_widget.getSelectedMetric()
selected_callpaths = self.main_widget.getSelectedCallpath()
if not selected_callpaths:
return
# model_list = list()
text = ''
model_set = self.main_widget.getCurrentModel()
if model_set is None:
return
model_set_models = model_set.models
if not model_set_models:
return
for selected_callpath in selected_callpaths:
model = model_set_models[selected_callpath.path, selected_metric]
if model is None:
return
model_function = model.hypothesis.function
data_points = [p for (_, p) in self.calculateDataPoints(
selected_metric, selected_callpath, True)]
callpath_name = selected_callpath.name
parameters = self.main_widget.experiment.parameters
model_function_text = 'Model: ' + \
formatFormula(
model_function.to_string(*parameters))
data_points_text = '\n'.join(
('(' + str(x) + ', ' + str(y) + ')') for (x, y) in data_points)
text += callpath_name + '\n' + data_points_text + \
'\n' + model_function_text + '\n\n'
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(
"Exported data (text can be copied to the clipboard using the context menu):")
msg.setInformativeText(text)
msg.setWindowTitle("Export Data")
# msg.setDetailedText("The details are as follows:")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
def drawGraph(self, paint):
"""
This function is called by paintEvent to draw the graph.
"""
# Get data
model_set = self.main_widget.getCurrentModel()
selected_metric = self.main_widget.getSelectedMetric()
selected_callpaths = self.main_widget.getSelectedCallpath()
if not selected_callpaths or model_set is None:
return
model_set_models = model_set.models
if not model_set_models:
return
model_list = list()
selected_callpaths_checked=[]
for selected_callpath in selected_callpaths:
key = (selected_callpath.path, selected_metric)
if key in model_set_models:
model = model_set_models[key]
model_list.append(model)
selected_callpaths_checked.append(selected_callpath)
# Calculate geometry constraints
self.graph_width = self.frameGeometry().width() - self.left_margin - self.right_margin
self.graph_height = self.frameGeometry().height() - self.top_margin - self.bottom_margin
y = self.calculateMaxY(model_list) * 1.2
self.max_y = y
# Draw coordinate system
self.drawAxis(paint, selected_metric)
# Draw functions
index_indicator = 0
if not self.combine_all_callpath:
for model in model_list:
color = self.main_widget.getColorForCallPath(
selected_callpaths_checked[index_indicator])
self.drawModel(paint, model, color)
index_indicator = index_indicator + 1
else:
color = self.main_widget.getColorForCallPath(selected_callpaths_checked[0])
self.drawAggregratedModel(paint, model_list)
# Draw data points
self.drawDataPoints(paint, selected_metric, selected_callpaths_checked)
# Draw legend
self.drawLegend(paint)
def drawDataPoints(self, paint, selectedMetric, selectedCallpaths):
if self.show_datapoints is True:
pen = QPen(self.DATA_POINT_COLOR)
pen.setWidth(4)
paint.setPen(pen)
# data_points_list = list()
for selected_callpath in selectedCallpaths:
if self.datapoints_type == "outlier":
self.showOutlierPoints(
paint, selectedMetric, selected_callpath)
else:
data_points = self.calculateDataPoints(
selectedMetric, selected_callpath)
self.plotPointsOnGraph(
paint, data_points)
def drawLegend(self, paint):
# drawing the graph legend
px_between = 15
callpath_color_dict = self.main_widget.get_callpath_color_map()
dict_size = len(callpath_color_dict)
font_size = int(self.main_widget.getFontSize())
paint.setFont(QFont('Decorative', font_size))
paint.setBrush(self.BACKGROUND_COLOR)
pen = QPen(self.TEXT_COLOR)
pen.setWidth(1)
paint.setPen(pen)
counter_increment = font_size + 3
if self.combine_all_callpath is False:
text_len = 0
for callpath, color in callpath_color_dict.items():
text_len = max(text_len, len(callpath.name))
self.legend_width = 55 + text_len * (font_size - 1)
self.legend_height = counter_increment * dict_size + px_between
paint.drawRect(self.legend_x,
self.legend_y,
self.legend_width,
self.legend_height)
counter = 0
for callpath, color in callpath_color_dict.items():
pen = QPen(QColor(color))
pen.setWidth(2)
paint.setPen(pen)
paint.drawLine(self.legend_x + 5,
self.legend_y + px_between + counter,
self.legend_x + 35,
self.legend_y + px_between + counter)
paint.setPen(self.TEXT_COLOR)
paint.drawText(self.legend_x + 45,
self.legend_y + px_between + counter,
callpath.name)
counter = counter + counter_increment
else:
text_len = 0
callpath_list = list()
for callpath, color in callpath_color_dict.items():
callpath_list.append(callpath.name)
text_len = max(text_len, text_len +
len(callpath.name))
aggregated_callpath_name = str.join('+', callpath_list)
self.legend_width = 55 + text_len * (font_size - 1)
self.legend_height = counter_increment * 1 + px_between
paint.drawRect(self.legend_x,
self.legend_y,
self.legend_width,
self.legend_height)
pen = QPen(self.AGGREGATE_MODEL_COLOR)
pen.setWidth(2)
paint.setPen(pen)
paint.drawLine(self.legend_x + 5,
self.legend_y + px_between,
self.legend_x + 35,
self.legend_y + px_between)
paint.setPen(self.TEXT_COLOR)
paint.drawText(self.legend_x + 45,
self.legend_y + px_between,
aggregated_callpath_name)
def drawModel(self, paint, model, color):
function = model.hypothesis.function
cord_list = self.calculate_function(function, self.graph_width)
pen = QPen(QColor(color))
pen.setWidth(2)
paint.setPen(pen)
points = [
QPointF(self.logicalXtoPixel(x), self.logicalYtoPixel(y)) for x, y in cord_list
]
paint.drawPolyline(points)
def drawAggregratedModel(self, paint, model_list):
functions = list()
for model in model_list:
function = model.hypothesis.function
functions.append(function)
cord_list = self.calculate_aggregate_callpath_function(
functions, self.graph_width)
pen = QPen(self.AGGREGATE_MODEL_COLOR)
pen.setWidth(2)
paint.setPen(pen)
points = [
QPointF(self.logicalXtoPixel(x), self.logicalYtoPixel(y)) for x, y in cord_list
]
paint.drawPolyline(points)
def drawAxis(self, paint, selectedMetric):
# Determining the number of divisions to be marked on the x axis such that there is a minimum distance of 100 pixels
# between two of them; based on that, the distance between two marks on the y axis is calculated.
x_offset = 100
x_origin = self.left_margin
y_origin = self.top_margin + self.graph_height
y_other_end = self.top_margin
x_other_end = self.graph_width + self.left_margin
num_points_marked_on_x_axis = int(self.graph_width / x_offset)
if num_points_marked_on_x_axis < self.minimum_number_points_marked:
num_points_marked_on_x_axis = self.minimum_number_points_marked
x_offset = self.graph_width / num_points_marked_on_x_axis
num_points_marked_on_y_axis = int(self.graph_height / x_offset)
if num_points_marked_on_y_axis == 0:
num_points_marked_on_y_axis = 1
y_offset = self.graph_height / num_points_marked_on_y_axis
x_to_mark_cal = self.get_axis_mark_list(
self.max_x, num_points_marked_on_x_axis)
y_to_mark_cal = self.get_axis_mark_list(
self.max_y, num_points_marked_on_y_axis)
x_to_mark = self.format_numbers_to_be_displayed(x_to_mark_cal)
y_to_mark = self.format_numbers_to_be_displayed(y_to_mark_cal)
# setting the number of subdivisions to be marked on the x axis and y axis
number_of_intermediate_points_on_x = 2
number_of_intermediate_points_on_y = 4
# drawing the rectangular region that would contain the graph
paint.setPen(self.BACKGROUND_COLOR)
paint.setBrush(self.BACKGROUND_COLOR)
paint.drawRect(self.frameGeometry())
# drawing x axis and y axis
paint.setPen(self.AXES_COLOR)
paint.drawLine(x_origin, y_origin, x_other_end, y_origin)
paint.drawLine(x_origin, y_other_end, x_origin, y_origin)
# marking divisions and subdivisions on the x axis
y = y_origin
paint.drawText(self.left_margin - 5, y + 30, "0")
if x_to_mark[0] != 0:
intermediate_x_offset = x_offset / \
(number_of_intermediate_points_on_x + 1)
intermediate_x = self.left_margin + intermediate_x_offset
for _ in range(0, number_of_intermediate_points_on_x, +1):
paint.drawLine(intermediate_x, y - 3, intermediate_x, y)
intermediate_x = intermediate_x + intermediate_x_offset
# x_last_position = self.left_margin
# remove a leading "_" from x_label if present
x_label = self.main_widget.data_display.getAxisParameter(0).name
if x_label.startswith("_"):
x_label = x_label[1:]
for i in range(len(x_to_mark)):
x = self.logicalXtoPixel(x_to_mark_cal[i])
if i == len(x_to_mark) - 1:
x = round(x)
if i == (int(len(x_to_mark) / 2)):
paint.drawText(x, y + 50, x_label)
intermediate_x_offset = x_offset / (number_of_intermediate_points_on_x + 1)
intermediate_x = x + intermediate_x_offset
for _ in range(0, number_of_intermediate_points_on_x, +1):
paint.drawLine(intermediate_x, y - 3, intermediate_x, y)
intermediate_x = intermediate_x + intermediate_x_offset
paint.drawLine(x, y - 6, x, y)
paint.drawText(x - 15, y + 30, str(x_to_mark[i]))
# x_last_position = x
for y_value in range((y_origin - 7), y_other_end, -3):
paint.drawPoint(x, y_value)
# marking divisions and subdivisions on the y axis
x = self.left_margin
if y_to_mark[0] != 0:
intermediate_y_offset = y_offset / \
(number_of_intermediate_points_on_y + 1)
intermediate_y = y_origin - intermediate_y_offset
for _ in range(0, number_of_intermediate_points_on_y, +1):
paint.drawLine(x_origin, intermediate_y,
x_origin + 3, intermediate_y)
intermediate_y = intermediate_y - intermediate_y_offset
# y_last_position = y_origin
for j in range(len(y_to_mark)):
y = self.logicalYtoPixel(y_to_mark_cal[j])
if j + 1 == (int(len(y_to_mark) / 2)):
paint.drawText(5, y - (y_offset / 2), selectedMetric.name)
intermediate_y_offset = y_offset / (number_of_intermediate_points_on_y + 1)
intermediate_y = y - intermediate_y_offset
for _ in range(0, number_of_intermediate_points_on_y, +1):
paint.drawLine(x_origin, intermediate_y,
x_origin + 3, intermediate_y)
intermediate_y = intermediate_y - intermediate_y_offset
paint.drawLine(x_origin, y, x_origin + 6, y)
paint.drawText(x_origin - 70, y + 5, str(y_to_mark[j]))
# y_last_position = y
for x_value in range((x_origin + 7), x_other_end, +3):
paint.drawPoint(x_value, y)
def calculate_function(self, function, length_x_axis):
"""
This function calculates the x values based on the range that was provided and
then uses ExtraP_Function_Generic to calculate the corresponding y values.
"""
# m_x_lower_bound = 1
number_of_x_points, x_list, x_values = self._calculate_evaluation_points(length_x_axis)
previous = numpy.seterr(invalid='ignore', divide='ignore')
y_list = function.evaluate(x_list).reshape(-1)
numpy.seterr(**previous)
cord_list = self._create_drawing_iterator(x_values, y_list)
return cord_list
def calculate_aggregate_callpath_function(self, functions, length_x_axis):
"""
This function calculates the x values based on the range that was provided and then uses
ExtraP_Function_Generic to calculate and aggregate the corresponding y values over all the model functions.
"""
number_of_x_points, x_list, x_values = self._calculate_evaluation_points(length_x_axis)
y_list = numpy.zeros(number_of_x_points)
previous = numpy.seterr(invalid='ignore', divide='ignore')
for function in functions:
y_list += function.evaluate(x_list).reshape(-1)
numpy.seterr(**previous)
cord_list = self._create_drawing_iterator(x_values, y_list)
return cord_list
def _create_drawing_iterator(self, x_values, y_list):
y_list[y_list == math.inf] = numpy.max(y_list[y_list != math.inf])
y_list[y_list == -math.inf] = numpy.min(y_list[y_list != -math.inf])
cord_list_before_filtering = zip(x_values, y_list)
cord_list = ((x, y)
for x, y in cord_list_before_filtering
if not math.isnan(y) and 0 <= y)
return cord_list
def _calculate_evaluation_points(self, length_x_axis):
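# Build the evaluation points for the model functions: the parameter shown on the
# x-axis is varied over numpy.linspace(0, max_x, n), while every other model
# parameter is held fixed at the value currently selected in the data display.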
number_of_x_points = int(length_x_axis / 2)
x_values = numpy.linspace(0, self.max_x, number_of_x_points)
x_list = numpy.ndarray((len(self.main_widget.experiment.parameters), number_of_x_points))
param = self.main_widget.data_display.getAxisParameter(0).id
parameter_value_list = self.main_widget.data_display.getValues()
for i, val in parameter_value_list.items():
x_list[i] = numpy.repeat(val, number_of_x_points)
x_list[param] = x_values
return number_of_x_points, x_list, x_values
@staticmethod
def get_axis_mark_list(max_val, number_of_points):
""" This function takes the number of points to be marked on an axis and the maximum
value on that axis, and returns the list of values at which to place the axis marks.
"""
axis_points_to_mark = list()
axis_range = float((float(max_val - 0)) / number_of_points)
value = 0
for _ in range(1, number_of_points + 1):
value = value + axis_range
if value < 1:
digits_after_point = int(math.log10(1 / value)) + 2
value_to_append = float(
"{0:.{1}f}".format(value, digits_after_point))
else:
value_to_append = float("{0:.2f}".format(value))
axis_points_to_mark.append(float(value_to_append))
return axis_points_to_mark
def calculate_absolute_position(self, cord_list, identifier):
""" This function calculates the absolute position of the points on the
graph widget with respect to the coordinate system.
"""
absolute_position_list = list()
if identifier == "x":
for cord in cord_list:
cur_pixel = self.logicalXtoPixel(cord)
absolute_position_list.append(cur_pixel)
elif identifier == "y":
for cord in cord_list:
cur_pixel = self.logicalYtoPixel(cord)
absolute_position_list.append(cur_pixel)
return absolute_position_list
# function to format the numbers to be marked on the graph
@staticmethod
def format_numbers_to_be_displayed(value_list):
""" This function formats and beautifies the numbers to be shown on the graph.
"""
new_mark_list = list()
for value in value_list:
if value >= 10:
precision = 1
else:
precision = 2
value_str = formatNumber(str(value), precision)
new_mark_list.append(value_str)
return new_mark_list
@staticmethod
def reduce_length(value):
""" This function shortens a value written in scientific notation by rounding its mantissa to two decimal places.
"""
splitted_value = value.split('e')
first_part = float(splitted_value[0])
first_part = round(first_part, 2)
return '{:g}'.format(float(first_part)) + "e" + ''.join(splitted_value[1])
def calculateDataPoints(self, selectedMetric, selectedCallpath, ignore_limit=False):
""" This function calculates datapoints to be marked on the graph
"""
datapoints = self.main_widget.getCurrentModel().models[(selectedCallpath.path, selectedMetric)].measurements
parameter_datapoint = self.main_widget.data_display.getAxisParameter(0).id
datapoint_x_absolute_pos_list = list()
datapoint_y_absolute_pos_list = list()
datapoint_x_list = list()
datapoint_y_list = list()
if self.datapoints_type == "min":
datapoint_list = self.getDataPoints(
datapoints, parameter_datapoint, ignore_limit, lambda d: d.minimum)
elif self.datapoints_type == "mean":
datapoint_list = self.getDataPoints(
datapoints, parameter_datapoint, ignore_limit, lambda d: d.mean)
elif self.datapoints_type == "max":
datapoint_list = self.getDataPoints(
datapoints, parameter_datapoint, ignore_limit, lambda d: d.maximum)
elif self.datapoints_type == "median":
datapoint_list = self.getDataPoints(
datapoints, parameter_datapoint, ignore_limit, lambda d: d.median)
elif self.datapoints_type == "standardDeviation":
datapoint_list = self.getDataPoints(
datapoints, parameter_datapoint, ignore_limit, lambda d: d.std)
# TODO think about drawing as bar around value
else:
datapoint_list = None
if datapoint_list:
datapoint_x_list, datapoint_y_list = zip(*datapoint_list)
datapoint_x_absolute_pos_list = self.calculate_absolute_position(
datapoint_x_list, "x")
datapoint_y_absolute_pos_list = self.calculate_absolute_position(
datapoint_y_list, "y")
datapoint_on_graph_values = zip(datapoint_x_absolute_pos_list, datapoint_y_absolute_pos_list)
datapoint_actual_values = zip(datapoint_x_list, datapoint_y_list)
return list(zip(datapoint_on_graph_values, datapoint_actual_values))
def getDataPoints(self, datapoints, parameter_datapoint, ignore_limit, key):
"""
This function calculates the data points to be marked on the graph, using the
property selected by the key function (e.g. mean, minimum or maximum).
"""
return [
(dp.coordinate[parameter_datapoint], key(dp))
for dp in datapoints
if (dp.coordinate[parameter_datapoint] <= self.max_x or ignore_limit)
]
def calculateMaxY(self, modelList):
y_max = 0
pv_list = self.main_widget.data_display.getValues()
param = self.main_widget.data_display.getAxisParameter(0).id
pv_list[param] = self.max_x
# Check the maximum value of a displayed data point
if self.show_datapoints:
for model in modelList:
y = max(model.predictions)
y_max = max(y, y_max)
previous = numpy.seterr(invalid='ignore', divide='ignore')
import scipy.linalg as la
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
"""
Creating an entire pressure field based on the locations of various sources
"""
def pressure_field(positions,frequencies,
field_points = -1,
time = 0.0,
areas = [0.001],
velocities = [0.01],
strengths = [0.01],
phases = [0],
x_range = [-1,1],
y_range = [-1,1],
z_range = [-1,1],
point_density = 100,
directivity_distance = 1000,
num_directivity_points = 10000,
method = "Monopole Addition",
dimensions = 2,
directivity_only = False,
directivity_plot_alone = False,
show_plots = False,
pressure_limits = [-100,100]):
# Making all arrays that describe the sources have equal lengths
num_sources = len(positions)
positions = np.asarray(positions)
if np.size(frequencies) == 1:
frequencies = np.ones(num_sources) * frequencies
if np.size(areas) == 1:
areas = np.ones(num_sources) * areas
if np.size(strengths) == 1:
strengths = np.ones(num_sources) * strengths
if np.size(phases) == 1:
phases = np.ones(num_sources) * phases
if np.size(velocities) == 1:
velocities = np.ones(num_sources) * velocities
# Enabling the user to custom-select points in the field
if np.all(field_points == -1):
custom_points = False
else:
custom_points = True
time = complex(time)
if dimensions == 1 and not custom_points:
numPoints_x = int(np.floor((x_range[1] - x_range[0]) * point_density))
x = np.linspace(x_range[0],x_range[1],numPoints_x)
x = x[x != 0]
field_points = x.reshape(-1,1)
grid = x
elif dimensions == 2 and not custom_points:
numPoints_x = int(np.floor((x_range[1] - x_range[0]) * point_density))
numPoints_y = int(np.floor((y_range[1] - y_range[0]) * point_density))
x = np.linspace(x_range[0],x_range[1],numPoints_x)
y = np.linspace(y_range[0],y_range[1],numPoints_y)
grid = np.meshgrid(x,y)
field_points = np.append(grid[0].reshape(-1,1),grid[1].reshape(-1,1),axis=1)
X = grid[0]
Y = grid[1]
elif dimensions == 3 and not custom_points:
numPoints_x = int(np.floor((x_range[1] - x_range[0]) * point_density))
numPoints_y = int(np.floor((y_range[1] - y_range[0]) * point_density))
numPoints_z = int(np.floor((z_range[1] - z_range[0]) * point_density))
x = np.linspace(x_range[0],x_range[1],numPoints_x)
y = np.linspace(y_range[0],y_range[1],numPoints_y)
z = np.linspace(z_range[0],z_range[1],numPoints_z)
grid = np.meshgrid(x,y,z)
field_points = np.append(grid[0].reshape(-1,1),np.append(grid[1].reshape(-1,1),grid[2].reshape(-1,1),axis = 1),axis=1)
X = grid[0]
Y = grid[1]
Z = grid[2]
if not directivity_only:
pressure_field = get_field(positions,frequencies,strengths,velocities,areas,phases,field_points,time,method)
if not custom_points:
pressure_field = pressure_field.reshape(-1,len(x)) # It's the number of points in the x-direction that you use here
else:
pressure_field = 0
# Getting the directivity at a given distance. Default is 1000 meters away
if not dimensions == 1 and not custom_points:
directivity_points, theta = define_arc(directivity_distance,num_directivity_points)
directivity = np.abs(get_field(positions,frequencies,strengths,velocities,areas,phases,directivity_points,time,method))
directivity = directivity / np.max(directivity)
# Only show plots if the entire pressure field was calculated
if dimensions == 1 and not custom_points:
plot_1D(x,pressure_field,positions,show_plots,pressure_limits,directivity_only)
theta = 0
directivity = 0
if dimensions == 2 and not custom_points:
plot_2D(X,Y,pressure_field,positions,method,theta,directivity,show_plots,directivity_only,directivity_distance,directivity_plot_alone,pressure_limits)
if dimensions == 3 and not custom_points:
plot_3D(X,Y,Z,pressure_field,positions,method,theta,directivity,show_plots,directivity_only,directivity_distance,directivity_plot_alone,pressure_limits)
if not custom_points:
return pressure_field, grid, directivity, theta
else:
return pressure_field
def plot_1D(x,pressure_field,positions,show_plots,pressure_limits,directivity_only):
if show_plots and not directivity_only:
# Defining the figure
fig = plt.figure()
fig.set_size_inches(8,8)
# Plotting the real part
ax = fig.add_subplot(221)
ax.plot(x,np.real(pressure_field)[0,:])
ax.scatter(positions[:,0],np.zeros(len(positions[:,0])),color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax.set_aspect('auto')
ax.set_title("Real Part")
ax.set_xlabel("X (m)")
ax.set_ylabel("Re{Pressure}")
ax.set_ylim(pressure_limits[0],pressure_limits[1])
ax.grid("on")
# Plotting the imaginary part
ax = fig.add_subplot(223)
ax.plot(x,np.imag(pressure_field)[0,:])
ax.scatter(positions[:,0],np.zeros(len(positions[:,0])),color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax.set_aspect('auto')
ax.set_title("Imaginary Part")
ax.set_xlabel("X (m)")
ax.set_ylabel("Im{Pressure}")
ax.set_ylim(pressure_limits[0],pressure_limits[1])
ax.grid("on")
# Plotting the magnitude
ax = fig.add_subplot(222)
ax.plot(x,np.abs(pressure_field)[0,:])
ax.scatter(positions[:,0],np.zeros(len(positions[:,0])),color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax.set_aspect('auto')
ax.set_title("Magnitude")
ax.set_xlabel("X (m)")
ax.set_ylabel("|Pressure|")
ax.set_ylim(pressure_limits[0]*0.05,pressure_limits[1])
ax.grid("on")
fig.tight_layout(pad = 0.5)
fig.show()
def plot_2D(X,Y,pressure_field,positions,method,theta,directivity,show_plots,directivity_only,directivity_distance,directivity_plot_alone,pressure_limits):
if show_plots and not directivity_only:
# Defining the figure
fig, ax = plt.subplots(2,2)
fig.set_size_inches(8,8)
# Plotting the real part
c = ax[0,0].pcolormesh(X,Y,np.real(pressure_field),shading = "gouraud",cmap = "RdBu",vmin = pressure_limits[0],vmax = pressure_limits[1])
ax[0,0].scatter(positions[:,0],positions[:,1],color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax[0,0].set_aspect('equal')
ax[0,0].set_title("Real Part")
ax[0,0].set_xlabel("X (m)")
ax[0,0].set_ylabel("Y (m)")
fig.colorbar(c,ax = ax[0,0],fraction=0.046, pad=0.04)
# Plotting the imaginary part
c = ax[1,0].pcolormesh(X,Y,np.imag(pressure_field),shading = "gouraud",cmap = "RdBu",vmin = pressure_limits[0],vmax = pressure_limits[1])
ax[1,0].scatter(positions[:,0],positions[:,1],color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax[1,0].set_aspect('equal')
ax[1,0].set_title("Imaginary Part")
ax[1,0].set_xlabel("X (m)")
ax[1,0].set_ylabel("Y (m)")
fig.colorbar(c,ax = ax[1,0],fraction=0.046, pad=0.04)
# Plotting the magnitude
c = ax[0,1].pcolormesh(X,Y,np.abs(pressure_field),shading = "gouraud",cmap = "jet",vmin = 0,vmax = pressure_limits[1])
ax[0,1].scatter(positions[:,0],positions[:,1],color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax[0,1].set_aspect('equal')
ax[0,1].set_title("Pressure Magnitude")
ax[0,1].set_xlabel("X (m)")
ax[0,1].set_ylabel("Y (m)")
fig.colorbar(c,ax = ax[0,1],fraction=0.046, pad=0.04)
# Plotting the directivity
ax[1,1].axis("off")
ax = fig.add_subplot(224,projection = 'polar')
c = ax.plot(theta,20*np.log10(directivity))
ax.set_rmin(-40)
ax.set_rticks([0,-10,-20,-30,-40])
ax.set_aspect('equal')
ax.set_title(str("Beam Pattern (dB) at {0} m".format(directivity_distance)))
fig.show()
if method == "Rayleigh":
ax.set_thetamin(-90)
ax.set_thetamax(90)
fig.tight_layout(pad = 0.5)
fig.show()
if directivity_plot_alone:
fig, ax = plt.subplots(1,2,subplot_kw={'projection': 'polar'})
ax[0].plot(theta,directivity)
ax[0].set_title("Normalized Directivity")
ax[1].plot(theta,20*np.log10(directivity))
ax[1].set_title("Beam Pattern (dB)")
ax[1].set_rmin(-40)
ax[1].set_rticks([0,-10,-20,-30,-40])
fig.tight_layout()
fig.set_size_inches(8,8)
fig.show()
def plot_3D(X,Y,Z,pressure_field,positions,method,theta,directivity,show_plots,directivity_only,directivity_distance,directivity_plot_alone,pressure_limits):
if show_plots and not directivity_only:
# Defining the figure
fig = plt.figure()
fig.set_size_inches(8,8)
# Adding opacity to the colormap
cmap = plt.cm.RdBu_r
my_RdBu = cmap(np.arange(cmap.N))
my_RdBu[:,-1] = np.linspace(-1,1,cmap.N)
my_RdBu[:,-1] = np.abs(my_RdBu[:,-1])
my_RdBu = colors.ListedColormap(my_RdBu)
cmap = plt.cm.jet
my_jet = cmap(np.arange(cmap.N))
my_jet[:,-1] = np.linspace(0,1,cmap.N)
my_jet = colors.ListedColormap(my_jet)
# Plotting the real part
ax = fig.add_subplot(221,projection = '3d')
c = ax.scatter(X,Y,Z,np.real(pressure_field), c = np.real(pressure_field),cmap = my_RdBu,vmin = pressure_limits[0],vmax = pressure_limits[1],edgecolors = None)
ax.scatter(positions[:,0],positions[:,1],positions[:,2],color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax.set_aspect('auto')
ax.set_title("Real Part")
ax.set_xlabel("X (m)")
ax.set_ylabel("Y (m)")
fig.colorbar(c,ax = ax,fraction=0.046, pad=0.04)
# Plotting the imaginary part
ax = fig.add_subplot(223,projection = '3d')
c = ax.scatter(X,Y,Z,np.imag(pressure_field), c = np.imag(pressure_field),cmap = my_RdBu,vmin = pressure_limits[0],vmax = pressure_limits[1],edgecolors = None)
ax.scatter(positions[:,0],positions[:,1],positions[:,2],color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax.set_aspect('auto')
ax.set_title("Imaginary Part")
ax.set_xlabel("X (m)")
ax.set_ylabel("Y (m)")
fig.colorbar(c,ax = ax,fraction=0.046, pad=0.04)
# Plotting the magnitude
ax = fig.add_subplot(222,projection = '3d')
c = ax.scatter(X,Y,Z,np.abs(pressure_field), c = np.abs(pressure_field),cmap = my_jet,vmin = 0,vmax = pressure_limits[1],edgecolors = None)
ax.scatter(positions[:,0],positions[:,1],positions[:,2],color = "black",marker = "o",facecolors = "white",linewidth = 1.5,s = 10)
ax.set_aspect('auto')
ax.set_title("Magnitude")
ax.set_xlabel("X (m)")
ax.set_ylabel("Y (m)")
fig.colorbar(c,ax = ax,fraction=0.046, pad=0.04)
# Plotting the directivity
ax = fig.add_subplot(224,projection = 'polar')
c = ax.plot(theta,20*np.log10(directivity))
ax.set_rmin(-40)
ax.set_rticks([0,-10,-20,-30,-40])
ax.set_aspect('equal')
ax.set_title(str("Beam Pattern (dB) at {0} m".format(directivity_distance)))
fig.show()
if method == "Rayleigh":
ax.set_thetamin(-90)
ax.set_thetamax(90)
fig.tight_layout(pad = 0.5)
fig.show()
if directivity_plot_alone:
fig, ax = plt.subplots(1,2,subplot_kw={'projection': 'polar'})
ax[0].plot(theta,directivity)
ax[0].set_title("Normalized Directivity")
ax[1].plot(theta,20*np.log10(directivity))
ax[1].set_title("Beam Pattern (dB)")
ax[1].set_rmin(-40)
ax[1].set_rticks([0,-10,-20,-30,-40])
fig.tight_layout()
fig.set_size_inches(8,8)
fig.show()
"""
Creating a field
"""
def get_field(positions,frequencies,strengths,velocities,areas,phases,field_points,time,method):
# Convert everything to a numpy array
positions = np.asarray(positions)
strengths = np.asarray(strengths)
import itertools
import numpy as np
from PartSegCore.segmentation.border_smoothing import IterativeVoteSmoothing, OpeningSmoothing, VoteSmoothing
from PartSegCore.segmentation.watershed import NeighType
class TestVoteSmoothing:
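# Neighbourhood sizes used throughout these tests (3-D case): "sides" counts the
# 6 face-adjacent voxels, "edges" adds the 12 edge-adjacent voxels (18 in total),
# and "vertex" also adds the 8 corner voxels (26 in total); for the flat
# (single-slice) squares the in-plane counts reduce to 4 and 8. The support_level
# thresholds below are chosen around these counts.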
def test_cube_sides(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 3})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 4})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 5})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 6})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_cube_edges(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 6})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 7})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 9})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 10})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 13})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.edges, "support_level": 14})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_cube_vertex(self):
data = np.zeros((50, 50, 50), dtype=np.uint8)
data[2:-2, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 1})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 7})
assert np.all(res == data)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 8})
res2 = np.copy(data)
for pos in itertools.product([2, -3], repeat=3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 11})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 12})
res2 = np.copy(data)
for pos in itertools.permutations([2, 2, -3, -3, slice(2, -2)], 3):
res2[pos] = 0
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 17})
assert np.all(res2 == res)
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.vertex, "support_level": 18})
res2 = np.zeros(data.shape, dtype=data.dtype)
res2[3:-3, 3:-3, 3:-3] = 1
assert np.all(res2 == res)
def test_square_sides(self):
data = np.zeros((1, 50, 50), dtype=np.uint8)
data[:, 2:-2, 2:-2] = 1
res = VoteSmoothing.smooth(data, {"neighbourhood_type": NeighType.sides, "support_level": 1})
assert np.all(res == data)
#!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, Normalize
import h5py
import progressbar
import os
from glob import glob
import tensorflow as tf
print(f'Tensorflow version {tf.__version__}')
import flow_ffjord_tf
fig_fmt = 'png'
dpi = 120
def cart2cyl(eta):
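# eta is assumed to hold one 6-D phase-space sample per row, ordered as
# (x, y, z, v_x, v_y, v_z); the result is the corresponding cylindrical
# representation (R, z, phi, v_R, v_z, v_T), with v_T the tangential velocity.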
R = np.linalg.norm(eta[:,:2], axis=1)
z = eta[:,2]
phi = np.arctan2(eta[:,1], eta[:,0])
cos_phi = eta[:,0] / R
sin_phi = eta[:,1] / R
vR = eta[:,3] * cos_phi + eta[:,4] * sin_phi
vT = -eta[:,3] * sin_phi + eta[:,4] * cos_phi
vz = eta[:,5]
return {'R':R, 'z':z, 'phi':phi, 'vR':vR, 'vz':vz, 'vT':vT}
def load_training_data(fname):
with h5py.File(fname, 'r') as f:
eta = f['eta'][:]
return eta
def load_flows(fname_patterns):
flow_list = []
fnames = []
for fn in fname_patterns:
fnames += glob(fn)
fnames = sorted(fnames)
fnames = [fn[:-6] for fn in fnames]
print(f'Found {len(fnames)} flows.')
for i,fn in enumerate(fnames):
print(f'Loading flow {i+1} of {len(fnames)} ...')
print(fn)
flow = flow_ffjord_tf.FFJORDFlow.load(fname=fn)
flow_list.append(flow)
return flow_list
def sample_from_flows(flow_list, n_samples, batch_size=1024):
n_flows = len(flow_list)
# Sample from ensemble of flows
n_batches = n_samples // (n_flows * batch_size)
eta = np.empty((n_samples,6), dtype='f4')
eta[:] = np.nan # Make it obvious if there are unfilled values at the end
bar = progressbar.ProgressBar(max_value=n_batches*n_flows)
batch_idx = 0
for i,flow in enumerate(flow_list):
#print(f'Sampling from flow {i+1} of {n_flows} ...')
@tf.function
def sample_batch():
print(f'Tracing sample_batch for flow {i+1} of {n_flows} ...')
return flow.sample([batch_size])
for k in range(n_batches):
j0 = batch_idx * batch_size
eta[j0:j0+batch_size] = sample_batch().numpy()
batch_idx += 1
bar.update(batch_idx)
return eta
def plot_1d_marginals(cyl_train, cyl_sample, fig_dir, loss=None):
labels = ['$R$', '$z$', r'$\phi$', '$v_R$', '$v_z$', '$v_T$']
keys = ['R', 'z', 'phi', 'vR', 'vz', 'vT']
fig,ax_arr = plt.subplots(2,3, figsize=(12,8), dpi=120)
for i,(ax,l,k) in enumerate(zip(ax_arr.flat,labels,keys)):
xlim = np.percentile(cyl_train[k], [1., 99.])
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 21:02:07 2021
@author: lukepinkel
"""
import patsy
import numpy as np
import scipy as sp
import scipy.stats
import pandas as pd
import scipy.interpolate
import matplotlib.pyplot as plt
from .smooth_setup import parse_smooths, get_parametric_formula, get_smooth
from ..pyglm.families import Gaussian
from ..utilities.splines import (crspline_basis, bspline_basis, ccspline_basis,
absorb_constraints)
class GaussianAdditiveModel:
def __init__(self, formula, data):
family = Gaussian()
smooth_info = parse_smooths(formula, data)
formula = get_parametric_formula(formula)
y, Xp = patsy.dmatrices(formula, data, return_type='dataframe',
eval_env=1)
varnames = Xp.columns.tolist()
smooths = {}
start = p = Xp.shape[1]
ns = 0
for key, val in smooth_info.items():
slist = get_smooth(**val)
if len(slist)==1:
smooths[key], = slist
p_i = smooths[key]['X'].shape[1]
varnames += [f"{key}{j}" for j in range(1, p_i+1)]
p += p_i
ns += 1
else:
for i, x in enumerate(slist):
by_key = f"{key}_{x['by_cat']}"
smooths[by_key] = x
p_i = x['X'].shape[1]
varnames += [f"{by_key}_{j}" for j in range(1, p_i+1)]
p += p_i
ns += 1
X, S, Sj, ranks, ldS = [Xp], np.zeros((ns, p, p)), [], [], []
for i, (var, s) in enumerate(smooths.items()):
p_i = s['X'].shape[1]
Si, ix = np.zeros((p, p)), np.arange(start, start+p_i)
start += p_i
Si[ix, ix.reshape(-1, 1)] = s['S']
smooths[var]['ix'], smooths[var]['Si'] = ix, Si
X.append(smooths[var]['X'])
S[i] = Si
Sj.append(s['S'])
ranks.append(np.linalg.matrix_rank(Si))
u = np.linalg.eigvals(s['S'])
ldS.append(np.log(u[u>np.finfo(float).eps]).sum())
self.X, self.Xp, self.y = np.concatenate(X, axis=1), Xp.values, y.values[:, 0]
self.S, self.Sj, self.ranks, self.ldS = S, Sj, ranks, ldS
self.f, self.smooths = family, smooths
self.ns, self.n_obs, self.nx = ns, self.X.shape[0], self.X.shape[1]
self.mp = self.nx - np.sum(self.ranks)
self.data = data
theta = np.zeros(self.ns+1)
for i, (var, s) in enumerate(smooths.items()):
ix = smooths[var]['ix']
a = self.S[i][ix, ix[:, None].T]
d = np.diag(self.X[:, ix].T.dot(self.X[:, ix]))
lam = (1.5 * (d / a)[a>0]).mean()
theta[i] = np.log(lam)
varnames += [f"log_smooth_{var}"]
theta[-1] = 1.0
varnames += ["log_scale"]
self.theta = theta
self.varnames = varnames
self.smooth_info = smooth_info
def get_wz(self, eta):
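# Working response and weights for the penalized IRLS step. With link g,
# mu = g^{-1}(eta), variance V(mu) and dg = dmu/deta, the code forms
#   a = 1 + (y - mu) * (V'(mu)/V(mu) + g''(mu) * dg)
#   z = eta + (y - mu) / (dg * a)
#   w = a * dg**2 / V(mu)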
mu = self.f.inv_link(eta)
v = self.f.var_func(mu=mu)
dg = self.f.dinv_link(eta)
r = self.y - mu
a = 1.0 + r * (self.f.dvar_dmu(mu) / v + self.f.d2link(mu) * dg)
z = eta + r / (dg * a)
w = a * dg**2 / v
return z, w
def solve_pls(self, eta, S):
z, w = self.get_wz(eta)
Xw = self.X * w[:, None]
beta_new = np.linalg.solve(Xw.T.dot(self.X)+S, Xw.T.dot(z))
return beta_new
def pirls(self, alpha, n_iters=200, tol=1e-7):
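# Penalized IRLS for a fixed vector of smoothing parameters alpha:
#   1. build the total penalty S_alpha = sum_i alpha_i * S_i,
#   2. at the current eta compute the working response z and weights w,
#   3. solve (X' W X + S_alpha) beta = X' W z,
#   4. stop when the relative change in deviance drops below tol
#      (an increase in deviance aborts the iteration as unsuccessful).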
beta = np.zeros(self.X.shape[1])
S = self.get_penalty_mat(alpha)
eta = self.X.dot(beta)
dev = self.f.deviance(self.y, mu=self.f.inv_link(eta)).sum()
success = False
for i in range(n_iters):
beta_new = self.solve_pls(eta, S)
eta_new = self.X.dot(beta_new)
dev_new = self.f.deviance(self.y, mu=self.f.inv_link(eta_new)).sum()
if dev_new > dev:
success=False
break
if abs(dev - dev_new) / dev_new < tol:
success = True
break
eta = eta_new
dev = dev_new
beta = beta_new
return beta, eta, dev, success, i
def get_penalty_mat(self, alpha):
Sa = np.einsum('i,ijk->jk', alpha, self.S)
return Sa
def logdetS(self, alpha, phi):
logdet = 0.0
for i, (r, lds) in enumerate(list(zip(self.ranks, self.ldS))):
logdet += r * np.log(alpha[i]/phi) + lds
return logdet
def grad_beta_rho(self, beta, alpha):
S = self.get_penalty_mat(alpha)
A = np.linalg.inv(self.hess_dev_beta(beta, S))
dbdr = np.zeros((beta.shape[0], alpha.shape[0]))
import glob
from functools import partial
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import albumentations as albu
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import scipy
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, ListConfig, OmegaConf
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader
from src.dataset.dataset import WaveformDataset
from src.dataset.utils import calc_triangle_center, get_groundtruth
from src.postprocess.postporcess import apply_gauss_smoothing, apply_kf_smoothing
from src.postprocess.visualize import add_distance_diff
IMG_MEAN = (0.485, 0.456, 0.406, 0.485, 0.456, 0.406, 0.485, 0.456, 0.406)
IMG_STD = (0.229, 0.224, 0.225, 0.229, 0.224, 0.225, 0.485, 0.456, 0.406)
class GsdcDatamodule(pl.LightningDataModule):
def __init__(
self,
conf: DictConfig,
val_fold: int = 0,
batch_size: int = 64,
num_workers: int = 16,
aug_mode: int = 0,
is_debug: bool = False,
) -> None:
super().__init__()
self.conf = conf
self.batch_size = batch_size
self.aug_mode = aug_mode
self.num_workers = num_workers
self.is_debug = is_debug
self.val_fold = val_fold
self.input_width = conf["input_width"]
self.num_inchannels = len(conf["stft_targets"]) * 3
self.img_mean = np.array(IMG_MEAN[: self.num_inchannels])
self.img_std = np.array(IMG_STD[: self.num_inchannels])
def prepare_data(self):
# check
assert Path(get_original_cwd(), self.conf["data_dir"]).is_dir()
def _onehot_to_set(self, onehot: np.ndarray):
return set(np.where(onehot == 1)[0].astype(str).tolist())
def _use_cached_kalman(self, df: pd.DataFrame, is_test=False) -> pd.DataFrame:
print("apply kalman filttering")
processed_kf_path = (
"../input/kf_test.csv" if is_test else "../input/kf_train.csv"
)
processed_kf_path = Path(get_original_cwd(), processed_kf_path)
try:
df = pd.read_csv(processed_kf_path)
except Exception:
df = apply_kf_smoothing(df=df)  # NaN appears in the first or last row of each phone
df.to_csv(processed_kf_path, index=False)
return df
def setup(self, stage: Optional[str] = None):
# Assign Train/val split(s) for use in Dataloaders
conf = self.conf
if stage == "fit" or stage is None:
# read data
data_dir = Path(get_original_cwd(), self.conf["data_dir"])
self.train_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
df_path = pd.read_csv(
Path(get_original_cwd(), "./src/meta_data/path_meta_info.csv")
)
# merge ground truth
self.train_df = self.train_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
if self.conf.apply_kalman_filtering:
self.train_df = self._use_cached_kalman(df=self.train_df, is_test=False)
# there are NaNs at the beginning and end...
if self.conf.stft_targets[0].find("center") > -1:
self.train_df = calc_triangle_center(
df=self.train_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
self.train_df = add_distance_diff(df=self.train_df, is_test=False)
# train/val split
df_path = make_split(df=df_path, n_splits=3)
self.train_df = merge_split_info(data_df=self.train_df, split_df=df_path)
self.train_df = choose_paths(df=self.train_df, target=self.conf.target_path)
train_df = self.train_df.loc[self.train_df["fold"] != self.val_fold, :]
val_df = self.train_df.loc[self.train_df["fold"] == self.val_fold, :]
if self.conf.data_aug_with_kf:
train_phone = train_df.phone.unique()
if self.conf.apply_kalman_filtering:
orig_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
orig_df = orig_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
else:
orig_df = self._use_cached_kalman(df=train_df, is_test=False)
orig_df = orig_df.loc[orig_df.phone.isin(train_phone)]
if self.conf.stft_targets[0].find("center") > -1:
orig_df = calc_triangle_center(
df=orig_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
orig_df = add_distance_diff(df=orig_df, is_test=False)
split_info_df = train_df.loc[
:, ["phone", "millisSinceGpsEpoch", "location", "fold", "length"]
]
orig_df = pd.merge(
left=orig_df,
right=split_info_df,
on=["phone", "millisSinceGpsEpoch"],
)
orig_df["phone"] = orig_df["phone"] + "_kf_aug"
train_df = pd.concat([train_df, orig_df], axis=0).reset_index(drop=True)
if self.conf.data_aug_with_gaussian:
train_phone = train_df.phone.unique()
orig_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
orig_df = orig_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
orig_df = orig_df.loc[orig_df.phone.isin(train_phone)]
orig_df = apply_gauss_smoothing(
df=orig_df, params={"sz_1": 0.85, "sz_2": 5.65, "sz_crit": 1.5}
)
if self.conf.stft_targets[0].find("center") > -1:
orig_df = calc_triangle_center(
df=orig_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
orig_df = add_distance_diff(df=orig_df, is_test=False)
split_info_df = train_df.loc[
:, ["phone", "millisSinceGpsEpoch", "location", "fold", "length"]
]
orig_df = pd.merge(
left=orig_df,
right=split_info_df,
on=["phone", "millisSinceGpsEpoch"],
)
orig_df["phone"] = orig_df["phone"] + "_gauss"
train_df = pd.concat([train_df, orig_df], axis=0).reset_index(drop=True)
train_df, train_list = make_sampling_list(
df=train_df,
input_width=conf["input_width"],
sampling_delta=conf["train_sampling_delta"],
stft_targets=conf["stft_targets"],
is_test=False,
remove_starts=True,
remove_ends=False
if self.conf.stft_targets[0].find("prev") > -1
else True,
)
train_sequences = get_phone_sequences(
df=train_df, targets=conf["stft_targets"], is_test=False
)
val_df, val_list = make_sampling_list(
df=val_df,
input_width=conf["input_width"],
sampling_delta=conf["val_sampling_delta"],
stft_targets=conf["stft_targets"],
is_test=False,
remove_starts=True,
remove_ends=False
if self.conf.stft_targets[0].find("prev") > -1
else True,
)
val_df.to_csv("./val.csv")
val_sequences = get_phone_sequences(
df=val_df, targets=conf["stft_targets"], is_test=False
)
self.train_dataset = WaveformDataset(
sampling_list=train_list,
phone_sequences=train_sequences,
stft_targets=conf["stft_targets"],
stft_params=conf["stft_params"],
input_width=conf["input_width"],
image_transforms=self.train_transform(),
is_test=False,
gt_as_mask=self.conf.gt_as_mask,
rand_freq=self.conf.rand_freq,
rand_ratio=self.conf.rand_ratio,
sigma=self.conf.sigma,
)
self.val_dataset = WaveformDataset(
sampling_list=val_list,
phone_sequences=val_sequences,
stft_targets=conf["stft_targets"],
stft_params=conf["stft_params"],
input_width=conf["input_width"],
image_transforms=self.val_transform(),
is_test=False,
gt_as_mask=self.conf.gt_as_mask,
)
self.plot_dataset(self.train_dataset)
self.train_df = train_df
self.val_df = val_df
# Assign Test split(s) for use in Dataloaders
if stage == "test" or stage is None:
# read data
data_dir = Path(get_original_cwd(), self.conf["data_dir"])
if self.conf.test_with_val:
self.train_df = pd.read_csv(data_dir / "baseline_locations_train.csv")
df_path = pd.read_csv(
Path(get_original_cwd(), "../input/path_meta_info.csv")
)
if self.conf.apply_kalman_filtering:
self.train_df = self._use_cached_kalman(
df=self.train_df, is_test=False
)
# train/val split
df_path = make_split(df=df_path, n_splits=3)
self.train_df = merge_split_info(
data_df=self.train_df, split_df=df_path
)
self.test_df = self.train_df.loc[
self.train_df["fold"] == self.val_fold, :
]
else:
self.test_df = pd.read_csv(data_dir / "baseline_locations_test.csv")
if self.conf.apply_kalman_filtering:
self.test_df = self._use_cached_kalman(
df=self.test_df, is_test=True
)
# there are NaNs at the beginning and end...
if self.conf.stft_targets[0].find("center") > -1:
self.test_df = calc_triangle_center(
df=self.test_df, targets=["latDeg", "lngDeg"],
)
else:
self.test_df = add_distance_diff(df=self.test_df, is_test=True)
if self.conf.tta_with_kf:
test_phone = self.test_df.phone.unique()
if self.conf.apply_kalman_filtering:
orig_df = pd.read_csv(data_dir / "baseline_locations_test.csv")
orig_df = orig_df.merge(
get_groundtruth(data_dir),
on=["collectionName", "phoneName", "millisSinceGpsEpoch"],
)
else:
orig_df = self._use_cached_kalman(df=self.test_df, is_test=True)
orig_df = orig_df.loc[orig_df.phone.isin(test_phone)]
if self.conf.stft_targets[0].find("center") > -1:
orig_df = calc_triangle_center(
df=orig_df,
targets=["latDeg", "lngDeg", "latDeg_gt", "lngDeg_gt"],
)
else:
orig_df = add_distance_diff(df=orig_df, is_test=True)
split_info_df = self.test_df.loc[
:, ["phone", "millisSinceGpsEpoch", "location", "fold", "length"]
]
orig_df = pd.merge(
left=orig_df,
right=split_info_df,
on=["phone", "millisSinceGpsEpoch"],
)
orig_df["phone"] = orig_df["phone"] + "_kf_aug"
self.test_df = pd.concat([self.test_df, orig_df], axis=0).reset_index(
drop=True
)
self.test_df, test_list = make_sampling_list(
df=self.test_df,
input_width=conf["input_width"],
sampling_delta=conf["test_sampling_delta"],
stft_targets=conf["stft_targets"],
is_test=True,
remove_starts=True,
remove_ends=False
if self.conf.stft_targets[0].find("prev") > -1
else True,
)
self.test_df.to_csv("./test_input.csv", index=False)
test_sequences = get_phone_sequences(
df=self.test_df, targets=conf["stft_targets"], is_test=True
)
self.test_dataset = WaveformDataset(
sampling_list=test_list,
phone_sequences=test_sequences,
stft_targets=conf["stft_targets"],
stft_params=conf["stft_params"],
input_width=conf["input_width"],
image_transforms=self.test_transform(),
is_test=True,
gt_as_mask=self.conf.gt_as_mask,
)
self.plot_dataset(self.test_dataset)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
shuffle=True,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
shuffle=False,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def train_transform(self):
return self.get_transforms(mode=self.aug_mode)
def val_transform(self):
return self.get_transforms(mode=0)
def test_transform(self):
return self.get_transforms(mode=0)
def get_transforms(self, mode: int = 0) -> albu.Compose:
self.input_size = WaveformDataset.calc_stft_resize(
input_width=self.conf.input_width, n_fft=self.conf.stft_params.n_fft
)
def pad_image(
image: np.ndarray,
input_size: List[int],
constant_values: float = 255.0,
**kwargs,
):
pad_size = (input_size[0] - image.shape[0], input_size[1] - image.shape[1])
if np.any(np.array(pad_size) > 0):
image = np.pad(
image, [[0, pad_size[0]], [0, pad_size[1]], [0, 0]], mode="reflect",
)
# image[:, :, orig_width:] = constant_values
return image
add_pad_img = partial(
pad_image, input_size=self.input_size, constant_values=255.0
)
add_pad_mask = partial(
pad_image, input_size=self.input_size, constant_values=1.0
)
if mode == 0:
transforms = [
albu.Lambda(image=add_pad_img, mask=add_pad_mask, name="padding"),
albu.Normalize(mean=self.img_mean, std=self.img_std),
]
elif mode == 1:
transforms = [
albu.HorizontalFlip(p=0.5),
albu.Lambda(image=add_pad_img, mask=add_pad_mask, name="padding"),
albu.Normalize(mean=self.img_mean, std=self.img_std),
]
else:
raise NotImplementedError
if self.conf.gt_as_mask:
additional_targets = {"target_image": "mask"}
else:
additional_targets = {"target_image": "image"}
composed = albu.Compose(transforms, additional_targets=additional_targets)
return composed
def plot_dataset(
self, dataset, plot_num: int = 3, df: Optional[pd.DataFrame] = None,
) -> None:
inds = np.random.choice(len(dataset), plot_num)
h_, w_ = get_input_size_wo_pad(
n_fft=self.conf.stft_params.n_fft, input_width=self.conf.input_width
)
for i in inds:
plt.figure(figsize=(16, 8))
data = dataset[i]
im = data["image"].numpy().transpose(1, 2, 0)
im = im[:h_, :w_]
# === PLOT ===
nrows = 3
ncols = 3
fig, ax = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(12, 6), sharey=True, sharex=True,
)
fig.suptitle(
"_".join(
[
data["phone"],
str(data["millisSinceGpsEpoch"]),
str(data["phone_time"]),
]
)
)
cnum = len(self.conf["stft_targets"])
D_abs, D_cos, D_sin = WaveformDataset.handle_stft_normalize(
img=im,
cnum=cnum,
is_encode=False,
is_db=self.conf["stft_params"]["is_db"],
img_mean=self.img_mean,
img_std=self.img_std,
)
for stft_ind, stft_name in enumerate(self.conf["stft_targets"]):
show_stft(
conf=self.conf,
D_abs=D_abs[..., stft_ind],
D_cos=D_cos[..., stft_ind],
D_sin=D_sin[..., stft_ind],
ax=ax,
stft_ind=stft_ind,
stft_name=stft_name,
)
if data["target_image"].shape[0] != 0:
im = data["target_image"].numpy().transpose(1, 2, 0)
im = im[:h_, :w_]
# === PLOT ===
nrows = 3
ncols = 3
fig, ax = plt.subplots(
nrows=nrows, ncols=ncols, figsize=(12, 6), sharey=True, sharex=True,
)
fig.suptitle(
"_".join(
[
data["phone"],
str(data["millisSinceGpsEpoch"]),
str(data["phone_time"]),
]
)
)
cnum = len(self.conf["stft_targets"])
D_abs, D_cos, D_sin = WaveformDataset.handle_stft_normalize(
img=im,
cnum=cnum,
is_encode=False,
is_db=self.conf["stft_params"]["is_db"],
img_mean=self.img_mean,
img_std=self.img_std,
gt_as_mask=self.conf.gt_as_mask,
)
for stft_ind, stft_name in enumerate(self.conf["stft_targets"]):
show_stft(
conf=self.conf,
D_abs=D_abs[..., stft_ind],
D_cos=D_cos[..., stft_ind],
D_sin=D_sin[..., stft_ind],
ax=ax,
stft_ind=stft_ind,
stft_name=stft_name.replace("_diff", "_gt_diff"),
)
def get_input_size_wo_pad(n_fft: int = 256, input_width: int = 128) -> Tuple[int, int]:
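# A one-sided STFT with n_fft points yields n_fft // 2 + 1 frequency bins (the image
# height); the extra +1 on the width presumably accounts for the final STFT frame of
# an input_width-sample window.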
input_height = n_fft // 2 + 1
input_width = input_width + 1
return input_height, input_width
def show_stft(
conf: DictConfig,
D_abs: np.ndarray,
D_cos: np.ndarray,
D_sin: np.ndarray,
ax: plt.axes,
stft_ind: int,
stft_name: str = None,
) -> None:
for nrow, mat in enumerate([D_abs, D_cos, D_sin]):
img = librosa.display.specshow(
mat,
sr=1,
hop_length=conf["stft_params"]["hop_length"],
x_axis="time",
y_axis="hz",
cmap="cool",
ax=ax[nrow][stft_ind],
)
plt.colorbar(img, ax=ax[nrow][stft_ind])
ax[0][stft_ind].set_title(stft_name)
def choose_paths(df: pd.DataFrame, target: str = "short") -> pd.DataFrame:
if target is not None:
return df.loc[df["length"].apply(lambda x: x.split("-")[0]) == target, :]
else:
return df
def make_split(df: pd.DataFrame, n_splits: int = 3) -> pd.DataFrame:
df["fold"] = -1
df["groups"] = df["location"].apply(lambda x: x.split("-")[0])
df["groups"] = df["groups"] + "_" + df["length"]
# gkf = GroupKFold(n_splits=n_splits)
gkf = StratifiedKFold(n_splits=n_splits)
for i, (train_idx, valid_idx) in enumerate(gkf.split(df, df["groups"])):
df.loc[valid_idx, "fold"] = i
return df
def merge_split_info(data_df: pd.DataFrame, split_df: pd.DataFrame) -> pd.DataFrame:
split_col = ["collectionName", "location", "length", "fold"]
df = pd.merge(data_df, split_df.loc[:, split_col], on="collectionName")
return df
def interpolate_vel(
velocity: np.ndarray,
base_time: np.ndarray,
ref_time: np.ndarray,
drop_first_vel: bool = True,
) -> np.ndarray:
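# Prepares the per-step velocities for interpolation onto ref_time: base_time and
# velocity are padded when ref_time reaches (by at most 1000 time units, milliseconds
# in this dataset) beyond base_time, the first (undefined) velocity row is optionally
# dropped, and the velocities are then integrated into relative positions with a
# cumulative sum.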
if velocity.ndim == 1:
raise NotImplementedError
if ref_time.max() > base_time.max():
assert ref_time.max() - base_time.max() <= 1000
base_time = np.pad(
base_time, [0, 1], mode="constant", constant_values=base_time.max() + 1000
)
velocity = np.pad(velocity, [[0, 1], [0, 0]], mode="edge")
if drop_first_vel:
assert np.all(np.isnan(velocity[0])) or np.all(velocity[0] == 0.0)  # NaN must be tested with isnan; == np.nan is always False
velocity = velocity[
1:,
]
# (sequence, feats)
rel_posi = np.cumsum(velocity, axis=0)
import os
import numpy as np
INPUT = os.path.join(os.path.dirname(__file__), "input.txt")
with open(INPUT) as f:
lines = f.readlines()
lines = [l.rstrip().split(",") for l in lines]
lines_arr = np.array(lines)
lines_arr = np.squeeze(lines_arr)
# Part 1 Naive Numpy Bruteforce
lanternfish = np.copy(lines_arr).astype(int)
for day in range(80):
lanternfish -= 1
n_newborns = lanternfish[lanternfish < 0].size
lanternfish[lanternfish < 0] = 6
lanternfish = np.append(lanternfish, np.ones(n_newborns) * 8)
print(lanternfish.size)
# Part 2
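# Instead of simulating every fish, keep one count per timer value (0-8). Each day
# the counts rotate left by one (all timers decrease); the fish that were at 0 are
# appended at timer 8 as newborns and also added back at timer 6 as respawned parents.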
fast_fish, bins = np.histogram(lines_arr.astype(int), bins=np.arange(0, 10))  # cast the string timers to int before binning
for i in range(256):
n_newborns = fast_fish[0]
fast_fish = np.append(fast_fish[1:], n_newborns)
fast_fish[6] += n_newborns
print(np.sum(fast_fish))
import sys, os
from bmtk.simulator import bionet
import numpy as np
import h5py
#import synapses
import pandas as pd
from neuron import h
from matplotlib import cm
np.random.seed(2129)
import nibabel as nib
import numpy as np
import torch
from functools import partial
from collections import defaultdict
from pairwise_measures import PairwiseMeasures
from src.utils import apply_transform, non_geometric_augmentations, generate_affine, to_var_gpu, batch_adaptation, soft_dice
def evaluate(args, preds, targets, prefix,
metrics=['dice', 'jaccard', 'sensitivity', 'specificity', 'soft_dice',
'loads', 'haus_dist', 'vol_diff', 'ppv', 'connected_elements']):
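# Predictions and targets are binarised at 0.5 and the requested metrics are
# accumulated per volume into lists keyed by '<prefix><metric>'; connected-component
# counts are stored as separate TPc/FPc/FNc entries.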
output_dict = defaultdict(list)
nifty_metrics = ['dice', 'jaccard', 'sensitivity', 'specificity',
'haus_dist', 'vol_diff', 'ppv', 'connected_elements']
for pred, target in zip(preds, targets):
seg = np.where(pred > 0.5, np.ones_like(pred, dtype=np.int64), np.zeros_like(pred, dtype=np.int64))
ref = np.where(target > 0.5, np.ones_like(target, dtype=np.int64), np.zeros_like(target, dtype=np.int64))
pairwise = PairwiseMeasures(seg, ref)
for metric in nifty_metrics:
if metric in metrics:
if metric == 'connected_elements':
TPc, FPc, FNc = pairwise.m_dict[metric][0]()
output_dict[prefix + 'TPc'].append(TPc)
output_dict[prefix + 'FPc'].append(FPc)
output_dict[prefix + 'FNc'].append(FNc)
else:
output_dict[prefix + metric].append(pairwise.m_dict[metric][0]())
if 'soft_dice' in metrics:
output_dict[prefix + 'soft_dice'].append(soft_dice(pred, ref, args.labels))
if 'loads' in metrics:
output_dict[prefix + 'loads'].append(np.sum(pred))
if 'per_pixel_diff' in metrics:
output_dict[prefix + 'per_pixel_diff'].append(np.mean(np.abs(ref - pred)))
return output_dict
def inference_tumour(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None):
"""
This function should run inference on a set of volumes, save the results, calculate the dice
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
print('Evaluating on {} subjects'.format(len(range_of_volumes)))
        for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
#TODO: inputs is of size (4, 170, 240, 160), need to change inference values accordingly.
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
inputsS = np.zeros(shape=(inputs.shape[0], args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
for slice_index in np.arange(0, inputs.shape[-1], step=args.batch_size):
index_start = slice_index
index_end = min(slice_index+args.batch_size, inputs.shape[-1])
batch_input = np.einsum('ijkl->lijk', inputs[:, :, :, index_start:index_end])
batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
batch_input = torch.tensor(batch_input)
batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
outputs, _, _, _, _, _, _, _, _, _ = model(batch_input)
outputs = torch.sigmoid(outputs)
if args.method == 'A2':
Theta, Theta_inv = generate_affine(batch_input, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(batch_input, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
elif args.method == 'A4':
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
elif args.method in ['A3', 'adversarial', 'mean_teacher']:
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
outputS[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputs.detach().cpu().numpy()))
targetL[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(batch_labels.detach().cpu().numpy()))
inputsS[:, :, :, index_start:index_end] = np.einsum('ijkl->jkli', np.squeeze(batch_input.detach().cpu().numpy()))
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
outputsAug[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputstaug.detach().cpu().numpy()))
if args.method in ['A3', 'A2', 'adversarial', 'mean_teacher']:
outputsT[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputs_t.detach().cpu().numpy()))
format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source, args.target, args.tag,
iteration) + \
'_{}_' + f'{str(subj_id)}.nii.gz'
ema_format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source,
args.target, args.tag, 'EMA') + \
'_{}_' + f'{str(subj_id)}.nii.gz'
if iteration == 0:
fn = save_img(format_spec=ema_format_spec, identifier='Prediction', array=outputS)
else:
pred_zero = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_0__Prediction_{str(subj_id)}.nii.gz'
outputs_0 = nib.load(pred_zero).get_data()
preds_0.append(outputs_0)
alpha = 0.9
pred_ema_filename = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_EMA__Prediction_{str(subj_id)}.nii.gz'
pred_ema_t_minus_one = nib.load(pred_ema_filename).get_data()
pred_ema = alpha * outputS + (1 - alpha) * pred_ema_t_minus_one
preds_ema.append(pred_ema)
save_img(format_spec=ema_format_spec, identifier='Prediction', array=pred_ema)
save_img(format_spec=format_spec, identifier='Prediction', array=outputS)
save_img(format_spec=format_spec, identifier='target', array=targetL)
for idx, modality in enumerate(['flair', 't1c', 't1', 't2']):
save_img(format_spec=format_spec, identifier='{}_mri'.format(modality), array=inputsS[idx, ...])
preds.append(outputS)
targets.append(targetL)
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
predsAug.append(outputsAug)
save_img(format_spec=format_spec, identifier='Aug', array=outputsAug)
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
predsT.append(outputsT)
save_img(format_spec=format_spec, identifier='Transformed', array=outputsT)
performance_supervised = evaluate(args=args, preds=preds, targets=targets, prefix='supervised_')
performance_i = None
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
performance_i = evaluate(args=args, preds=predsAug, targets=predsT, prefix='consistency_')
else:
performance_i = evaluate(args=args, preds=predsAug, targets=preds, prefix='consistency_')
if iteration == 0:
return performance_supervised, performance_i, None, None
else:
performance_compared_to_0 = evaluate(args=args, preds=preds, targets=preds_0, prefix='diff_to_0_',
metrics=['per_pixel_diff'])
performance_compared_to_ema = evaluate(args=args, preds=preds, targets=preds_ema, prefix='diff_to_ema_',
metrics=['per_pixel_diff'])
return performance_supervised, performance_i, performance_compared_to_0, performance_compared_to_ema
def inference_ms(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None, eval_diff=True):
"""
    Run inference on a set of volumes, save the results, and calculate the Dice score.
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
print('Evaluating on {} subjects'.format(len(whole_volume_dataset)))
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
inputsS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
for slice_index in np.arange(0, inputs.shape[2], step=args.batch_size):
index_start = slice_index
index_end = min(slice_index+args.batch_size, inputs.shape[2])
batch_input = np.einsum('ijk->kij', inputs[:, :, index_start:index_end])
batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
batch_input = torch.tensor(np.expand_dims(batch_input, axis=1).astype(np.float32))
batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
outputs, _, _, _, _, _, _, _, _, _ = model(batch_input)
outputs = torch.sigmoid(outputs)
if args.method == 'A2':
Theta, Theta_inv = generate_affine(batch_input, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(batch_input, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
elif args.method == 'A4':
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
elif args.method in ['A3', 'adversarial', 'mean_teacher']:
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
outputS[:, :, index_start:index_end] = np.einsum('ijk->jki', outputs.detach().cpu().numpy()[:, 0, ...])
targetL[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_labels.detach().cpu().numpy()[:, 0, ...])
inputsS[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_input.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
outputsAug[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputstaug.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A3', 'A2', 'adversarial', 'mean_teacher']:
outputsT[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputs_t.detach().cpu().numpy()[:, 0, ...])
format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source, args.target, args.tag, iteration) +\
'_{}_' + f'{str(subj_id)}.nii.gz'
ema_format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source,
args.target, args.tag, 'EMA') + \
'_{}_' + f'{str(subj_id)}.nii.gz'
if iteration == 0:
save_img(format_spec=ema_format_spec, identifier='Prediction', array=outputS)
elif eval_diff and iteration > 0:
pred_zero = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_{0}__Prediction_{str(subj_id)}.nii.gz'
outputs_0 = nib.load(pred_zero).get_data()
preds_0.append(outputs_0)
alpha = 0.9
pred_ema_filename = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_EMA__Prediction_{str(subj_id)}.nii.gz'
print(pred_ema_filename)
pred_ema_t_minus_one = nib.load(pred_ema_filename).get_data()
pred_ema = alpha * outputS + (1 - alpha) * pred_ema_t_minus_one
preds_ema.append(pred_ema)
save_img(format_spec=ema_format_spec, identifier='Prediction', array=pred_ema)
else:
print('Not computing diff')
save_img(format_spec=format_spec, identifier='Prediction', array=outputS)
save_img(format_spec=format_spec, identifier='target', array=targetL)
save_img(format_spec=format_spec, identifier='mri', array=inputsS)
preds.append(outputS)
targets.append(targetL)
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
predsAug.append(outputsAug)
save_img(format_spec=format_spec, identifier='Aug', array=outputsAug)
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
predsT.append(outputsT)
save_img(format_spec=format_spec, identifier='Transformed', array=outputsT)
performance_supervised = evaluate(args=args, preds=preds, targets=targets, prefix='supervised_')
performance_i = None
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
performance_i = evaluate(args=args, preds=predsAug, targets=predsT, prefix='consistency_')
else:
performance_i = evaluate(args=args, preds=predsAug, targets=preds, prefix='consistency_')
if iteration == 0:
return performance_supervised, performance_i, None, None
else:
performance_compared_to_0 = evaluate(args=args, preds=preds, targets=preds_0, prefix='diff_to_0_',
metrics=['per_pixel_diff'])
performance_compared_to_ema = evaluate(args=args, preds=preds, targets=preds_ema, prefix='diff_to_ema_',
metrics=['per_pixel_diff'])
return performance_supervised, performance_i, performance_compared_to_0, performance_compared_to_ema
def inference_crossmoda(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None, eval_diff=True):
"""
    Run inference on a set of volumes, save the results, and calculate the Dice score.
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
print('Evaluating on {} subjects'.format(len(whole_volume_dataset)))
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
inputsS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
            outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
#
# Frequency Domain Filters
#
import cv2
import numpy as np
def square_pad(source: np.ndarray, size_x: int, size_y: int, pad_value: int) -> np.ndarray:
"""
Pad Image/Array to Desired Output shape
:param source: Input Array/Image
:param size_x: Desired width size
:param size_y: Desired height
:param pad_value: value to be added as a padding
:return: Padded Square Array
"""
src = np.copy(source)
x, y = src.shape
out_x = (size_x - x) // 2
out_xx = size_x - out_x - x
out_y = (size_y - y) // 2
out_yy = size_y - out_y - y
return np.pad(src, ((out_x, out_xx), (out_y, out_yy)), constant_values=pad_value)
def frequency_filter(source: np.ndarray, kernel: np.ndarray) -> np.ndarray:
"""
Application of a Filter in frequency domain
:param source: Source Image
:param kernel: Kernel applied on image
:return: Filtered Image
"""
src = np.copy(source)
# Covert Image to gray scale
src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# Convert Image to Frequency Domain
# and decentralize the output
ft_src = np.fft.fft2(src)
ft_src_shifted = np.fft.fftshift(ft_src)
# apply Kernel
out = np.fft.ifftshift(ft_src_shifted * kernel)
out = np.fft.ifft2(out)
    return np.abs(out)
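# --- Hedged usage sketch (not part of the original module) --------------------
# Builds a square low-pass mask with square_pad and applies it through
# frequency_filter. The cutoff half-width and the assumption that the mask is
# smaller than the (BGR) input image are illustrative only.
def example_low_pass(source: np.ndarray, cutoff: int = 30) -> np.ndarray:
    """Apply a square ideal low-pass filter of half-width `cutoff` to a BGR image."""
    rows, cols = source.shape[:2]
    # ones inside the centered square pass low frequencies, zeros block the rest
    mask = square_pad(np.ones((2 * cutoff, 2 * cutoff)), rows, cols, pad_value=0)
    return frequency_filter(source, mask)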
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 15 18:53:22 2021
@author: <NAME>
"""
import argparse
import numpy as np
from zdm import zdm
#import pcosmic
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from scipy import interpolate
import matplotlib
from pkg_resources import resource_filename
import os
import sys
import scipy as sp
import time
from matplotlib.ticker import NullFormatter
from zdm import iteration as it
from zdm import survey
from zdm import cosmology as cos
from zdm import pcosmic
from zdm import beams
from zdm import misc_functions
import pickle
np.seterr(divide='ignore')
####setting up the initial grid and plotting some stuff####
setH0=67.74
cos.set_cosmology(H0=setH0)
# get the grid of p(DM|z)
zDMgrid, zvals,dmvals,H0=misc_functions.get_zdm_grid(H0=setH0,new=True,plot=False,method='analytic')
Wbins=10
Wscale=2
Nbeams=[20,20,20] #Full beam NOT Std
thresh=0
method=2
Wlogmean=1.70267
Wlogsigma=0.899148
sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys/')
lat50=survey.survey()
lat50.process_survey_file(sdir+'CRAFT_class_I_and_II.dat')
DMhalo=50
lat50.init_DMEG(DMhalo)
lat50.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(lat50,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=lat50.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=lat50.wplist
ics=survey.survey()
ics.process_survey_file(sdir+'CRAFT_ICS.dat')
DMhalo=50
ics.init_DMEG(DMhalo)
ics.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(ics,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=ics.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=ics.wplist
pks=survey.survey()
pks.process_survey_file(sdir+'parkes_mb_class_I_and_II.dat')
DMhalo=50
pks.init_DMEG(DMhalo)
pks.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(pks,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies=pks.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
weights=pks.wplist
ICS892=survey.survey()
ICS892.process_survey_file(sdir+'CRAFT_ICS_892.dat')
ICS892.init_DMEG(DMhalo)
ICS892.init_beam(nbins=Nbeams[0],method=2,plot=False,thresh=thresh) # tells the survey to use the beam file
pwidths,pprobs=survey.make_widths(ICS892,Wlogmean,Wlogsigma,Wbins,scale=Wscale)
efficiencies892=ICS892.get_efficiency_from_wlist(dmvals,pwidths,pprobs)
surveys=[lat50,ics,ICS892,pks]
#updated best-fit values
alpha_method=0
logmean=2.11
logsigma=0.53
alpha=1.55
gamma=-1.09
Emax=10**(41.7)
Emin=10**(30)
sfr_n=1.67
C=3.188
#alpha_method=1
#Emin=10**30
#Emax =10**41.40
#alpha =-0.66
#gamma = -1.01
#sfr_n= 0.73
#logmean=2.18
#logsigma=0.48
#C=2.36 ##it.GetFirstConstantEstimate(grids,surveys,pset)
pset=[np.log10(float(Emin)),np.log10(float(Emax)),alpha,gamma,sfr_n,logmean,logsigma,C,setH0]
it.print_pset(pset)
grids=misc_functions.initialise_grids(surveys,zDMgrid, zvals,dmvals,pset,wdist=True,source_evolution=0,alpha_method=0)
plots=False
zmax=[0.6,1,1,3]
DMmax=[1500,2000,2000,3000]
zmax2=[0.75,1,1,3]
DMmax2=[1000,2000,2000,4000]
if plots:
for i in range (len(surveys)):
grid=grids[i]
sv=surveys[i]
pcosmic.plot_mean(zvals,'mean_DM.pdf')
#misc_functions.plot_efficiencies(lat50)
misc_functions.plot_zdm_basic_paper(grid.grid,grid.zvals,grid.dmvals,zmax=3,DMmax=3000,
name='Plots/p_dm_z_grid_image.pdf',norm=1,log=True,
label='$\\log_{10}p(DM_{\\rm EG}|z)$',
conts=[0.16,0.5,0.88],title='Grid at H0 '+str(i),
H0=setH0,showplot=True)
misc_functions.plot_zdm_basic_paper(grid.smear_grid,grid.zvals,grid.dmvals,zmax=3,
DMmax=3000,norm=1,log=True,
ylabel='${\\rm DM_{\\rm EG}}$',
label='$\\log_{10} p({\\rm DM_{cosmic}+DM_{host}}|z)$',
conts=[0.023, 0.159,0.5,0.841,0.977],
title='Smear grid at H0 '+str(i),H0=setH0,
showplot=True)
misc_functions.plot_grid_2(grid.pdv,grid.zvals,grid.dmvals,zmax=zmax[i],DMmax=DMmax[i],
name='Plots/pdv.pdf',norm=2,log=True
,label='$p(DM_{\\rm EG},z)dV$ [Mpc$^3$]',
title="Pdv at H0" + str(i),showplot=True)
muDM=10**pset[5]
Macquart=muDM
misc_functions.plot_grid_2(grid.rates,grid.zvals,grid.dmvals,zmax=zmax[i],DMmax=DMmax[i],
norm=2,log=True,label='$\\log_{10} p({\\rm DM}_{\\rm EG},z)$',
project=False,FRBDM=sv.DMEGs,FRBZ=None,Aconts=[0.01,0.1,0.5],
Macquart=Macquart,title="H0 value "+str(i),H0= setH0,showplot=True)
misc_functions.make_dm_redshift(grid,
DMmax=DMmax2[i],zmax=zmax2[i],loc='upper right',Macquart=Macquart,
H0=setH0,showplot=True)
print ("initial grid setup done")
scanoverH0=False
# just testing....should NOT be used (update_grid routine should not be modified)
if scanoverH0:
for k in range (len(surveys)):
grid=grids[k]
sv=surveys[k]
###### shows how to do a 1D scan of parameter values #######
pset=[np.log10(float(grid.Emin)),np.log10(float(grid.Emax)),grid.alpha,grid.gamma,grid.sfr_n,grid.smear_mean,grid.smear_sigma,C,grid.H0]
#lEmaxs=np.linspace(40,44,21)
#lscan,lllist,expected=it.scan_likelihoods_1D(grid,pset,lat50,1,lEmaxs,norm=True)
#print (lscan, lllist, expected)
#misc_functions.plot_1d(lEmaxs,lscan,'$E_{\\rm max}$','Plots/test_lik_fn_emax.pdf')
#for H0
t0=time.process_time()
        H0iter=np.linspace(50,100,4)
# -*- coding: utf-8 -*-
"""
License: MIT
@author: gaj
E-mail: <EMAIL>
Paper References:
[1] <NAME>, <NAME>, and <NAME>, “Improving component substitution Pansharpening through multivariate regression of MS+Pan data,”
IEEE Transactions on Geoscience and Remote Sensing, vol. 45, no. 10, pp. 3230–3239, October 2007.
[2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “A Critical Comparison Among Pansharpening Algorithms”,
IEEE Transaction on Geoscience and Remote Sensing, 2014.
"""
import numpy as np
from methods.utils import upsample_interp23
import cv2
def estimation_alpha(pan, hs, mode='global'):
if mode == 'global':
IHC = np.reshape(pan, (-1, 1))
ILRC = np.reshape(hs, (hs.shape[0]*hs.shape[1], hs.shape[2]))
alpha = np.linalg.lstsq(ILRC, IHC)[0]
elif mode == 'local':
patch_size = 32
all_alpha = []
print(pan.shape)
for i in range(0, hs.shape[0]-patch_size, patch_size):
for j in range(0, hs.shape[1]-patch_size, patch_size):
patch_pan = pan[i:i+patch_size, j:j+patch_size, :]
patch_hs = hs[i:i+patch_size, j:j+patch_size, :]
IHC = np.reshape(patch_pan, (-1, 1))
ILRC = np.reshape(patch_hs, (-1, hs.shape[2]))
local_alpha = np.linalg.lstsq(ILRC, IHC)[0]
all_alpha.append(local_alpha)
all_alpha = np.array(all_alpha)
alpha = np.mean(all_alpha, axis=0, keepdims=False)
return alpha
def GSA(pan, hs):
M, N, c = pan.shape
m, n, C = hs.shape
ratio = int(np.round(M/m))
print('get sharpening ratio: ', ratio)
assert int(np.round(M/m)) == int(np.round(N/n))
#upsample
u_hs = upsample_interp23(hs, ratio)
#remove means from u_hs
means = np.mean(u_hs, axis=(0, 1))
image_lr = u_hs-means
#remove means from hs
image_lr_lp = hs-np.mean(hs, axis=(0,1))
#sintetic intensity
image_hr = pan-np.mean(pan)
image_hr0 = cv2.resize(image_hr, (n, m), cv2.INTER_CUBIC)
image_hr0 = np.expand_dims(image_hr0, -1)
alpha = estimation_alpha(image_hr0, np.concatenate((image_lr_lp, np.ones((m, n, 1))), axis=-1), mode='global')
I = np.dot(np.concatenate((image_lr, np.ones((M, N, 1))), axis=-1), alpha)
I0 = I-np.mean(I)
#computing coefficients
g = []
g.append(1)
for i in range(C):
temp_h = image_lr[:, :, i]
c = np.cov(np.reshape(I0, (-1,)), np.reshape(temp_h, (-1,)), ddof=1)
g.append(c[0,1]/np.var(I0))
g = np.array(g)
#detail extraction
delta = image_hr-I0
deltam = np.tile(delta, (1, 1, C+1))
#fusion
    V = np.concatenate((I0, image_lr), axis=-1)
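    # NOTE: the source file is truncated here. The remaining GSA step (shown only
    # as a hedged sketch, not the original code) usually injects the PAN detail
    # into every band and restores the removed means, e.g.:
    #     V_hat = V + deltam * g           # broadcast the gains over the last axis
    #     I_GSA = V_hat[:, :, 1:] + means  # drop the intensity channel, add means back
    #     return np.clip(I_GSA, 0, None)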
#!/usr/bin/env python
import librosa
import numpy as np
import pyworld as pw
import scipy
import imageio
from sklearn.preprocessing import normalize
class conf:
"""
Configuration Parameter Class
"""
"""
time length of preprocessed audio
"""
prep_audio_dataset_second=3
# sampling rate
sample_ratio = 16000 # 22050
"""
Short Time FFT window size
librosa default value 2048
stft returned value shape (1 + n_fft/2, t)
"""
n_fft = 256 #2048
"""
    Size multiplier for the Encoder's vertical/horizontal convolutions.
    The encoder has 8 layers and each layer halves the matrix size, hence 256.
    The rows/columns of the input spectrogram must be multiples of this value.
"""
Encocer_Feature_Constant=2**7 #2**7:128@n_fft256 #2**8:256
"""
enable saving the label(specImage)
"""
enable_output_labelWav = True
"""
    Whether to print the scaleFactor used by scaleArray()
"""
print_scaleFactor=False
"""
    Minimum magnitude used when converting to a spectrogram
    -> gives a minimum value of -6 after taking log10
"""
eps= 10**-6
"""
    Scale factor used when normalizing the magnitude spectrogram
"""
scale_abs=0.1
"""
    Offset used when normalizing the magnitude spectrogram
    (determined from eps)
"""
offset_abs=0.6
"""
    Scale factor used when normalizing the phase spectrogram
"""
scale_phase=1/(np.pi*2)
"""
    Offset used when normalizing the phase spectrogram
"""
offset_phase=0.5
def convert_to_wave(Dabs, Dphase):
D_hat = 10 ** Dabs * np.exp(1j*Dphase) #xp.exp(1j*Dphase)
y_hat = librosa.istft(D_hat)
return y_hat
def convert_to_spectrogram(waveNDArray):
"""
convert audio 1D Numpy Array to spectrogram 2D Numpy Array.
note. Dabs = np.log10(np.abs(D) + 10**-6)
:param waveNDArray:
:return: Dabs,Dphase
"""
# スペクトル・位相マップ 作成
D = librosa.stft(waveNDArray, n_fft=conf.n_fft) #D:np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
Dabs = np.log10(np.abs(D) + conf.eps)
Dphase = np.angle(D)
return Dabs,Dphase
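# --- Hedged usage sketch (not part of the original module) --------------------
# Round-trips a waveform through the log-magnitude/phase spectrogram pair above.
# The audio path is an illustrative assumption.
def example_roundtrip(path="example.wav"):
    y, sr = librosa.load(path, sr=conf.sample_ratio)
    Dabs, Dphase = convert_to_spectrogram(y)
    y_hat = convert_to_wave(Dabs, Dphase)  # approximate reconstruction of y
    return y_hat, sr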
def padding_spectrogram(D):
"""
    Pad the spectrogram so its matrix size is an integer multiple of the Encoder's characteristic value.
    The encoder has 8 layers, each halving the matrix size, so the spectrogram's rows/columns are made multiples of 256.
"""
    D = D[0:D.shape[0]-1,:]  # drop the last row  TODO: use 0:-1
w_div,w_rem = divmod(D.shape[1], conf.Encocer_Feature_Constant)
D = np.pad(D, [(0,0), (0, conf.Encocer_Feature_Constant * (w_div + 1) - D.shape[1])],
'constant', constant_values = np.min(np.abs(D)))
return D
def anonymization(fs, waveNDArray, f0Value = 0, sp_strechRatio = np.random.uniform(0.6, 2, size=1)):  # NOTE: signature truncated in the source; closing parenthesis assumed
from src.network.dqn import DeepQNetwork
dqn = DeepQNetwork(input_size=[100, 100, 4], max_steps=1000, \
state_stack_size=4, action_size=9, num_episodes=10000, memory_frame_rate=3)
n_simulations = 50
import numpy as np
for checkpoint in range(6, 41, 5):
restored = dqn.restore_last_checkpoint(checkpoint)
avg_reward = 0.0
    rewards = np.array([])
"""Training with long time series.
Supplementary code for:
<NAME> and <NAME>. "Signal Processing with Recurrent Neural Networks in TensorFlow"
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
nest = tf.contrib.framework.nest
def lstm_func(d, m, n, lr):
# Placeholders
inputs = tf.placeholder(tf.float32, [None, None, d])
targets = tf.placeholder(tf.float32, [None, None, m])
# Network architecture
n_cells_layers = [20, 15]
cells = [tf.nn.rnn_cell.LSTMCell(num_units=n) for n in n_cells_layers]
cell = tf.nn.rnn_cell.MultiRNNCell(cells)
zero_state = cell.zero_state(n, tf.float32)
state = nest.map_structure(lambda tensor: tf.Variable(tensor, trainable=False), zero_state)
# RNN
rnn_output, new_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=state, dtype=tf.float32)
# State update
update_state = nest.map_structure(tf.assign, state, new_state)
update_state = nest.flatten(update_state)
reset_state = nest.map_structure(tf.assign, state, zero_state)
reset_state = nest.flatten(reset_state)
with tf.control_dependencies(update_state): # Update_state is already a list
rnn_output = tf.identity(rnn_output)
rnn_output_flat = tf.reshape(rnn_output, [-1, n_cells_layers[-1]])
prediction_flat = tf.layers.dense(rnn_output_flat, m, activation=None)
targets_flat = tf.reshape(targets, [-1, m])
prediction = tf.reshape(prediction_flat, [-1, tf.shape(inputs)[1], m])
# Error function and optimizer
loss = tf.losses.mean_squared_error(targets_flat, prediction_flat)
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
return inputs, targets, new_state, reset_state, prediction, loss, train_step
def main():
# Parameters
gap = 5 # Time steps to predict into the future
T = 600 # Length of training time series
# N = [32] # Size of recurrent neural network
n = 1 # Number of training sequences
n_test = 1 # Number of test sequences
m = 1 # Output dimension
d = 1 # Input dimension
epochs = 200 # Maximum number of epochs
lr = 0.01 # Learning rate
# Load and arrange data
raw_data = np.genfromtxt('data/data_adjust-1.csv',delimiter = ",")
train_X = raw_data[0:T,9]
train_X = train_X.copy()
train_Y = raw_data[gap:T+gap,9]
train_Y = train_Y.copy()
test_X = raw_data[T:-gap,9]
test_X = test_X.copy()
test_Y = raw_data[T+gap:,9]
test_Y = test_Y.copy()
train_X.resize(n, train_X.size, d)
train_Y.resize(n, train_Y.size, m)
test_X.resize(n_test, test_X.size, d)
test_Y.resize(n_test, test_Y.size, m)
time = np.arange(train_X.size)*0.012
inputs, targets, new_state, reset_state, prediction, loss, train_step = lstm_func(d, m, n, lr)
path = 'tensorboard/1'
# Create session and initialize variables
with tf.Session() as sess:
# writer = tf.summary.FileWriter(path)
# writer.add_graph(sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
sess.graph.finalize() # Graph is read-only after this statement.
# Do the learning
for i in range(epochs):
sess.run(reset_state) # Reset at beginning of each time series
chunk_size = 50
for chunk_start in range(0, T, chunk_size):
sess.run(train_step, feed_dict={
inputs: train_X[:, chunk_start: chunk_start + chunk_size],
targets: train_Y[:, chunk_start: chunk_start + chunk_size]})
if (i+1)%10==0:
sess.run(reset_state)
temp_loss = sess.run(loss, feed_dict={inputs: train_X, targets: train_Y})
print(i+1, ' loss =', temp_loss)
# Visualize modelling of training data
sess.run(reset_state)
model, final_state = sess.run([prediction, new_state], feed_dict={inputs: train_X})
plt.rc('font', size=14)
# plt.plot(train_X[0,:,0], label='input', color='lightgray', linestyle='--')
plt.plot(time, train_Y[0,:,0], label='target', linestyle='-', linewidth=3)
plt.plot(time, model[0,:,0], label='model', linestyle='-', linewidth=3)
plt.legend(loc=1)
plt.xlabel('time [t]')
plt.ylabel('signal')
plt.title('data presented in one batch')
# plt.savefig('lorenzTrainChunk.pdf')
plt.show()
sess.run(reset_state)
concatenated = []
for chunk_start in range (0, T, chunk_size):
model, _ = sess.run([prediction, new_state], feed_dict={
inputs:
train_X[:, chunk_start: chunk_start + chunk_size]})
concatenated.append(model)
        model = np.stack(concatenated, axis=0)
# # Multiscale HiTS Visuals (updated visuals)
# ## updated by <NAME> 11/23/2021
# ## created by <NAME>, 05/07/2020
#=========================================================
# IMPORT PACKAGES
#=========================================================
import os
import sys
import pdb
import time
import torch
import numpy as np
import matplotlib.pyplot as plt
import yaml
#from datetime import datetime
#--------------------------------------------------
module_dir = os.path.abspath( os.path.join(os.getcwd(),'src'))
if module_dir not in sys.path:
sys.path.append(module_dir)
import ResNet as net
#=========================================================
# Input Arguments
#=========================================================
with open("parameters.yml", 'r') as stream:
D = yaml.safe_load(stream)
for key in D:
globals()[str(key)] = D[key]
print('{}: {}'.format(str(key), D[key]))
# transforms key-names from dictionary into global variables, then assigns them the dictionary-values
#print('TO CONTINUE, PRESS [c] THEN [ENTER]')
#print('TO QUIT, PRESS [q] THEN [ENTER]')
#pdb.set_trace()
#=========================================================
# Directories and Paths
#=========================================================
n_steps = np.int64(model_steps * 2**k_max)
print(f"number of time steps = {n_steps}")
data_folder = 'data_dt={}_steps={}_period={}-{}_amp={}-{}_train+val+test={}+{}+{}'.format(dt, n_steps, period_min, period_max, amp_min, amp_max, n_train, n_val, n_test)
data_dir = os.path.join(os.getcwd(), 'data', data_folder)
model_folder = f"models_dt={dt}_steps={n_steps}_period={period_min}-{period_max}_amp={amp_min}-{amp_max}_lr={learn_rate_min}-{learn_rate_max}_resnet={n_inputs}+{n_layers}x{n_neurons}+{n_outputs}"
model_dir = os.path.join(os.getcwd(), 'models', model_folder)
if not os.path.exists(data_dir):
sys.exit("Cannot find folder ../data/{} in current directory".format(data_folder))
if not os.path.exists(model_dir):
sys.exit("Cannot find folder ../models/{} in current directory".format(model_folder))
#--------------------------------------------------
# file names for figures
file_fig_uniscale = 'plot_uniscale_{}.png'.format(system)
file_fig_mse_models = 'plot_MSE_models_{}.png'.format(system)
file_fig_mse_multiscale = 'plot_MSE_multiscale_{}.png'.format(system)
file_fig_multiscale = 'plot_multiscale_{}.png'.format(system)
#========================================================================
# Load Data and Models (then prepare some globals)
#========================================================================
# load validation set and test set
test_data = np.load(os.path.join(data_dir, 'test.npy'))
#--------------------------------------------------
# list of k-values: k = 0 ... k_max
ks = list(range(0,k_max+1))
step_sizes = [2**k for k in ks]
num_models = k_max+1
#--------------------------------------------------
# load models
models = list()
for step_size in step_sizes:
model_name = 'model_D{}.pt'.format(step_size)
models.append(torch.load(os.path.join(model_dir, model_name), map_location='cpu'))
num_k = len(models)
print('{} models loaded for time-stepping:'.format(num_models) )
print('model index: k = {} .. {}'.format(0, k_max) )
print('step size: 2^k = {} .. {}'.format(1, step_sizes[k_max] ) )
print('step size: dt = {} .. {} \n'.format(dt, dt*step_sizes[k_max]) )
#--------------------------------------------------
# fix model consistencies trained on gpus (optional)
for model in models:
model.device = 'cpu'
model._modules['increment']._modules['activation'] = torch.nn.ReLU()
#--------------------------------------------------
# shared info
n_steps = test_data.shape[1] - 1
t_space = [dt*(step+1) for step in range(n_steps)] # = 1,2, ... , n (list)
criterion = torch.nn.MSELoss(reduction='none')
#========================================================================
# Create Directories and Files for Figures
#========================================================================
# create directory for figures
figure_dir = model_dir
if not os.path.exists(figure_dir):
os.makedirs(figure_dir)
#--------------------------------------------------
# paths for figures
file_fig_uniscale = os.path.abspath( os.path.join(figure_dir, file_fig_uniscale) )
file_fig_mse_models = os.path.abspath( os.path.join(figure_dir, file_fig_mse_models) )
file_fig_mse_multiscale = os.path.abspath( os.path.join(figure_dir, file_fig_mse_multiscale) )
file_fig_multiscale = os.path.abspath( os.path.join(figure_dir, file_fig_multiscale) )
#==========================================================
# Plot Predictions of Individual ResNet time-steppers
#==========================================================
idx = 0
iterate_k = iter(ks)
colors=iter(plt.cm.rainbow(np.linspace(0, 1, len(ks))))
fig, axs = plt.subplots(num_models, 1, figsize=(plot_x_dim, plot_y_dim*num_models*1.3))
for model in models:
rgb = next(colors)
k = next(iterate_k)
y_preds = model.uni_scale_forecast( torch.tensor(test_data[idx:idx+1, 0, :n_outputs]).float(), n_steps=n_steps, y_known=torch.tensor(test_data[idx:idx+1, :, n_outputs:]).float() )
R = y_preds[0, 0:n_steps, 1].detach().numpy()
axs[k].plot(t_space, test_data[idx, 0:n_steps, 1], linestyle='-', color='gray', linewidth=10, label='R(t)')
axs[k].plot(t_space, R, linestyle='--', color=rgb, linewidth=6, label='$\Delta t = ${}dt'.format(step_sizes[k]) )
axs[k].legend(fontsize=legend_fontsize, loc='upper center', ncol=5, bbox_to_anchor=(0.5, 1.17))
axs[k].tick_params(axis='both', which='major', labelsize=axis_fontsize)
axs[k].grid(axis='y')
plt.show()
plt.savefig(file_fig_uniscale)
#==========================================================
# Plot Log(MSE) of Predictions (individual models)
#==========================================================
# uniscale time-stepping with NN
preds_mse = list()
times = list()
for model in models:
start = time.time()
y_preds = model.uni_scale_forecast( torch.tensor(test_data[:, 0, :n_outputs]).float(), n_steps=n_steps, y_known=torch.tensor(test_data[:, :, n_outputs:]).float() )
end = time.time()
times.append(end - start)
preds_mse.append(criterion(torch.tensor(test_data[:, 1:, 0]).float(), y_preds[:,:,0]).mean(-1)) # CHECK THIS! CHECK THIS!
#----------------------------------------------------------
fig = plt.figure(figsize=(plot_x_dim, plot_y_dim))
colors=iter( (plt.cm.rainbow(np.linspace(0, 1, len(ks)))))
dot_sizes = iter( ( np.linspace(1,20,len(preds_mse)) ) )
t_array = np.array(t_space)
m_steps = n_steps-1
max_log = 0
min_log = 0
for k in range(0,k_max+1):
err = preds_mse[k]
err = err.mean(0).numpy()
rgb = next(colors)
n_forward = np.int64( np.round( m_steps / 2**k ) )
key = np.int64( np.round( np.linspace(0,m_steps,n_forward+1) ) )
t_k = t_array[key]
log_err_k = np.log10(err[key])
plt.plot(t_k, log_err_k, 'o', fillstyle='full', linestyle='-', linewidth=3, markersize=next(dot_sizes), color=rgb, label='$\Delta\ t$={}dt'.format(step_sizes[k]))
#max_log = max_log + min(0, np.max(log_err_k[1:])) # accumulate maximum log(MSE) < 0 in order to calculate a average-ceiling < 0
min_log = np.min(err) # err = preds_mse[k_max] from last iteration above
d_log = np.abs(max_log-min_log)
mid_log = np.mean( [min_log, max_log] )
plt.legend(fontsize=legend_fontsize, loc='upper center', ncol=6, bbox_to_anchor=(0.5, 1.24))
plt.title('time-steps without interpolation', y=1.0, pad=-40, fontsize=title_fontsize)
plt.xticks(fontsize=axis_fontsize)
plt.yticks(fontsize=axis_fontsize)
plt.xlabel('time',fontsize=x_label_fontsize)
plt.ylabel('log(MSE)',fontsize=y_label_fontsize)
plt.grid(axis = 'y')
plt.ylim(ymin=mid_log-d_log, ymax=mid_log+d_log)
plt.show()
plt.savefig(file_fig_mse_models)
#==========================================================
# Choose Range of Models that Minimize MSE (when combined)
#==========================================================
# cross validation (model selections)
start_idx = 0
end_idx = k_max # or len(models)-1
best_mse = 1e+5
val_data = np.load(os.path.join(data_dir, 'val_D{}.npy'.format(k_max)))
# choose the largest time step
for k in range(0, k_max+1):
step_size = np.int64(2**k)
y_preds = net.vectorized_multi_scale_forecast(torch.tensor(val_data[:, 0, :n_outputs]).float().to('cpu'), n_steps=n_steps, models=models[:len(models)-k], y_known=torch.tensor(val_data[:, 0, n_outputs:]).float().to('cpu'))
mse = criterion(torch.tensor(val_data[:, 1:, :n_outputs]).float(), y_preds).mean().item()
if mse <= best_mse:
end_idx = len(models)-k
best_mse = mse
#----------------------------------------------------------
# choose the smallest time step
for k in range(0, end_idx):
step_size = np.int64(2**k)
y_preds = net.vectorized_multi_scale_forecast(torch.tensor(val_data[:, 0, :n_outputs]).float().to('cpu'), n_steps=n_steps, models=models[k:end_idx], y_known=torch.tensor(val_data[:, :, n_outputs:]).float().to('cpu'))
mse = criterion(torch.tensor(val_data[:, 1:, :n_outputs]).float(), y_preds).mean().item()
if mse <= best_mse:
start_idx = k
best_mse = mse
#----------------------------------------------------------
models = models[start_idx:(end_idx+1)]
num_k = len(models)
print('{} models chosen for Multiscale HiTS:'.format(num_k) )
print(' k = {} .. {}'.format(start_idx, end_idx) )
print(' 2^k = {} .. {}'.format(2**start_idx, 2**end_idx ) )
print('t-steps = {} .. {}\n'.format(dt*2**start_idx, dt * 2**end_idx ) )
del val_data
#==========================================================
# Plot Log(MSE) for Multi-scale vs Single
#==========================================================
# multiscale time-stepping with NN
start = time.time()
y_preds, model_key = net.vectorized_multi_scale_forecast(torch.tensor(test_data[:, 0, :n_outputs]).float().to('cpu'), n_steps=n_steps, models=models, y_known=torch.tensor(test_data[:, :, n_outputs:]).float().to('cpu'), key=True)
end = time.time()
multiscale_time = end - start
multiscale_preds_mse = criterion(torch.tensor(test_data[:, 1:, :]).float(), y_preds).mean(-1)
# added additional argument to function 'vectorized_multi_scale_forecast( ... , key=True)' in order to data of each individual ResNet
model_key = model_key.detach().numpy()
#model_key_plus = np.delete(model_key, np.argwhere(model_key==0) )
#----------------------------------------------------------
# visualize forecasting error at each time step
fig = plt.figure(figsize=(plot_x_dim, plot_y_dim))
colors=iter(plt.cm.rainbow(np.linspace(0, 1, len(ks))))
multiscale_err = multiscale_preds_mse.mean(0).detach().numpy()
for k in range(len(preds_mse)):
err = preds_mse[k]
err = err.mean(0).detach().numpy()
rgb = next(colors)
plt.plot(t_space, np.log10(err), linestyle='-', color=rgb, linewidth=4, label='$\Delta\ t$={}dt'.format(step_sizes[k]))
plt.plot(t_space, np.log10(multiscale_err), linestyle='-', color='k', linewidth=4, label='multiscale')
plt.legend(fontsize=legend_fontsize, loc='upper center', ncol=6, bbox_to_anchor=(0.5, 1.2))
plt.xticks(fontsize=axis_fontsize)
plt.yticks(fontsize=axis_fontsize)
plt.xlabel('time',fontsize=x_label_fontsize)
plt.ylabel('log(MSE)',fontsize=y_label_fontsize)
plt.grid(axis = 'y')
plt.ylim(ymin=min_log-d_log)
plt.savefig(file_fig_mse_multiscale)
#==========================================================
# Plot Multiscale Predictions with a color-key for each chosen model
#==========================================================
idx = 0
dot_min = 6
dot_max = 10
#-------------------------------------------------------------
t = np.linspace(0, (n_steps-1)*dt, n_steps)
import matplotlib.pyplot as plt
import numpy as np
from numpy import cross, eye
from scipy.linalg import expm, norm
import pandas as pd
from scipy.spatial.transform import Rotation as R
from pyts.decomposition import SingularSpectrumAnalysis
def modeshape_sync_lstsq(mode_shape_vec):
"""
Creates a straight line fit in the complex plane and alligns the mode shape with the real-axis.
:param mode_shape_vec: Mode shape vector
:type mode_shape_vec: array(float)
:return _n: Alligned mode shape vector
"""
_n = np.zeros_like(mode_shape_vec)
for i in range(np.shape(mode_shape_vec)[1]):
_mode = mode_shape_vec[:,i]
z = np.arctan(np.average(np.imag(_mode)/np.real(_mode),weights = np.abs(_mode)**1e4))
_n[:,i] = _mode*(np.cos(-1*z)+1j*np.sin(-1*z))
return _n
def modeshape_scaling_DP(mode_shape_vec, driving_point,sync = True):
"""
Scales mode shapes according to the driving point measurement.
:param mode_shape_vec: Mode shape vector
:type mode_shape_vec: array(float)
:param driving_point: Driving point location
:type driving_point: int
:param sync: Allign mode shape with the real-axis
:type sync: bool, optional
:return: Scalled mode shape
"""
_mode = mode_shape_vec
for i in range(np.shape(mode_shape_vec)[1]):
_mode[:,i] = _mode[:,i]/np.sqrt(mode_shape_vec[driving_point,i])
if sync:
_mode = modeshape_sync_lstsq(_mode)
return _mode
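# --- Hedged usage sketch (not part of the original module) --------------------
# Scales a random complex mode-shape matrix (n_dofs x n_modes), assuming the
# driving point sits at DOF 0; the values are illustrative only.
def example_scaling(n_dofs: int = 6, n_modes: int = 2) -> np.ndarray:
    phi = np.random.randn(n_dofs, n_modes) + 1j * np.random.randn(n_dofs, n_modes)
    return modeshape_scaling_DP(phi, driving_point=0, sync=False)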
def MCF(mod):
"""
Calculate Mode Complexity Factor (MCF)
:param mod: Mode shape
:type mod: array(float)
:return: Mode complexity factor
"""
    sxx = np.real(mod).T@np.real(mod)
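    # NOTE: the source file is truncated here. A common way to finish the MCF
    # computation (hedged sketch, not the original code) would be:
    #     syy = np.imag(mod).T @ np.imag(mod)
    #     sxy = np.real(mod).T @ np.imag(mod)
    #     return 1 - ((sxx - syy) ** 2 + 4 * sxy ** 2) / (sxx + syy) ** 2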
"""
Testing differentiation of user-defined datatypes.
"""
import nose
import unittest
import numpy as np
from openmdao.lib.geometry.geom_data import GeomData
from openmdao.main.api import Component, Assembly, set_as_top
from openmdao.main.datatypes.api import Array, Float, VarTree
from openmdao.util.testutil import assert_rel_error
class GeomComponent(Component):
x = Float(1.0, iotype='in')
geom_out = VarTree(GeomData(2, 1), iotype='out')
def list_deriv_vars(self):
return ('x'), ('geom_out.points',)
def provideJ(self):
self.J = np.array([[2, 0, 1],
[0, 2, 1]], dtype=np.float)
def apply_deriv(self, arg, result):
result['geom_out.points'][0,:] += self.J[0,:]*arg['x']
result['geom_out.points'][1,:] += self.J[1,:]*arg['x']
def apply_derivT(self, arg, result):
result['x'] += np.sum(self.J.T[:,0]*arg['geom_out.points'][0,:])
        result['x'] += np.sum(self.J.T[:,1]*arg['geom_out.points'][1,:])
"""
Tests different implementations of solve functions.
"""
from __future__ import print_function
from itertools import product
from unittest import TestCase, skipIf
import numpy as np
from numpy.testing import run_module_suite, assert_allclose
from pkg_resources import parse_version
import gulinalg
class TestSolveTriangular(TestCase):
"""
    Test A * x = B and its variants where A is a triangular matrix.
Since names are abbreviated, here is what they mean:
LO - A is a Lower triangular matrix.
    UP - A is an Upper triangular matrix.
    TRANS N - No transpose, T - Transpose, C - Conjugate Transpose
DIAG N - A is non-unit triangular, U - A is unit triangular
B - By default B is a matrix, otherwise we specify it in test name.
"""
def test_LO_TRANS_N_DIAG_N_B_VECTOR(self):
"""Test A * x = B where A is a lower triangular matrix"""
a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
b = np.array([4, 2, 4, 2])
x = gulinalg.solve_triangular(a, b)
assert_allclose(np.dot(a, x), b, atol=1e-15)
def test_UP_TRANS_N_DIAG_N(self):
"""Test A * x = B where A is a upper triangular matrix"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert_allclose(np.dot(a, x), b, atol=1e-15)
def test_UP_TRANS_T_DIAG_N(self):
"""Test A.T * x = B where A is a upper triangular matrix"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
x = gulinalg.solve_triangular(a, b, UPLO='U', transpose_type='T')
assert_allclose(np.dot(a.T, x), b, atol=1e-15)
def test_UP_TRANS_C_DIAG_N(self):
"""Test A.H * x = B where A is a upper triangular matrix"""
a = np.array([[1 + 2j, 2 + 2j], [0, 1 + 1j]])
b = np.array([[1 + 0j, 0], [0, 1 + 0j]])
ref = np.array([[0.2+0.4j, -0.0+0.j], [-0.4-0.8j, 0.5+0.5j]])
x = gulinalg.solve_triangular(a, b, UPLO='U', transpose_type='C')
assert_allclose(x, ref, atol=1e-15)
def test_UP_TRANS_N_DIAG_U(self):
"""
Test A * x = B where A is a upper triangular matrix and diagonal
elements are considered unit diagonal.
"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
res = gulinalg.solve_triangular(a, b, UPLO='U', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.array([[1, 2, 3, 4], [0, 1, 3, 4],
[0, 0, 1, 4], [0, 0, 0, 1]])
ref = gulinalg.solve_triangular(a_unit_diag, b, UPLO='U')
assert_allclose(res, ref, atol=1e-15)
def test_UP_TRANS_T_DIAG_U(self):
"""
Test A.T * x = B where A is a upper triangular matrix and diagonal
elements are considered unit diagonal.
"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
res = gulinalg.solve_triangular(
a, b, UPLO='U', transpose_type='T', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.array([[1, 2, 3, 4], [0, 1, 3, 4],
[0, 0, 1, 4], [0, 0, 0, 1]])
ref = gulinalg.solve_triangular(
a_unit_diag, b, UPLO='U', transpose_type='T')
assert_allclose(res, ref, atol=1e-15)
def test_UP_TRANS_C_DIAG_U(self):
"""
Test A.H * x = B where A is a upper triangular matrix and diagonal
elements are considered unit diagonal.
"""
a = np.array([[1 + 2j, 2 + 2j], [0, 1 + 1j]])
b = np.array([[1, 0], [0, 1]])
res = gulinalg.solve_triangular(
a, b, UPLO='U', transpose_type='C', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.array([[1, 2 + 2j], [0, 1]])
ref = gulinalg.solve_triangular(
a_unit_diag, b, UPLO='U', transpose_type='C')
assert_allclose(res, ref, atol=1e-15)
def test_fortran_layout_matrix(self):
"""Input matrices have fortran layout"""
a = np.asfortranarray([[1, 2, 3, 4], [0, 2, 3, 4],
[0, 0, 3, 4], [0, 0, 0, 4]])
b = np.asfortranarray([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
res = gulinalg.solve_triangular(
a, b, UPLO='U', transpose_type='T', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.asfortranarray([[1, 2, 3, 4], [0, 1, 3, 4],
[0, 0, 1, 4], [0, 0, 0, 1]])
ref = gulinalg.solve_triangular(
a_unit_diag, b, UPLO='U', transpose_type='T'
)
assert_allclose(res, ref, atol=1e-15)
def test_input_matrix_non_contiguous(self):
"""Input matrix is not a contiguous matrix"""
a = np.asfortranarray(
[[[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]],
[[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]]])[0]
b = np.ascontiguousarray([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
assert not a.flags.c_contiguous and not a.flags.f_contiguous
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert_allclose(np.dot(a, x), b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_m_and_n_zero(self):
"""Corner case of solving where m = 0 and n = 0"""
a = np.ascontiguousarray(np.random.randn(0, 0))
b = np.ascontiguousarray(np.random.randn(0, 0))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (0, 0)
assert_allclose(np.dot(a, x), b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_m_zero(self):
"""Corner case of solving where m = 0"""
a = np.ascontiguousarray(np.random.randn(0, 0))
b = np.ascontiguousarray(np.random.randn(0, 2))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (0, 2)
assert_allclose(np.dot(a, x), b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_n_zero(self):
"""Corner case of solving where n = 0"""
a = np.ascontiguousarray(np.random.randn(2, 2))
b = np.ascontiguousarray(np.random.randn(2, 0))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (2, 0)
assert_allclose(np.dot(a, x), b, atol=1e-15)
def test_size_one_matrices(self):
"""Corner case of decomposing where m = 1 and n = 1"""
a = np.ascontiguousarray(np.random.randn(1, 1))
b = np.ascontiguousarray(np.random.randn(1, 1))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (1, 1)
        assert_allclose(np.dot(a, x), b, atol=1e-15)
import json
import numpy as np
import keras
from keras.preprocessing import text
from seq2vec import Seq2VecHash, Seq2Seq
def load_clickstream_length():
data = np.zeros((21, 9))
for i in range(1, 22):
with open(f'./dataset/{i}.json') as f:
d = json.load(f)
for j in range(0, len(d)):
length = len(d[j]['clickstream'])
data[i-1][j] = length
return data
def load_clickstream(user_id, task_id):
with open(f'./dataset/{user_id}.json') as f:
return json.load(f)[task_id]['clickstream']
def clickstream_length_normalize():
mat_length = load_clickstream_length()
mat_length = mat_length / mat_length.sum(axis=1)[:, None]
return mat_length
def compute_url_overlap_rate(task_id):
count = 0
url_map = dict()
for user_id in range(1, 22):
clickstream = load_clickstream(user_id, task_id)
for obj in clickstream:
count += 1
key = obj['current_url']
if key not in url_map:
url_map[key] = 1
continue
url_map[key] += 1
return url_map, len(url_map) / count
def compute_url_overlap_rate_all():
for task_id in range(0, 9):
_, rate = compute_url_overlap_rate(task_id)
print(f'task {task_id} clickstream overlap rate: ', 1 - rate)
def compute_url_word_sequence():
clickstream = load_clickstream(1, 1)
for obj in clickstream:
print(text.text_to_word_sequence(obj['current_url']))
# url_map, rate = compute_url_overlap_rate(1)
# print(json.dumps(url_map, sort_keys=True, indent=4))
def compute_url_mapping(task_id):
total = {}
for user_id in range(1, 22):
clickstream = load_clickstream(user_id, task_id)
for obj in clickstream:
previous = obj['previous_url']
if previous in total:
current = obj['current_url']
if current in total[previous]:
total[previous][current] += 1
else:
total[previous][current] = 1
else:
total[previous] = {}
with open(f'embeddings/{task_id}.json', 'w+') as f:
f.write(json.dumps(total, indent=4))
# for task_id in range(0, 9):
# compute_url_embedding(task_id)
vec_len = 30
def compute_url_embedding(user_id, task_id):
clickstream = load_clickstream(user_id, task_id)
urls = []
for obj in clickstream:
urls.append(obj['previous_url'])
transformer = Seq2VecHash(vec_len=vec_len)
result = transformer(urls)
print('clickstream: ', result)
return result
def main():
sos = np.zeros((1, vec_len))
coi = np.zeros((1, vec_len)) - 1
eos = np.zeros((1, vec_len)) - 10
pad = np.zeros((1, vec_len)) - 100
max_length = 0
sentences = []
for user_id in range(1, 22):
for task_id in range(0, 9):
clickstream = compute_url_embedding(user_id, task_id)
pos = clickstream.shape[0]//2
clickstream = np.insert(clickstream, pos, coi, 0)
clickstream = np.insert(clickstream, 0, sos, 0)
            clickstream = np.insert(clickstream, clickstream.shape[0], eos, 0)
"""
Gmsh format 2.2
"""
import numpy as np
from flow import Flow
from element import Element
from element_search import find_neighbors
from text.text_flow import write_flow
from text.text_elements import write_elements
from text.text_geometries import write_geometries
#==============================================================================
def intIt(l):
return np.array([int(e) for e in l])
def floatIt(l):
return np.array([float(e) for e in l])
def extract_msh(path_msh):
f = open(path_msh, 'r')
nodes_X, nodes_Y = [], []
elements = []
line = f.readline()
# ...
# $Nodes\n
# n_nodes
# ...
while line != '$Nodes\n':
line = f.readline()
line = f.readline()
n_nodes = int(line.strip())
for i in range(n_nodes):
# line = id x y z
line = f.readline()
coord = floatIt(line.strip().split())
nodes_X.append(coord[1])
nodes_Y.append(coord[2])
# ...
# $Elements\n
# n_elements
# ...
while line != '$Elements\n':
line = f.readline()
line = f.readline()
n_elements = int(line.strip())
count = 0
for i in range(n_elements):
# element_id element_type ... ... nodes_id
line = f.readline()
coord = intIt(line.strip().split())
element_type = coord[1]
if element_type == 9: # 6-node second order triangle
count += 1
e = Element(count)
e.nodes = np.array(coord[-6:])
elements.append(e)
# if element_type == 1: # 2-node line
# e.element_type = 1
# e.nodes = coord[-2:]
#
# elif element_type == 2: # 3-node triangle
# e.element_type = 2
# e.nodes = coord[-3:]
#
# elif element_type == 3: # 4-node quadrangle
# e.element_type = 3
# e.nodes = coord[-4:]
#
# elif element_type == 8: # 3-node second order line
# e.element_type = 8
# e.nodes = coord[-3:]
#
# elif element_type == 9: # 6-node second order triangle
# e.element_type = 9
# e.nodes = coord[-6:]
#
# elif element_type == 10: # 9-node second order quadrangle
# e.element_type = 10
# e.nodes = coord[-9:]
#
# elif element_type == 15: # 1-node point
# e.element_type = 15
# e.nodes = coord[-1:]
#
# elements.append(e)
f.close()
    return np.array(nodes_X), np.array(nodes_Y)
"""
'power.py' module serves mainly for interacting with C++ library fastsim.py
- translate all power spectra, growth functions, correlations functions, etc.
into C++ functions for speed
- handles numpy arrays
- cosmo == C++ class Cosmo_Param, accessible through SimInfo.sim.cosmo
- FTYPE_t=[float, double, long double]
"""
import numpy as np
from scipy.optimize import brentq, curve_fit, minimize_scalar
from . import fastsim as fs
class Non_Linear_Cosmo(object):
# private variables and methods
_cosmo_emu = None
_cosmo_halofit = None
_sim_emu = None
_sim_halofit = None
_corr_func = {}
_sigma_func = {}
@staticmethod
def _init_cosmo(cosmo):
if Non_Linear_Cosmo._cosmo_emu is None:
Non_Linear_Cosmo._cosmo_emu = Non_Linear_Cosmo._copy_cosmo(cosmo, fs.ccl_emu, transfer_function=fs.ccl_emulator)
if Non_Linear_Cosmo._cosmo_halofit is None:
Non_Linear_Cosmo._cosmo_halofit = Non_Linear_Cosmo._copy_cosmo(cosmo, fs.ccl_halofit)
@staticmethod
def _init_sim(sim):
Non_Linear_Cosmo._init_cosmo(sim.cosmo)
if Non_Linear_Cosmo._sim_emu is None:
Non_Linear_Cosmo._sim_emu = Non_Linear_Cosmo._copy_sim(sim, Non_Linear_Cosmo._cosmo_emu)
if Non_Linear_Cosmo._sim_halofit is None:
Non_Linear_Cosmo._sim_halofit = Non_Linear_Cosmo._copy_sim(sim, Non_Linear_Cosmo._cosmo_halofit)
@staticmethod
def _copy_cosmo(cosmo_from, matter_power_spectrum_method, transfer_function=None):
# create empty Cosmo_Param
cosmo_to = fs.Cosmo_Param()
# copy basic parameterz
cosmo_to.sigma8 = cosmo_from.sigma8
cosmo_to.ns = cosmo_from.ns
cosmo_to.k2_G = cosmo_from.k2_G
cosmo_to.Omega_m = cosmo_from.Omega_m
cosmo_to.Omega_b = cosmo_from.Omega_b
cosmo_to.H0 = cosmo_from.H0
# copy ccl methods
cosmo_to.config.baryons_power_spectrum_method = cosmo_from.config.baryons_power_spectrum_method
cosmo_to.config.mass_function_method = cosmo_from.config.mass_function_method
# -> transfer function and matter power spectrum is different
if transfer_function is None:
cosmo_to.config.transfer_function_method = cosmo_from.config.transfer_function_method
else:
cosmo_to.config.transfer_function_method = transfer_function
cosmo_to.config.matter_power_spectrum_method = matter_power_spectrum_method
# initialize Cosmo_Param
cosmo_to.init()
return cosmo_to
@staticmethod
def _copy_sim(sim_from, cosmo):
sim_to = fs.Sim_Param()
sim_to.cosmo = cosmo
sim_to.box_opt = sim_from.box_opt
sim_to.integ_opt = sim_from.integ_opt
sim_to.out_opt = sim_from.out_opt
sim_to.comp_app = sim_from.comp_app
sim_to.app_opt = sim_from.app_opt
sim_to.run_opt = sim_from.run_opt
sim_to.other_par = sim_from.other_par
sim_to.chi_opt = sim_from.chi_opt
sim_to.test_opt = sim_from.test_opt
return sim_to
@staticmethod
def non_lin_pow_spec(a, k, cosmo, lin_scale):
""" return ndarray of nonlinear power spectrum """
# initialize emulator cosmology (if not done already)
Non_Linear_Cosmo._init_cosmo(cosmo)
# for z < 2 use emulator, halofit otherwise
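# (a = 1/(1+z), so a < 1/3 corresponds to z > 2)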
if a < 1./3.:
cosmo_ = Non_Linear_Cosmo._cosmo_halofit
else:
cosmo_ = Non_Linear_Cosmo._cosmo_emu
# always check for TZA
cosmo_.truncated_pk = cosmo.truncated_pk
# scale to match linear power spectrum at small scales
# TODO
# call non-linear power spectrum
k = np.array(k)
if k.shape:
return np.array([fs.non_lin_pow_spec(a, k_, cosmo_) for k_ in k])
else:
return fs.non_lin_pow_spec(a, np.asscalar(k), cosmo_)
@staticmethod
def gen_func(sim, a, data, gen_func, cache):
if a not in cache:
# initialize emulator cosmology (if not done already)
Non_Linear_Cosmo._init_sim(sim)
# for z < 2 use emulator, halofit otherwise
if a < 1./3.:
sim_ = Non_Linear_Cosmo._sim_halofit
else:
sim_ = Non_Linear_Cosmo._sim_emu
# always check for TZA
sim_.cosmo.truncated_pk = sim.cosmo.truncated_pk
# call non-linear gen_func
gen_func(sim_, a, data)
# copy data to cache
cache[a] = get_copy_data_vec(data)
return cache[a]
@staticmethod
def non_lin_corr_func(sim, a, data):
""" return non-linear correlation function """
return Non_Linear_Cosmo.gen_func(sim, a, data, fs.gen_corr_func_binned_gsl_qawf_nl, Non_Linear_Cosmo._corr_func)
@staticmethod
def non_lin_sigma_func(sim, a, data):
""" return non-linear amplitude of density fluctuations """
return Non_Linear_Cosmo.gen_func(sim, a, data, fs.gen_sigma_func_binned_gsl_qawf_nl, Non_Linear_Cosmo._sigma_func)
def get_a_init_from_zs(zs):
""" from list of redshifts returns initial scale factor, i.e. value after 'init' """
for z in zs:
if z != 'init':
return 1/(1.+z)
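# e.g. get_a_init_from_zs(['init', 200.0, 10.0]) returns 1/201, the scale factor of the first real redshift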
def get_a_fom_zs(zs):
try:
iter(zs)
except TypeError:
return 1./(1+zs)
else:
a = [1./(z + 1) for z in zs if z != 'init']
return np.array(a)
def get_z_from_a(a):
try:
iter(a)
except TypeError:
return 1./a - 1
else:
zs = [1./a_ - 1 for a_ in a]
return np.array(zs)
def get_a_from_A(cosmo, A):
""" return scale factor a at which amplitude of linear power spectrum is A (normalize as A=1 at a=1) """
# 'f = 0' <=> A = D^2 (linear power grows as D^2)
A = np.array(A)
if A.shape:
a_eff = []
for A_ in A:
f = lambda a : A_ - fs.growth_factor(a, cosmo)**2
a_eff.append(brentq(f, 0, 2))
return np.array(a_eff)
else:
f = lambda a : A - fs.growth_factor(a, cosmo)**2
return brentq(f, 0, 2)
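# e.g. with the 'A = 1 at a = 1' normalization assumed above, get_a_from_A(cosmo, 1.0) should return a value close to 1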
def get_copy_data_vec(Data_Vec_from):
dim = Data_Vec_from.dim()
size = Data_Vec_from.size()
if dim == 2:
Data_Vec = fs.Data_Vec_2(size)
elif dim == 3:
Data_Vec = fs.Data_Vec_3(size)
for i in range(dim):
for j in range(size):
Data_Vec[i][j] = Data_Vec_from[i][j]
return Data_Vec
def get_ndarray(Data_Vec):
""" copy C++ class Data_Vec<FTYPE_t, N> into numpy array """
dim = Data_Vec.dim()
data = [[x for x in Data_Vec[i]] for i in range(dim)]
return np.array(data)
def get_Data_vec(data):
""" copy 2D data 'dim x size' into C++ class Data_Vec<FTYPE_t, dim> """
dim = len(data)
size = len(data[0])
if dim == 2:
Data_Vec = fs.Data_Vec_2(size)
elif dim == 3:
Data_Vec = fs.Data_Vec_3(size)
else:
raise IndexError("only Data_Vec<FTYPE_t, dim> of 'dim' 2 or 3 supported")
for j in range(dim):
for i in range(size):
Data_Vec[j][i] = data[j][i]
return Data_Vec
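# e.g. get_Data_vec([[1., 2., 3.], [4., 5., 6.]]) fills a Data_Vec_2 of size 3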
def non_lin_pow_spec(a, k, cosmo, lin_scale=False):
""" return ndarray of nonlinear power spectrum """
# special wrapper -- use emulator power spectrum regardless of the one in cosmo
return Non_Linear_Cosmo.non_lin_pow_spec(a, k, cosmo, lin_scale)
def lin_pow_spec(a, k, cosmo):
""" return ndarray of linear power spectrum """
k = np.array(k)
if k.shape:
return np.array([fs.lin_pow_spec(a, k_, cosmo) for k_ in k])
else:
return fs.lin_pow_spec(a, np.asscalar(k), cosmo)
def chi_bulk_a(a, chi_opt, MPL=1, CHI_A_UNITS=True):
""" return bulk value of chameleon field at background level """
if CHI_A_UNITS: return 1
chi_0 = 2*chi_opt["beta"]*MPL*chi_opt["phi"]
n = chi_opt["n"]
return chi_0*pow(a, 3/(1-n))
def chi_bulk_a_n(a, chi_opt, MPL=1, CHI_A_UNITS=True):
""" return bulk value of chameleon field at background level divided by (1-n), i.e. common factor """
n = chi_opt["n"]
return chi_bulk_a(a, chi_opt, MPL=MPL, CHI_A_UNITS=CHI_A_UNITS)/(1-n)
def chi_psi_a(a, chi_opt):
""" return value of screening potential at given time """
phi = chi_opt["phi"]
n = chi_opt["n"]
phi *= pow(a, (5.-2*n)/(1.-n))
return phi
def phi_G_prefactor(cosmo, c_kms=299792.458):
mu_ = 3./2*cosmo.Omega_m * pow(cosmo.H0 * cosmo.h / c_kms, 2)
return 1/mu_
def phi_G_k(a, k, cosmo, c_kms=299792.458):
Pk = lin_pow_spec(a, k, cosmo)
Pk_til = Pk*pow(k/(2*np.pi), 3)
drho = np.sqrt(Pk_til)
mu = phi_G_prefactor(cosmo, c_kms=c_kms)
phi = drho/(mu*a*k*k)
return phi
def chi_psi_k_a_single(a, cosmo, chi_opt, k_min=1e-5, k_max=1e3, rel_tol=1e-1):
""" return scale at which hravitational potential is equal to screening potential """
psi_scr_a = chi_psi_a(a, chi_opt)
f = lambda k : np.abs(psi_scr_a - phi_G_k(a, k, cosmo))
k_scr = minimize_scalar(f, bracket=(k_min, k_max), bounds=(k_min, np.inf), tol=1e-12).x
if f(k_scr)/psi_scr_a < rel_tol:
return k_scr
else:
return 0
def chi_psi_k_a(a, cosmo, chi_opt, k_min=1e-5, k_max=1e3, rel_tol=1e-1):
a = np.array(a)
fce = lambda a_ : chi_psi_k_a_single(a_, cosmo, chi_opt, k_min=k_min, k_max=k_max, rel_tol=rel_tol)
if a.shape:
return np.array([fce(a_) for a_ in a])
else:
return fce(np.asscalar(a))
def chi_mass_sq(a, cosmo, chi_opt, MPL=1, c_kms=299792.458):
""" return mass squared of chameleon field sitting at chi_bulk(a, 0) """
# prefactor = (3*MPL*chi_opt["beta"]*cosmo.Omega_m *pow(cosmo.H0 # beta*rho_m,0 / Mpl
# * cosmo.h / c_kms # units factor for 'c = 1' and [L] = Mpc / h
# ,2))
# evolve rho_m,0 -> rho_m
a = np.array(a)
# prefactor /= pow(a, 3)
# return prefactor/chi_bulk_a_n(a, chi_opt, MPL=MPL, CHI_A_UNITS=False)
n = chi_opt["n"]
mu = phi_G_prefactor(cosmo, c_kms=c_kms)
phi_a = chi_psi_a(a, chi_opt)
return (1-n)/(a*mu*phi_a)
def chi_compton_wavelength(a, cosmo, chi_opt, MPL=1, c_kms=299792.458):
m_sq = chi_mass_sq(a, cosmo, chi_opt, MPL=MPL, c_kms=c_kms)
return 1/np.sqrt(m_sq)
def chi_lin_pow_spec(a, k, cosmo, chi_opt, MPL=1, c_kms=299792.458):
""" return ndarray of linear power spectrum for chameleon in units of chi_prefactor """
mass_sq = chi_mass_sq(a, cosmo, chi_opt, MPL=MPL, c_kms=c_kms)
k = np.array(k)
chi_mod = pow(mass_sq/(mass_sq+k*k), 2)
if k.shape:
return chi_mod*np.array([fs.lin_pow_spec(a, k_, cosmo) for k_ in k])
else:
return chi_mod*fs.lin_pow_spec(a, np.asscalar(k), cosmo)
def chi_trans_to_supp(a, k, Pk, cosmo, chi_opt, CHI_A_UNITS=True):
""" transform input chameleon power spectrum to suppression according to linear prediction """
Pk_lin = chi_lin_pow_spec(a, k, cosmo, chi_opt)
return Pk/ (Pk_lin * pow(chi_bulk_a_n(a, chi_opt), 2)) # chi_bulk for normalization of Pk_lin
def chi_trans_to_init(data_list, zeropoint=0):
""" transform supp (ref: lin) to supp (ref: init) """
reversed_data_list = data_list[::-1]
for data in reversed_data_list:
data[1] += zeropoint - reversed_data_list[-1][1]
def hybrid_pow_spec(a, k, A, cosmo):
""" return 'hybrid' power spectrum: (1-A)*P_lin(k, a) + A*P_nl """
return (1-A)*lin_pow_spec(a, k, cosmo) + A*non_lin_pow_spec(a, k, cosmo)
def gen_func(sim, fc_par, fce_lin, fc_nl, Pk=None, z=None, non_lin=False):
data = fs.Data_Vec_2()
if Pk: # compute function from given continuous power spectrum
try:
fc_par(sim, Pk, data)
# GSL integration error
except RuntimeError:
return None
elif z is not None: # compute (non-)linear function
a = 1./(1.+z) if z != 'init' else 1.0
if non_lin:
data = fc_nl(sim, a, data)
else:
fce_lin(sim, a, data)
else:
raise KeyError("Function 'gen_func' called without arguments.")
return get_ndarray(data)
def corr_func(sim, Pk=None, z=None, non_lin=False):
""" return correlation function
if given Pk -- C++ class Extrap_Pk or Extrap_Pk_Nl -- computes its corr. func.
if given redshift, computes linear or non-linear (emulator) correlation function """
fc_par = fs.gen_corr_func_binned_gsl_qawf
fce_lin = fs.gen_corr_func_binned_gsl_qawf_lin
fc_nl = Non_Linear_Cosmo.non_lin_corr_func
return gen_func(sim, fc_par, fce_lin, fc_nl, Pk=Pk, z=z, non_lin=non_lin)
def sigma_R(sim, Pk=None, z=None, non_lin=False):
""" return amplitude of density fluctuations
if given Pk -- C++ class Extrap_Pk or Extrap_Pk_Nl -- computes its sigma_R.
if given redshift, computes linear or non-linear (emulator) amplitude of density fluctuations """
fc_par = fs.gen_sigma_binned_gsl_qawf
fce_lin = fs.gen_sigma_func_binned_gsl_qawf_lin
fc_nl = Non_Linear_Cosmo.non_lin_sigma_func
return gen_func(sim, fc_par, fce_lin, fc_nl, Pk=Pk, z=z, non_lin=non_lin)
def get_hybrid_pow_spec_amp(sim, data, k_nyquist_par, a=None, fit_lin=False):
""" fit data [k, Pk, std] to hybrid power spectrum (1-A)*P_lin(k) + A*P_nl(k)
return dictionary with C++ class Extrap_Pk_Nl, fit values and covariance.
If 'fit_lin' is True, fit linear power spectrum and use C++ class Extrap_Pk instead.
"""
# extract data
kk, Pk = np.array(data[0]), np.array(data[1])
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import pandas as pd
import glob
from moviepy.editor import VideoFileClip
from IPython.display import HTML
'''
===============================================================================
CLASS DEFINITION
===============================================================================
'''
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = []
#polynomial coefficients averaged over the last n iterations
self.best_fit = None
#polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
#radius of curvature of the line in some units
self.radius_of_curvature = None
#distance in meters of vehicle center from the line
self.line_base_pos = None
'''
===============================================================================
FUNCTION DEFINITION
===============================================================================
'''
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
return cv2.addWeighted(initial_img, α, img, β, γ)
def grayscale(img):
"""Return an image in grayscale
To show the image: plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def find_chessboard_corners(img,objpoints,imgpoints,objp):
# Convert the image to grayscale
gray = grayscale(img)
# Find chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
# If corners are found, add the object points and image points to the array
if ret == True:
imgpoints.append(corners)
objpoints.append(objp)
#Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
return
def get_distortion_param():
global image_folder
'''
Obtain the distortion parameters of the camera with the chessboard calibration
'''
#Read the list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')
objpoints = [] # points in real world space
imgpoints = [] # points in image space
# Define the object points (0,0,0), (1,0,0), ..., (8,5,0)
objp = np.zeros((6*9,3),np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
for fname in images:
# Read each image in the calibration folder
img = mpimg.imread(fname)
find_chessboard_corners(img, objpoints, imgpoints, objp)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1:], None, None)
undistorted_subfolder = 'undistorted/'
for fname in images:
img = mpimg.imread(fname)
undistorted_img = cv2.undistort(img,mtx,dist,None, mtx)
plt.imsave(image_folder + undistorted_subfolder + fname,undistorted_img)
return mtx, dist
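# Typical use: mtx, dist = get_distortion_param(); undistorted = cv2.undistort(img, mtx, dist, None, mtx)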
def get_binary_image(img):
'''
Function that, given an undistorted image, returns a binary thresholded image
using different methods
'''
global process_choice
global image_folder
s_binary = HLS_threshold(img)
mag_binary = mag_threshold(img)
dir_binary = dir_threshold(img)
xsobel_binary = abs_sobel_thresh(img, orient = 'x')
ysobel_binary = abs_sobel_thresh(img, orient = 'y')
# Combine the binary thresholds: (x & y Sobel) | (gradient magnitude & direction) | S channel
combined_binary = np.zeros_like(mag_binary)
combined_binary[((xsobel_binary == 1) & (ysobel_binary == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | (s_binary == 1)] = 1
# #Show the different influences on the combined binary image
# combined_binary_1 = np.zeros_like(mag_binary)
# combined_binary_1[(xsobel_binary == 1) & (ysobel_binary == 1)] = 1
# combined_binary_2 = np.zeros_like(mag_binary)
# combined_binary_2[(mag_binary == 1) & (dir_binary == 1)] = 1
# f, axes = plt.subplots(3, 3, figsize=(32, 16))
# axes[0,0].set_title('Gradient magnitude', fontsize=30)
# axes[0,0].imshow(mag_binary)
# axes[0,1].set_title('Gradient direction', fontsize=30)
# axes[0,1].imshow(dir_binary)
# axes[0,2].set_title('Gradient mag + dir', fontsize=30)
# axes[0,2].imshow(combined_binary_2)
# axes[1,0].set_title('X Sobel', fontsize=30)
# axes[1,0].imshow(xsobel_binary)
# axes[1,1].set_title('Y Sobel', fontsize=30)
# axes[1,1].imshow(ysobel_binary)
# axes[1,2].set_title('X + Y Sobel', fontsize=30)
# axes[1,2].imshow(combined_binary_1)
# axes[2,0].set_title('Original', fontsize=30)
# axes[2,0].imshow(img)
# axes[2,1].set_title('S component', fontsize=30)
# axes[2,1].imshow(s_binary)
# axes[2,2].set_title('Combined', fontsize=30)
# axes[2,2].imshow(combined_binary)
# Save the images in the defined folders
if process_choice == 'i':
global image_name
global image_count
binary_img_subfolder = 'binary_images/'
X_Sobel_subfolder = 'X_Sobel/'
Y_Sobel_subfolder = 'Y_Sobel/'
Grad_Magnitude_subfolder = 'Grad_Magnitude/'
Grad_Direction_subfolder = 'Grad_Direction/'
S_subfolder = 'S_Threshold/'
plt.imsave(image_folder + binary_img_subfolder + X_Sobel_subfolder + image_name[image_count],xsobel_binary, cmap='gray')
plt.imsave(image_folder + binary_img_subfolder + Y_Sobel_subfolder + image_name[image_count],ysobel_binary, cmap='gray')
plt.imsave(image_folder + binary_img_subfolder + Grad_Magnitude_subfolder + image_name[image_count],mag_binary, cmap='gray')
plt.imsave(image_folder + binary_img_subfolder + Grad_Direction_subfolder + image_name[image_count],dir_binary, cmap='gray')
plt.imsave(image_folder + binary_img_subfolder + S_subfolder + image_name[image_count],s_binary, cmap='gray')
return combined_binary
def HLS_threshold(img):
''' Convert to HLS color space and separate the S channel
Note: img is the undistorted image
'''
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:,:,2]
# Threshold color channel
s_thresh_min = 170
s_thresh_max = 255
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
return s_binary
def mag_threshold(img, sobel_kernel=9, mag_thresh=(30, 100)):
'''
Function that returns the binary image computed using the gradient magnitude method
'''
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
return binary_output
def dir_threshold(img, sobel_kernel= 15, thresh=(0.7, 1.3)):
'''
Function that returns the binary image computed using the gradient direction method
'''
# Grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Calculate the x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Take the absolute value of the gradient direction,
# apply a threshold, and create a binary image result
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
return binary_output
def abs_sobel_thresh(img, orient, thresh_min=20, thresh_max=100):
'''
Define a function that takes an image, and perform its Sobel operation
'''
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
# Rescale back to 8 bit integer
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Create a copy and apply the threshold
binary_output = np.zeros_like(scaled_sobel)
# Apply the threshold to the binary output
binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
return binary_output
def get_perspective_matrix(img,src,dst):
'''
Given an image, the source and destination points, obtain the perspective matrix
'''
M = cv2.getPerspectiveTransform(src,dst)
return M
def change_perspective(binary_img):
#Choose the four points that define the road plane for the perspective change
global src
global dst
global img_size
#Plot the position of the points
#plt.figure(figsize=(20,10))
#plt.imshow(binary_img)
#plt.plot(276,670,'.', markersize=30)
#plt.plot(1026,670,'.', markersize=30)
#plt.plot(525,500,'.', markersize=30)
#plt.plot(762,500,'.', markersize=30)
# ATTENTION: the shape vector is inverted
img_size = (binary_img.shape[1],binary_img.shape[0])
src = np.float32([[762,500],[1026,670],[276,670],[525,500]])
dst = np.float32([ [img_size[0]-300, 300], [img_size[0]-300, img_size[1]-100],
[300, img_size[1]-100],[300, 300]])
M = get_perspective_matrix(binary_img,src,dst)
warped_img = cv2.warpPerspective(binary_img,M,img_size,flags=cv2.INTER_LINEAR)
return warped_img
def find_lane_pixels(binary_warped):
'''
Function that, given a binary warped image, defines the possible
pixels belonging to the left and right line
'''
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 15
# Set the width of the windows +/- margin
margin = 50
# Set minimum number of pixels found to recenter window
minpix = 100
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# # Draw the windows on the visualization image
# cv2.rectangle(out_img,(win_xleft_low,win_y_low),
# (win_xleft_high,win_y_high),(0,255,0), 2)
# cv2.rectangle(out_img,(win_xright_low,win_y_low),
# (win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window #
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, out_img
def fit_polynomial(binary_warped):
'''
Function that, starting from the histogram peaks of the warped image,
estimates the polynomial coefficients.
'''
global n
global process_choice
global image_folder
global xm_per_pix
global ym_per_pix
# Find our lane pixels first
leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
# Fit a second order polynomial to each using `np.polyfit`
left_line.current_fit = np.polyfit(lefty, leftx, 2)
right_line.current_fit = np.polyfit(righty, rightx, 2)
left_line.recent_xfitted.insert(0,left_line.current_fit)
right_line.recent_xfitted.insert(0,right_line.current_fit)
if len(left_line.recent_xfitted) > n:
left_line.recent_xfitted.pop()
if len(right_line.recent_xfitted) > n:
right_line.recent_xfitted.pop()
left_line.best_fit = np.array([0,0,0], dtype='float')
for left_count in range(0, len(left_line.recent_xfitted)):
left_line.best_fit = left_line.best_fit + left_line.recent_xfitted[left_count]
left_line.best_fit = left_line.best_fit/len(left_line.recent_xfitted)
right_line.best_fit = np.array([0,0,0], dtype='float')
for right_count in range(0, len(right_line.recent_xfitted)):
right_line.best_fit = right_line.best_fit + right_line.recent_xfitted[right_count]
right_line.best_fit = right_line.best_fit/len(right_line.recent_xfitted)
left_line.detected = True
right_line.detected = True
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
try:
left_fitx = left_line.best_fit[0]*ploty**2 + left_line.best_fit[1]*ploty + left_line.best_fit[2]
right_fitx = right_line.best_fit[0]*ploty**2 + right_line.best_fit[1]*ploty + right_line.best_fit[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_line.detected = False
right_line.detected = False
left_fitx = 1*ploty**2 + 1*ploty
right_fitx = 1*ploty**2 + 1*ploty
# Colors in the left and right lane regions
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
# Save the images in the defined folders
if process_choice == 'i':
global image_name
global image_count
warped_img_subfolder = 'warped_images/'
line_pixels_subfolder = 'line_pixels/'
plt.imsave(image_folder + warped_img_subfolder + line_pixels_subfolder + image_name[image_count],out_img)
left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
result = cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
return result, left_fitx, right_fitx, ploty, left_fit_cr, right_fit_cr
def fit_poly(img_shape, leftx, lefty, rightx, righty):
global n
global n_left
global n_right
global max_missed_detection
global xm_per_pix
global ym_per_pix
#Fit a second order polynomial to each with np.polyfit() ###
left_line.current_fit = np.polyfit(lefty, leftx, 2)
right_line.current_fit = np.polyfit(righty, rightx, 2)
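# Sanity check: if every coefficient of the new fit jumps above 120% (or drops below 80%)
# of the previous fit, reuse the previous fit and count it as a missed detection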
if all(left_line.current_fit > 1.2*left_line.recent_xfitted[0]) or all(left_line.current_fit < 0.8*left_line.recent_xfitted[0]):
left_line.current_fit = left_line.recent_xfitted[0]
n_left = n_left + 1
if n_left >= max_missed_detection:
left_line.__init__()
if all(right_line.current_fit > 1.2*right_line.recent_xfitted[0]) or all(right_line.current_fit < 0.8*right_line.recent_xfitted[0]):
right_line.current_fit = right_line.recent_xfitted[0]
n_right = n_right + 1
if n_right >= max_missed_detection:
right_line.__init__()
left_line.recent_xfitted.insert(0,left_line.current_fit)
right_line.recent_xfitted.insert(0,right_line.current_fit)
if len(left_line.recent_xfitted) > n:
left_line.recent_xfitted.pop()
if len(right_line.recent_xfitted) > n:
right_line.recent_xfitted.pop()
left_line.best_fit = np.array([0,0,0], dtype='float')
for left_count in range(0, len(left_line.recent_xfitted)):
left_line.best_fit = left_line.best_fit + left_line.recent_xfitted[left_count]
left_line.best_fit = left_line.best_fit/len(left_line.recent_xfitted)
right_line.best_fit = np.array([0,0,0], dtype='float')
for right_count in range(0, len(right_line.recent_xfitted)):
right_line.best_fit = right_line.best_fit + right_line.recent_xfitted[right_count]
right_line.best_fit = right_line.best_fit/len(right_line.recent_xfitted)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
left_fitx = left_line.best_fit[0]*ploty**2 + left_line.best_fit[1]*ploty + left_line.best_fit[2]
right_fitx = right_line.best_fit[0]*ploty**2 + right_line.best_fit[1]*ploty + right_line.best_fit[2]
return left_fitx, right_fitx, ploty, left_fit_cr, right_fit_cr
def search_around_poly(binary_warped):
# Choose the width of the margin around the previous polynomial to search
margin = 50
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
#Set the area of search based on activated x-values within the +/- margin
#of the polynomial function found on the previous frame
left_lane_inds = ((nonzerox > (left_line.best_fit[0]*(nonzeroy**2) + left_line.best_fit[1]*nonzeroy +
left_line.best_fit[2] - margin)) & (nonzerox < (left_line.best_fit[0]*(nonzeroy**2) +
left_line.best_fit[1]*nonzeroy + left_line.best_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_line.best_fit[0]*(nonzeroy**2) + right_line.best_fit[1]*nonzeroy +
right_line.best_fit[2] - margin)) & (nonzerox < (right_line.best_fit[0]*(nonzeroy**2) +
right_line.best_fit[1]*nonzeroy + right_line.best_fit[2] + margin)))
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomials
left_fitx, right_fitx, ploty, left_fit_cr, right_fit_cr = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
result = cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
return result, left_fitx, right_fitx, ploty, left_fit_cr, right_fit_cr
def measure_curvature_real(ploty, left_fit_cr, right_fit_cr):
'''
Calculates the curvature of polynomial functions in meters.
'''
# Define conversions in x and y from pixels space to meters
global ym_per_pix # meters per pixel in y dimension
global xm_per_pix # meters per pixel in x dimension
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
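# R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, with A, B taken from the meter-scaled polynomial fits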
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
import unittest
from datetime import datetime
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from tweepy import Status, User
from twitter_bot_type_classification.dataset.db import DATE_TIME_FORMAT, TWITTER_DATE_TIME_FORMAT
from twitter_bot_type_classification.features.tweet import TweetFeatures, TWEET_SOURCES_IDX, COUNTRY_CODES_IDX, \
TWEET_TEXT_SIMILARITY_FEATURES
from twitter_bot_type_classification.features.user import UserFeatures, USER_FEATURES_INDEX
class UserFeaturesTests(unittest.TestCase):
def test_tweet_time_interval_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_time_interval_mean"]]))
def test_tweet_time_interval_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:05:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:10:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 3,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:50:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 4,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 01:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 5,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 01:05:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
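# expected: mean of the pairwise differences between consecutive tweet creation times (epoch seconds)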
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["tweet_time_interval_mean"]], np.mean([
946681200.0 - 946681500.0, 946681500.0 - 946681800.0, 946681800.0 - 946684200.0, 946684200.0 - 946684800.0,
946684800.0 - 946685100.0
]), places=4)
def test_tweet_time_interval_std_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_time_interval_std"]]))
def test_tweet_time_interval_std(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:05:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:10:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 3,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:50:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 4,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 01:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 5,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 01:05:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["tweet_time_interval_std"]], np.std([
946681200.0 - 946681500.0, 946681500.0 - 946681800.0, 946681800.0 - 946684200.0, 946684200.0 - 946684800.0,
946684800.0 - 946685100.0
]), places=4)
def test_tweet_likes_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_likes_mean"]]))
def test_tweet_likes_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 10,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 5,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["tweet_likes_mean"]], np.mean([10, 3, 5]))
def test_tweet_likes_std_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_likes_std"]]))
def test_tweet_likes_std(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 10,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 5,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["tweet_likes_std"]], np.std([10, 3, 5]), places=6)
def test_tweet_likes_max_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_likes_max"]]))
def test_tweet_likes_max(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 10,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 5,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["tweet_likes_max"]], 10)
def test_tweet_likes_min_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_likes_min"]]))
def test_tweet_likes_min(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 10,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 5,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["tweet_likes_min"]], 3)
def test_tweet_retweets_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_retweets_mean"]]))
def test_tweet_retweets_mean(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 1,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 3,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 7,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["tweet_retweets_mean"]], np.mean([1, 3, 7]), places=6)
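
# tweet_retweets_std: expected to be NaN for a user without tweets and the
# standard deviation of the retweet_count values (std([1, 3, 7])) otherwise.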
def test_tweet_retweets_std_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_retweets_std"]]))
def test_tweet_retweets_std(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 1,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 3,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 7,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["tweet_retweets_std"]], np.std([1, 3, 7]), places=6)
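
# tweet_retweets_max: expected to be NaN for a user without tweets and the
# largest retweet_count among the user's tweets (7 in this fixture) otherwise.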
def test_tweet_retweets_max_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_retweets_max"]]))
def test_tweet_retweets_max(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 1,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 3,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 7,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["tweet_retweets_max"]], 7)
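
# tweet_retweets_min: expected to be NaN for a user without tweets and the
# smallest retweet_count among the user's tweets (1 in this fixture) otherwise.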
def test_tweet_retweets_min_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_retweets_min"]]))
def test_tweet_retweets_min(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 1,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 3,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 7,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["tweet_retweets_min"]], 1.0)
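
# self_replies_mean: expected to be NaN for a user without tweets and the share
# of tweets replying to the user's own account (in_reply_to_user_id == 1 here)
# otherwise, i.e. mean([1, 1, 0]) for the fixture below.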
def test_self_replies_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["self_replies_mean"]]))
def test_self_replies_mean(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": 5,
"in_reply_to_user_id": 1,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": 5,
"in_reply_to_user_id": 1,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["self_replies_mean"]], np.mean([1, 1, 0]))
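
# number_of_different_countries: expected to be NaN for a user without tweets
# and the number of distinct place country codes ("DE" and "EN" -> 2) otherwise.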
def test_number_of_different_countries_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_different_countries"]]))
def test_number_of_different_countries(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "EN",
"name": "London"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_different_countries"]], 2.0)
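
# country_with_most_tweets: expected to be NaN for a user without tweets and
# the COUNTRY_CODES_IDX entry of the most frequent place country code ("DE",
# used twice vs. once for "EN") otherwise.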
def test_country_with_most_tweets_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["country_with_most_tweets"]]))
def test_country_with_most_tweets(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "EN",
"name": "London"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["country_with_most_tweets"]], COUNTRY_CODES_IDX["DE"])
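
# number_of_different_sources: expected to be NaN for a user without tweets and
# the number of distinct tweet sources (Web App and Android App -> 2) otherwise.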
def test_number_of_different_sources_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_different_sources"]]))
def test_number_of_different_sources(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Android App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_different_sources"]], 2.0)
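
# most_used_source: expected to be NaN for a user without tweets and the
# TWEET_SOURCES_IDX entry of the most frequently used source ("Twitter Web
# App", used twice here) otherwise.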
def test_most_used_source_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["most_used_source"]]))
def test_most_used_source(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Android App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["most_used_source"]], TWEET_SOURCES_IDX["Twitter Web App"])
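
# retweet_tweets_mean: expected to be NaN for a user without tweets and the
# share of tweets carrying a retweeted_status (mean([1, 1, 0])) otherwise.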
def test_retweet_tweets_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["retweet_tweets_mean"]]))
def test_retweet_tweets_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT),
"retweeted_status": {
"id": 12
}
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT),
"retweeted_status": {
"id": 13
}
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["retweet_tweets_mean"]], np.mean([1, 1, 0]))
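
# answer_tweets_mean: expected to be NaN for a user without tweets and the
# share of tweets that are replies (non-null in_reply_to_status_id), i.e.
# mean([1, 1, 0]) for the fixture below.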
def test_answer_tweets_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["answer_tweets_mean"]]))
def test_answer_tweets_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": 12,
"in_reply_to_user_id": 42,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": 13,
"in_reply_to_user_id": 42,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["answer_tweets_mean"]], np.mean([1, 1, 0]))
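
# number_of_withheld_countries_max: expected to be NaN for a user without
# tweets and the largest number of countries a single tweet is withheld in
# (["DE", "EN", "NL"] -> 3) otherwise.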
def test_number_of_withheld_countries_max_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_withheld_countries_max"]]))
def test_number_of_withheld_countries_max(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": ["EN"],
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": ["DE", "EN", "NL"],
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_withheld_countries_max"]], 3.0)
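
# withheld_country_tweets_mean: expected to be NaN for a user without tweets
# and the share of tweets withheld in at least one country (mean([0, 0, 1]))
# otherwise.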
def test_withheld_country_tweets_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["withheld_country_tweets_mean"]]))
def test_withheld_country_tweets_mean(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": "DE",
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["withheld_country_tweets_mean"]], np.mean([0, 0, 1]))
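
# number_of_different_tweet_coord_groups: expected to be NaN for a user without
# tweets and the number of distinct coordinate groups among the geotagged
# tweets (two different locations -> 2) otherwise.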
def test_number_of_different_tweet_coord_groups_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_different_tweet_coord_groups"]]))
def test_number_of_different_tweet_coord_groups_group(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": {
"coordinates": [15, -74]
},
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": {
"coordinates": [0, -74]
},
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_different_tweet_coord_groups"]], 2)
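
# most_frequent_tweet_coord_group: expected to be NaN for a user without tweets
# and the index of the coordinate group holding most geotagged tweets (7.0 for
# this fixture) otherwise.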
def test_most_frequent_tweet_coord_group_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["most_frequent_tweet_coord_group"]]))
def test_most_frequent_tweet_coord_group(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": {
"coordinates": [15, -74]
},
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": {
"coordinates": [0, -74]
},
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["most_frequent_tweet_coord_group"]], 7.0)
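
# different_user_interactions: expected to be NaN for a user without tweets and
# the number of distinct accounts interacted with otherwise; the cases below
# vary in_reply_to_user_id (no replies -> 0, one or two replies to the same
# user id 12345 -> 1).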
def test_different_user_interactions_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["different_user_interactions"]]))
def test_different_user_interactions_0(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["different_user_interactions"]], 0)
def test_different_user_interactions_1(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 12345,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["different_user_interactions"]], 1)
def test_different_user_interactions_2(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 12345,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 12345,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["different_user_interactions"]], 1)
def test_different_user_interactions_3(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 12345,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 12345,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 56789,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["different_user_interactions"]], 2)
def test_different_user_interactions_4(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 12345,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 56789,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": 101112,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["different_user_interactions"]], 3)
def test_tweet_text_length_max_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_text_length_max"]]))
def test_tweet_text_length_max(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["tweet_text_length_max"]], 108)
def test_tweet_text_length_min_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_text_length_min"]]))
def test_tweet_text_length_min(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["tweet_text_length_min"]], 53)
def test_tweet_text_length_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_text_length_mean"]]))
def test_tweet_text_length_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["tweet_text_length_mean"]], np.mean([61, 108, 53]),
places=5)
def test_tweet_text_length_std_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["tweet_text_length_std"]]))
def test_tweet_text_length_std(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["tweet_text_length_std"]], np.std([61, 108, 53]),
places=5)
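# The four "tweet_text_length_*" tests above share the same three fixture tweets, whose raw
# text lengths are 61, 108 and 53 characters (URLs and emojis counted as-is). The expected
# max/min/mean/std values are therefore plain statistics over len(status.text). A hedged
# sketch of that reduction (an assumption, not the project's actual implementation):
#
#     lengths = [len(t.text) for t in tweets]                    # -> [61, 108, 53]
#     stats = (max(lengths), min(lengths), np.mean(lengths), np.std(lengths))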
def test_number_of_hashtags_max_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_hashtags_max"]]))
def test_number_of_hashtags_max(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #ab #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_hashtags_max"]], 2.0)
def test_number_of_hashtags_min_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_hashtags_min"]]))
def test_number_of_hashtags_min(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #ab #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_hashtags_min"]], 0.0)
def test_number_of_hashtags_mean_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_hashtags_mean"]]))
def test_number_of_hashtags_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #ab #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_hashtags_mean"]], np.mean([0, 2, 1]))
def test_number_of_hashtags_std_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_hashtags_std"]]))
def test_number_of_hashtags_std(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #ab #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_hashtags_std"]], np.std([0, 2, 1]))
def test_length_of_hashtag_max_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["length_of_hashtag_max"]]))
def test_length_of_hashtag_max(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #ab #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["length_of_hashtag_max"]], 6.0)
def test_length_of_hashtag_min_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["length_of_hashtag_min"]]))
def test_length_of_hashtag_min(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #ab #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. #test",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["length_of_hashtag_min"]], 3.0)
def test_cleaned_tweet_text_length_max_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_max"]]))
def test_cleaned_tweet_text_length_max(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_max"]], 63.0)
def test_cleaned_tweet_text_length_min_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_min"]]))
def test_cleaned_tweet_text_length_min(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_min"]], 39.0)
def test_cleaned_tweet_text_length_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_mean"]]))
def test_cleaned_tweet_text_length_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_mean"]],
np.mean([39, 63, 51]))
def test_cleaned_tweet_text_length_std_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_std"]]))
def test_cleaned_tweet_text_length_std(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": " 😀 This is just a simple test tweet text with urls and emojis. http://www.twitter.com http://www.twitter.com",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text with emojis. 😀😀",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["cleaned_tweet_text_length_std"]],
np.std([39, 63, 51]), places=6)
def test_number_of_user_mentions_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_user_mentions_mean"]]))
def test_number_of_user_mentions_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2 @user3",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_user_mentions_mean"]], np.mean([3, 2, 1]))
def test_number_of_user_mentions_std_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_user_mentions_std"]]))
def test_number_of_user_mentions_std(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2 @user3",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_user_mentions_std"]], np.std([3, 2, 1]))
def test_number_of_user_mentions_max_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_user_mentions_max"]]))
def test_number_of_user_mentions_max(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2 @user3",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_user_mentions_max"]], 3.0)
def test_number_of_user_mentions_min_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_user_mentions_min"]]))
def test_number_of_user_mentions_min(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2 @user3",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user1 @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. @user2",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_user_mentions_min"]], 1.0)
def test_number_of_sentences_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_sentences_mean"]]))
def test_number_of_sentences_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text. This is another sentence.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertAlmostEqual(user_features[USER_FEATURES_INDEX["number_of_sentences_mean"]], np.mean([1, 1, 2]))
def test_number_of_sentences_std_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
        self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_sentences_std"]]))
# -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: <NAME>
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
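# Note: `other` is expected to be a list of the form [fvalue, pvalue, df_num, df_denom, "f"],
# matching the layout of the Gretl reference results (e.g. reset_2_3) defined further below.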
class TestGLSARGretl:
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
#simple diff, not growthrate, I want heteroscedasticity later for testing
endogd = np.diff(d['realinv'])
exogd = add_constant(np.c_[np.diff(d['realgdp'].values), d['realint'][:-1].values])
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
vif = [1.002, 1.002]
cond_1norm = 6862.0664
determinant = 1.0296049e+009
reciprocal_condition_number = 0.013819244
#Chi-square(2): test-statistic, pvalue, df
normality = [20.2792, 3.94837e-005, 2]
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
        assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
from rkstiff.grids import construct_x_kx_rfft, construct_x_kx_fft
from rkstiff.grids import construct_x_Dx_cheb
from rkstiff.derivatives import dx_rfft, dx_fft
import numpy as np
def test_periodic_dx_rfft():
N = 100
a, b = 0, 2*np.pi
x,kx = construct_x_kx_rfft(N,a,b)
u = np.sin(x)
ux_exact = np.cos(x)
ux_approx = dx_rfft(kx,u)
assert np.allclose(ux_exact,ux_approx)
def test_zeroboundaries_dx_rfft():
N = 400
a, b = -30., 30.
x,kx = construct_x_kx_rfft(N,a,b)
u = 1./np.cosh(x)
ux_exact = -np.tanh(x)/np.cosh(x)
ux_approx = dx_rfft(kx,u)
assert np.allclose(ux_exact,ux_approx)
def test_gauss_dx_rfft():
N = 128
a,b = -10,10
x,kx = construct_x_kx_rfft(N,a,b)
u = np.exp(-x**2)
    ux_exact = -2*x*np.exp(-x**2)
    ux_approx = dx_rfft(kx,u)
    assert np.allclose(ux_exact,ux_approx)
from itertools import groupby
import numbers
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.layers.meta_sequential import MetaSequential
from models.layers.meta_patch import MetaPatchConv2d, make_meta_patch_conv2d_block
class HyperGen(nn.Module):
""" Hypernetwork generator comprised of a backbone network, weight mapper, and a decoder.
Args:
backbone (nn.Module factory): Backbone network
weight_mapper (nn.Module factory): Weight mapper network.
in_nc (int): input number of channels.
num_classes (int): output number of classes.
kernel_sizes (int): the kernel size of the decoder layers.
level_layers (int): number of layers in each level of the decoder.
level_channels (list of int, optional): If specified, sets the output channels of each level in the decoder.
expand_ratio (int): inverted residual block's expansion ratio in the decoder.
groups (int, optional): Number of blocked connections from input channels to output channels.
weight_groups (int, optional): per level signal to weights in the decoder.
inference_hflip (bool): If true, enables horizontal flip of input tensor.
inference_gather (str): Inference gather type: ``mean'' or ``max''.
with_out_fc (bool): If True, add a final fully connected layer to the decoder.
decoder_groups (int, optional): per level groups in the decoder.
decoder_dropout (float): If specified, enables dropout with the given probability.
coords_res (list of tuple of int, optional): list of inference resolutions for caching positional embedding.
"""
def __init__(self, backbone, weight_mapper, in_nc=3, num_classes=3, kernel_sizes=3, level_layers=1, expand_ratio=1,
groups=1, inference_hflip=False, inference_gather='mean', with_out_fc=False, decoder_dropout=None):
super(HyperGen, self).__init__()
self.inference_hflip = inference_hflip
self.inference_gather = inference_gather
self.backbone = backbone()
feat_channels = [in_nc] + self.backbone.feat_channels[:-1]
self.decoder = MultiScaleDecoder(feat_channels, 3, num_classes, kernel_sizes, level_layers,
with_out_fc=with_out_fc, out_kernel_size=1, expand_ratio=expand_ratio,
dropout=decoder_dropout)
self.weight_mapper = weight_mapper(self.backbone.feat_channels[-1], self.decoder.param_groups)
@property
def hyper_params(self):
return self.decoder.hyper_params
def process_single_tensor(self, x, hflip=False):
x = torch.flip(x, [-1]) if hflip else x
features = self.backbone(x)
weights = self.weight_mapper(features[-1])
x = [x] + features[:-1]
x = self.decoder(x, weights)
x = torch.flip(x, [-1]) if hflip else x
return x
def gather_results(self, x, y=None):
assert x is not None
if y is None:
return x
if self.inference_gather == 'mean':
return (x + y) * 0.5
else:
return torch.max(x, y)
def forward(self, x):
assert isinstance(x, (list, tuple, torch.Tensor)), f'x must be of type list, tuple, or tensor'
if isinstance(x, torch.Tensor):
return self.process_single_tensor(x)
# Note: the first pyramid will determine the output resolution
out_res = x[0].shape[2:]
out = None
for p in x:
if self.inference_hflip:
p = torch.max(self.process_single_tensor(p), self.process_single_tensor(p, hflip=True))
else:
p = self.process_single_tensor(p)
# Resize current image to output resolution if necessary
if p.shape[2:] != out_res:
p = F.interpolate(p, out_res, mode='bilinear', align_corners=False)
out = self.gather_results(p, out)
return out
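# Illustrative call pattern for HyperGen (the factory names below are hypothetical and the
# tensor shapes are examples only; they are not part of this module):
#
#   model = HyperGen(backbone=my_backbone_factory, weight_mapper=my_mapper_factory,
#                    inference_hflip=True, inference_gather='max')
#   pyramid = [torch.randn(1, 3, 512, 512), torch.randn(1, 3, 256, 256)]
#   seg = model(pyramid)   # output resolution follows the first pyramid level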
class MultiScaleDecoder(nn.Module):
""" Dynamic multi-scale decoder.
Args:
feat_channels (list of int): per level input feature channels.
signal_channels (list of int): per level input signal channels.
num_classes (int): output number of classes.
kernel_sizes (int): the kernel size of the layers.
level_layers (int): number of layers in each level.
level_channels (list of int, optional): If specified, sets the output channels of each level.
norm_layer (nn.Module): Type of feature normalization layer
act_layer (nn.Module): Type of activation layer
out_kernel_size (int): kernel size of the final output layer.
expand_ratio (int): inverted residual block's expansion ratio.
groups (int, optional): number of blocked connections from input channels to output channels.
weight_groups (int, optional): per level signal to weights.
with_out_fc (bool): If True, add a final fully connected layer.
dropout (float): If specified, enables dropout with the given probability.
coords_res (list of tuple of int, optional): list of inference resolutions for caching positional embedding.
"""
def __init__(self, feat_channels, in_nc=3, num_classes=3, kernel_sizes=3, level_layers=1, norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU6(inplace=True), out_kernel_size=1, expand_ratio=1, with_out_fc=False, dropout=None):
super(MultiScaleDecoder, self).__init__()
if isinstance(kernel_sizes, numbers.Number):
kernel_sizes = (kernel_sizes,) * len(feat_channels)
if isinstance(level_layers, numbers.Number):
level_layers = (level_layers,) * len(feat_channels)
assert len(kernel_sizes) == len(feat_channels), \
f'kernel_sizes ({len(kernel_sizes)}) must be of size {len(feat_channels)}'
assert len(level_layers) == len(feat_channels), \
f'level_layers ({len(level_layers)}) must be of size {len(feat_channels)}'
self.level_layers = level_layers
self.levels = len(level_layers)
self.layer_params = []
feat_channels = feat_channels[::-1] # Reverse the order of the feature channels
# For each level
prev_channels = 0
for level in range(self.levels):
curr_ngf = feat_channels[level]
prev_channels += curr_ngf # Accommodate the previous number of channels
curr_layers = []
kernel_size = kernel_sizes[level]
# For each layer in the current level
for layer in range(self.level_layers[level]):
if (not with_out_fc) and (level == (self.levels - 1) and (layer == (self.level_layers[level] - 1))):
curr_ngf = num_classes
if kernel_size > 1:
curr_layers.append(HyperPatchInvertedResidual(
prev_channels + 2, curr_ngf, kernel_size, expand_ratio=expand_ratio, norm_layer=norm_layer,
act_layer=act_layer))
else:
curr_layers.append(make_meta_patch_conv2d_block(prev_channels + 2, curr_ngf, kernel_size))
prev_channels = curr_ngf
# Add level layers to module
self.add_module(f'level_{level}', MetaSequential(*curr_layers))
# Add the last layer
if with_out_fc:
out_fc_layers = [nn.Dropout2d(dropout, True)] if dropout is not None else []
out_fc_layers.append(
MetaPatchConv2d(prev_channels, num_classes, out_kernel_size, padding=out_kernel_size // 2))
self.out_fc = MetaSequential(*out_fc_layers)
else:
self.out_fc = None
# Calculate number of hyper parameters, weight ranges, and total number of hyper parameters per level
self.hyper_params = 0
self._ranges = [0]
self.param_groups = []
for level in range(self.levels):
level_layers = getattr(self, f'level_{level}')
self.hyper_params += level_layers.hyper_params
self._ranges.append(self.hyper_params)
self.param_groups.append(level_layers.hyper_params)
if with_out_fc:
self.hyper_params += self.out_fc.hyper_params
self.param_groups.append(self.out_fc.hyper_params)
self._ranges.append(self.hyper_params)
def forward(self, x, w):
assert isinstance(w, (list, tuple))
assert len(x) <= self.levels
# For each level
p = None
for level in range(len(x)):
level_w = w[level]
level_layers = getattr(self, f'level_{level}')
# Initial layer input
if p is None:
p = x[-level - 1]
else:
# p = F.interpolate(p, scale_factor=2, mode='bilinear', align_corners=False) # Upsample x2
if p.shape[2:] != x[-level - 1].shape[2:]:
p = F.interpolate(p, x[-level - 1].shape[2:], mode='bilinear', align_corners=False) # Upsample
p = torch.cat((x[-level - 1], p), dim=1)
# Add image coordinates
p = torch.cat([get_image_coordinates(p.shape[0], *p.shape[-2:], p.device), p], dim=1)
# Computer the output for the current level
p = level_layers(p, level_w)
# Last layer
if self.out_fc is not None:
p = self.out_fc(p, w[-1])
return p
class HyperPatchInvertedResidual(nn.Module):
def __init__(self, in_nc, out_nc, kernel_size=3, stride=1, expand_ratio=1, norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU6(inplace=True), padding_mode='reflect'):
super(HyperPatchInvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(in_nc * expand_ratio))
self.use_res_connect = self.stride == 1 and in_nc == out_nc
layers = []
if expand_ratio != 1:
# pw
layers.append(make_meta_patch_conv2d_block(in_nc, hidden_dim, 1, norm_layer=norm_layer,
act_layer=act_layer))
layers.extend([
# dw
make_meta_patch_conv2d_block(hidden_dim, hidden_dim, kernel_size, stride=stride, groups=hidden_dim,
norm_layer=norm_layer, act_layer=act_layer, padding_mode=padding_mode),
# pw-linear
make_meta_patch_conv2d_block(hidden_dim, out_nc, 1, stride=stride, norm_layer=norm_layer, act_layer=None)
])
self.conv = MetaSequential(*layers)
@property
def hyper_params(self):
return self.conv.hyper_params
def forward(self, x, w):
if self.use_res_connect:
return x + self.conv(x, w)
else:
return self.conv(x, w)
def get_image_coordinates(b, h, w, device):
x = torch.linspace(-1, 1, steps=w, device=device)
y = torch.linspace(-1, 1, steps=h, device=device)
grid = torch.stack(torch.meshgrid(y, x)[::-1], dim=0).repeat(b, 1, 1, 1)
# grid = torch.stack(torch.meshgrid(x, y)[::-1], dim=0).repeat(b, 1, 1, 1)
return grid
class WeightMapper(nn.Module):
""" Weight mapper module (called context head in the paper).
Args:
in_channels (int): input number of channels.
out_channels (int): output number of channels.
levels (int): number of levels operating on different strides.
bias (bool): if True, enables bias in all convolution operations.
min_unit (int): used when dividing the signal channels into parts, must be a multiple of this number.
weight_groups (int): per level signal to weights groups.
"""
def __init__(self, in_channels, out_channels, levels=2, bias=False, min_unit=8, down_groups=1, flat_groups=1,
weight_groups=1, avg_pool=False):
super(WeightMapper, self).__init__()
assert levels > 0, 'levels must be greater than zero'
self.in_channels = in_channels
self.out_channels = out_channels
self.levels = levels
self.bias = bias
self.avg_pool = avg_pool
self.down_groups = down_groups
self.flat_groups = flat_groups
self.weight_groups = weight_groups
min_unit = max(min_unit, weight_groups)
for level in range(self.levels - 1):
down = nn.Sequential(
nn.Conv2d(in_channels, in_channels, kernel_size=2, stride=2, bias=bias, groups=down_groups),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True))
self.add_module(f'down_{level}', down)
up = nn.UpsamplingNearest2d(scale_factor=2)
self.add_module(f'up_{level}', up)
flat = [nn.Conv2d(in_channels * 2, in_channels, kernel_size=1, bias=bias, groups=flat_groups),
nn.BatchNorm2d(in_channels)]
if level > 0:
flat.append(nn.ReLU(inplace=True))
flat = nn.Sequential(*flat)
self.add_module(f'flat_{level}', flat)
out_channels = [next_multiply(c, weight_groups) for c in out_channels]
self.out_conv = Conv2dMulti(in_channels, out_channels, 1, bias=bias, min_unit=min_unit, groups=weight_groups)
def forward(self, x):
if self.levels <= 1:
return self.out_conv(x)
# Down stream
feat = [x]
for level in range(self.levels - 1):
down = getattr(self, f'down_{level}')
feat.append(down(feat[-1]))
# Average the last feature map
if self.avg_pool:
orig_shape = feat[-1].shape
if orig_shape[-2:] != (1, 1):
feat[-1] = F.adaptive_avg_pool2d(feat[-1], 1)
feat[-1] = F.interpolate(feat[-1], orig_shape[-2:], mode='nearest')
# Up stream
for level in range(self.levels - 2, -1, -1):
up = getattr(self, f'up_{level}')
flat = getattr(self, f'flat_{level}')
x = up(feat.pop(-1))
feat[-1] = torch.cat((feat[-1], x), dim=1)
feat[-1] = flat(feat[-1])
# Output weights
w = self.out_conv(feat[-1])
if self.weight_groups > 1:
w = [wi[:, :oc] for wi, oc in zip(w, self.out_channels)]
return w
def extra_repr(self):
return f'in_channels={self.in_channels}, out_channels={self.out_channels}, bias={self.bias}'
def next_multiply(x, base):
    return type(x)(np.ceil(x / base) * base)
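# Illustrative check of the behaviour assumed above (rounding a channel count up to the next
# multiple of the group size, as suggested by the function name and its use for grouped
# channel counts): next_multiply(10, 8) -> 16, next_multiply(16, 8) -> 16.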
#<NAME>
#
#
# 2019-11-17
# -----------------------------------------------------------------------------
# This function computes the logarithmic (or ignorance) score. Predictive distributions can
# be considered as Gaussian, Gamma distributed, Empirical or "Loi des fuites"
# (a Gamma distribution + a Dirac at zero, suitable for daily precip), and Kernel distribution.
#
# input:
# calculation: mxn matrix; m = number of simulations
#                n = number of members in the ensemble
# observation: mx1 vector; m = number of records
# case: - 'Normal'
# - 'Gamma'
# - 'Kernel'
# - 'Fuites' is made for daily precipitation exclusively
# - 'Empirical'
# thres: probability density threshold below which we consider that the
# event was missed by the forecasting system. This value must be
# small (e.g.: 0.0001 means that f(obs) given the forecasts is
# only 0.0001 --> not forecasted).
# By default, thres = 0 and the logarithmic score is unbounded.
#        opt_case - if 'case' = 'Fuites', opt_case is the threshold to determine the data
#                   which contribute to the gamma distribution and those which are part of the
#                   Dirac impulse
#                 - if 'case' = 'Empirical', opt_case is the number of bins
#                   in which to divide the ensemble; by default, it will be the
#                   number of members (NaN excluded). opt_case has to be an integer
#                   greater than 1.
#
# output:
# loga: the logarithmic score (n*1 matrix)
#        ind_miss: Booleans to point out days for which the event was missed according
# to the threshold specified by the user (1= missed) (n*1 matrix)
#
# Reference:
# 'Empirical' case is based on Roulston and Smith (2002) with
# modifications -> quantile and members with similar values
# -----------------------------------------------------------------------------
# History
#
# MAB June 19: Added 2 cases for the empirical distribution: the
# observation can either be the smallest or the largest member of the
# augmented ensemble, in which case we can't use the "DeltaX = X(S+1) -
# X(S-1);" equation.
# -----------------------------------------------------------------------------
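# Example usage (illustrative sketch only; the forecast and observation arrays below are
# synthetic placeholders, not real data):
#
#   fcst = np.random.gamma(shape=2.0, scale=5.0, size=(365, 50))   # 365 days, 50 members
#   obs = np.random.gamma(shape=2.0, scale=5.0, size=365)
#   loga, ind_miss = score_log(fcst, obs, case='Normal', thres=1e-4)
#
# -----------------------------------------------------------------------------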
import numpy as np
from scipy.stats import norm, gamma, gaussian_kde
import sys
def score_log(calculation, observation, case, thres=0., opt_case=None):
# transform input into numpy array
calculation = np.array(calculation, dtype='float64')
observation = np.array(observation, dtype='float64')
dim1 = calculation.shape
if len(dim1) == 1:
calculation = calculation.reshape((1,dim1[0]))
dim2 = observation.shape
if len(dim2) == 0:
observation = observation.reshape((1,1))
elif len(dim2) == 1:
observation = observation.reshape((dim2[0],1))
# preparation
n = np.size(calculation, axis=0)
loga = np.empty(n)
loga[:] = np.nan
ind_miss = np.empty(n)
ind_miss[:] = np.nan
# test input arguments are correct
if len(observation) != n:
        sys.exit("Error! The length of the record of observations doesn't match the length of the forecasting period")
if thres == 0:
print('Logarithmic score is unbounded')
elif (thres < 0) or (thres > 1):
sys.exit('Threshold has to be between 0 and 1.')
    # calculation depending on the case
if case == 'Empirical':
        # if no opt_case is given, the number of bins is determined by the number of non-NaN members
        if opt_case is None:
            print('Bins used for empirical method determined by ensemble members')
        elif (opt_case < 2) or (not isinstance(opt_case, int)):
            sys.exit('Format of opt_case is not valid.')
        if not isinstance(thres, float):
            sys.exit('Format of threshold is not valid. thres needs to be a single float between 0 and 1.')
# loop over the records
for j in range(n):
            # determine whether the observation is within the min/max bounds of the ensemble
if (~np.all(np.isnan(calculation[j,:]))) and (~np.isnan(observation[j])):
if (np.nanmin(calculation[j,:]) <= observation[j]) and (observation[j] <= np.nanmax(calculation[j,:])):
ind_miss[j] = 0
# suppress NaN from the ensemble to determine the number of members
sample_nonnan = calculation[j,:][~np.isnan(calculation[j,:])]
sort_sample_nonnan = np.sort(sample_nonnan)
# transform data, if bins are specified by user in the opt_case argument
if opt_case != None:
sort_sample_nonnan = np.quantile(sort_sample_nonnan, np.arange(0, 1, 1/opt_case))
# number of bins
N = len(sort_sample_nonnan)
                    # if all members of the forecast and the observation are the same -> perfect forecast
if len(np.unique(np.append(sort_sample_nonnan, observation[j]))) == 1:
proba_obs = 1
else:
# if some members are equal, modify slightly the value
if len(np.unique(sort_sample_nonnan)) != len(sort_sample_nonnan):
uni_sample = np.unique(sort_sample_nonnan)
bins = np.append(uni_sample, np.inf)
hist, binedges = np.histogram(sort_sample_nonnan, bins)
idxs, = np.where(hist > 1)
new_sample = uni_sample
for idx in idxs:
new_val = uni_sample[idx] + 0.01 * np.random.rand(hist[idx]-1)
new_sample = np.append(new_sample, new_val)
sort_sample_nonnan = np.sort(new_sample)
# find position of the observation in the ensemble
X = np.sort(np.concatenate((sort_sample_nonnan, observation[j])))
S, = np.where(X == observation[j])
# if observation is at the first or last position of the ensemble -> threshold prob
if S[0] == len(X)-1:
proba_obs = thres
elif S[0] == 0:
proba_obs = thres
else:
#if the observation falls between two members or occupies the first or last rank
if len(S) == 1:
# If the observation is between the augmented ensemble bounds
DeltaX = X[S[0]+1] - X[S[0]-1]
proba_obs = min(1/(DeltaX * (N+1)),1)
# if observation is equal to one member, choose the maximum of the probability density associated
elif len(S) == 2:
if S[0] == 0:
DeltaX = X[S[1]+1] - X[S[1]]
elif S[1] == len(X)-1:
DeltaX = X[S[0]] - X[S[0]-1]
else:
DeltaX1 = X[S[1]+1] - X[S[1]]
DeltaX2 = X[S[0]] - X[S[0]-1]
DeltaX = min(DeltaX1,DeltaX2)
proba_obs = min(1/(DeltaX * (N+1)),1)
# test if probability below threshold
if proba_obs < thres:
proba_obs = thres
ind_miss[j] = 1
# if observation is outside of the bound of the ensemble
else:
ind_miss[j] = 1
proba_obs = thres
                # calculate the logarithm
loga[j] = - np.log2(proba_obs)
# if all values are nan in ensemble
else:
loga[j] = np.nan
ind_miss[j] = np.nan
elif case == 'Normal':
if (opt_case != None):
sys.exit('No optional case possible for Normal distribution')
for j in range(n):
# filter non nan values
sample_nonnan = calculation[j,:][~np.isnan(calculation[j,:])]
# if there are values in the ensemble which are not nan
if (len(sample_nonnan) > 0) and (~np.isnan(observation[j])):
# perfect forecast, all member values equal the observation
if len(np.unique(np.append(sample_nonnan, observation[j]))) == 1:
proba_obs = 1
ind_miss[j] = 0
loga[j] = - np.log2(proba_obs)
else:
mu, sig = norm.fit(sample_nonnan)
# transform standard deviation to unbiased estimation of standard deviation
nb_mb = len(sample_nonnan)
sighat = nb_mb/(nb_mb-1) * sig
                    # all members forecast the same value, but it differs from the observation
if sighat == 0:
loga[j] = - np.log2(thres)
ind_miss[j] = 1
else:
proba_obs = min(norm.pdf(observation[j], mu, sighat), 1)
if proba_obs >= thres:
ind_miss[j] = 0
loga[j] = - np.log2(proba_obs)
else:
loga[j] = - np.log2(thres)
ind_miss[j] = 1
            # if all values in the ensemble are nan
else:
loga[j] = np.nan
ind_miss[j] = np.nan
elif case == 'Gamma':
if (opt_case != None):
sys.exit('No optional case possible for Gamma distribution')
        # check if any value is smaller than or equal to zero
idxs = np.where(calculation <= 0)
if len(idxs[0]) == 0:
for j in range(n):
# filter non nan values
sample_nonnan = calculation[j,:][~np.isnan(calculation[j,:])]
# if there are values in the ensemble which are not nan
if (len(sample_nonnan) > 0) and (~np.isnan(observation[j])):
if len(np.unique(np.append(sample_nonnan, observation[j]))) == 1:
proba_obs = 1
ind_miss[j] = 0
loga[j] = - np.log2(proba_obs)
else:
# fit data to gamma distribtion
alpha, loc, beta = gamma.fit(sample_nonnan, floc=0)
proba_obs = min(gamma.pdf(observation[j], alpha, loc, beta), 1)
if (alpha <= 0) or (beta <= 0):
loga[j] = - np.log2(thres)
ind_miss[j] = 1
else:
if proba_obs >= thres:
ind_miss[j] = 0
loga[j] = - np.log2(proba_obs)
else:
loga[j] = - np.log2(thres)
ind_miss[j] = 1
                # if all values in the ensemble are nan
else:
loga[j] = np.nan
ind_miss[j] = np.nan
else:
sys.exit('Forecasts contain zeros. You must choose a different distribution.')
elif case == 'Kernel':
if (opt_case != None):
sys.exit('No optional case possible for Kernel distribution')
for j in range(n):
# filter non nan values
sample_nonnan = calculation[j,:][~np.isnan(calculation[j,:])]
# if there are values in the ensemble which are not nan
if (len(sample_nonnan) > 0) and (~np.isnan(observation[j])):
# perfect forecast, all member values equal the observation
                if len(np.unique(np.append(sample_nonnan, observation[j]))) == 1:
                    proba_obs = 1
                    ind_miss[j] = 0
                    loga[j] = - np.log2(proba_obs)
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/poroelasticity/cryer/cryer_soln.py
#
# @brief Analytical solution to Cryer's problem.
# Owing to the symmetry of the problem, we only need consider the quarter
# domain case.
#
# -F
# ----------
# | |
# Ux=0 | | P=0
# | |
# | |
# ----------
# Uy=0
#
# Dirichlet boundary conditions
# Ux(0,y) = 0
# Uy(x,0) = 0
# Neumann boundary conditions
# \tau_normal(x,ymax) = -1*Pa
import numpy
# Physical properties
G = 3.0
rho_s = 2500
rho_f = 1000
K_fl = 8.0
K_sg = 10.0
K_d = 4.0
alpha = 0.6
phi = 0.1
k = 1.5
mu_f = 1.0
P_0 = 1.0
R_0 = 1.0
ndim = 3
M = 1.0 / ( phi / K_fl + (alpha - phi) /K_sg)
kappa = k/mu_f
K_u = K_d + alpha*alpha*M
S = (3*K_u + 4*G) / (M*(3*K_d + 4*G)) #(1/M) + ( (3*alpha*alpha) / (3*K_d + 4*G) )#
c = kappa / S
nu = (3*K_d - 2*G) / (2*(3*K_d + G))
nu_u = (3*K_u - 2*G) / (2*(3*K_u + G))
U_R_inf = -1.*(P_0*R_0*(1.-2.*nu))/(2.*G*(1.+nu))
eta = (alpha*(1-2*nu))/(2*(1-nu))
xmin = 0.0 # m
xmax = 1.0 # m
ymin = 0.0 # m
ymax = 1.0 # m
zmin = 0.0 # m
zmax = 1.0 # m
# Time steps
ts = 0.0028666667 # sec
nts = 2
tsteps = numpy.arange(0.0, ts * nts, ts) + ts # sec
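# Minimal consistency check on the derived poroelastic constants above (illustrative only):
# the undrained Poisson ratio must exceed the drained one and the consolidation coefficient
# must be positive for the series solution implemented below to make sense.
assert 0.0 < nu < nu_u < 0.5 and c > 0.0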
# ----------------------------------------------------------------------
class AnalyticalSoln(object):
"""
Analytical solution to Cryer's problem
"""
SPACE_DIM = 3
TENSOR_SIZE = 4
ITERATIONS = 50
EPS = 1e-25
def __init__(self):
self.fields = {
"displacement": self.displacement,
"pressure": self.pressure,
#"trace_strain": self.trace_strain,
"porosity": self.porosity,
"solid_density": self.solid_density,
"fluid_density": self.fluid_density,
"fluid_viscosity": self.fluid_viscosity,
"shear_modulus": self.shear_modulus,
"undrained_bulk_modulus": self.undrained_bulk_modulus,
"drained_bulk_modulus": self.drained_bulk_modulus,
"biot_coefficient": self.biot_coefficient,
"biot_modulus": self.biot_modulus,
"isotropic_permeability": self.isotropic_permeability,
"initial_amplitude": {
"x_neg": self.zero_vector,
"y_neg": self.zero_vector,
"z_neg": self.zero_vector,
"surface_traction": self.surface_traction,
"surface_pressure": self.zero_scalar
}
}
return
def getField(self, name, mesh_entity, pts):
if name in "initial_amplitude":
field = self.fields[name][mesh_entity](pts)
else:
field = self.fields[name](pts)
return field
def zero_scalar(self, locs):
(npts, dim) = locs.shape
return numpy.zeros((1, npts, 1), dtype=numpy.float64)
def zero_vector(self, locs):
(npts, dim) = locs.shape
return numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)
def solid_density(self, locs):
"""
Compute solid_density field at locations.
"""
(npts, dim) = locs.shape
solid_density = rho_s * numpy.ones((1, npts, 1), dtype=numpy.float64)
return solid_density
def fluid_density(self, locs):
"""
Compute fluid density field at locations.
"""
(npts, dim) = locs.shape
fluid_density = rho_f * numpy.ones((1, npts, 1), dtype=numpy.float64)
return fluid_density
def porosity(self, locs):
"""
Compute solid_density field at locations.
"""
(npts, dim) = locs.shape
porosity = phi * numpy.ones((1, npts, 1), dtype=numpy.float64)
return porosity
def shear_modulus(self, locs):
"""
Compute shear modulus field at locations.
"""
(npts, dim) = locs.shape
shear_modulus = G * numpy.ones((1, npts, 1), dtype=numpy.float64)
return shear_modulus
def fluid_viscosity(self, locs):
"""
Compute fluid_viscosity field at locations.
"""
(npts, dim) = locs.shape
fluid_viscosity = mu_f * numpy.ones((1, npts, 1), dtype=numpy.float64)
return fluid_viscosity
def undrained_bulk_modulus(self, locs):
"""
Compute undrained bulk modulus field at locations.
"""
(npts, dim) = locs.shape
undrained_bulk_modulus = K_u * numpy.ones((1, npts, 1), dtype=numpy.float64)
return undrained_bulk_modulus
def drained_bulk_modulus(self, locs):
"""
Compute undrained bulk modulus field at locations.
"""
(npts, dim) = locs.shape
drained_bulk_modulus = K_d * numpy.ones((1, npts, 1), dtype=numpy.float64)
return drained_bulk_modulus
def biot_coefficient(self, locs):
"""
Compute biot coefficient field at locations.
"""
(npts, dim) = locs.shape
biot_coefficient = alpha * numpy.ones((1, npts, 1), dtype=numpy.float64)
return biot_coefficient
def biot_modulus(self, locs):
"""
Compute biot modulus field at locations.
"""
(npts, dim) = locs.shape
biot_modulus = M * numpy.ones((1, npts, 1), dtype=numpy.float64)
return biot_modulus
def isotropic_permeability(self, locs):
"""
Compute isotropic permeability field at locations.
"""
(npts, dim) = locs.shape
isotropic_permeability = k * numpy.ones((1, npts, 1), dtype=numpy.float64)
return isotropic_permeability
def displacement(self, locs):
"""
Compute displacement field at locations.
"""
(npts, dim) = locs.shape
ntpts = tsteps.shape[0]
displacement = numpy.zeros((ntpts, npts, dim), dtype=numpy.float64)
x_n = self.cryerZeros()
center = numpy.where(~locs.any(axis=1))[0]
R = numpy.sqrt(locs[:,0]*locs[:,0] + locs[:,1]*locs[:,1] + locs[:,2]*locs[:,2])
theta = numpy.nan_to_num( numpy.arctan( numpy.nan_to_num( numpy.sqrt(locs[:,0]**2 + locs[:,1]**2) / locs[:,2] ) ) )
phi = numpy.nan_to_num( numpy.arctan( numpy.nan_to_num( locs[:,1] / locs[:,0] ) ) )
R_star = R.reshape([R.size,1]) / R_0
x_n.reshape([1,x_n.size])
E = numpy.square(1-nu)*numpy.square(1+nu_u)*x_n - 18*(1+nu)*(nu_u-nu)*(1-nu_u)
t_track = 0
for t in tsteps:
t_star = (c*t)/(R_0**2)
r_exact_N = R_star.ravel() - numpy.nan_to_num(numpy.sum(((12*(1 + nu)*(nu_u - nu)) / \
((1 - 2*nu)*E*R_star*R_star*x_n*numpy.sin(numpy.sqrt(x_n))) ) * \
(3*(nu_u - nu) * (numpy.sin(R_star*numpy.sqrt(x_n)) - R_star*numpy.sqrt(x_n)*numpy.cos(R_star*numpy.sqrt(x_n))) + \
(1 - nu)*(1 - 2*nu)*R_star*R_star*R_star*x_n*numpy.sin(numpy.sqrt(x_n))) * \
numpy.exp(-x_n*t_star),axis=1))
            displacement[t_track, :, 0] = (r_exact_N*U_R_inf)*numpy.cos(phi)*numpy.sin(theta)
import cv2
import torch
import numpy as np
def fuse_heatmap_image(img, heatmap, resize=None, keep_heatmap=False):
img = img.cpu().numpy() if isinstance(img, torch.Tensor) else np.array(img)
heatmap = heatmap.detach().cpu().numpy() if isinstance(heatmap, torch.Tensor) else heatmap
if not resize:
size = img.shape
else:
size = resize
heatmap = heatmap - np.min(heatmap)
heatmap = heatmap / np.max(heatmap)
heatmap = np.float32(cv2.resize(heatmap, size))
heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    fused = np.float32(cv2.resize(img/255, size)) + np.float32(heatmap / 255)  # heatmap was already resized above
"""Script to compare the sensitivity and discovery potential for the LLAGN sample (15887 sources)
as a function of injected spectral index for energy decades between 100 GeV and 10 PeV.
"""
from __future__ import print_function
from __future__ import division
import numpy as np
from flarestack.core.results import ResultsHandler
# # from flarestack.data.icecube.ps_tracks.ps_v002_p01 import ps_7year
# from flarestack.data.icecube.ps_tracks.ps_v003_p02 import ps_10year
# from flarestack.data.icecube.northern_tracks.nt_v002_p05 import diffuse_8year
# from flarestack.data.icecube.gfu.gfu_v002_p01 import txs_sample_v1
from flarestack.shared import plot_output_dir, flux_to_k, make_analysis_pickle, k_to_flux
from flarestack.data.icecube import diffuse_8_year
from flarestack.utils.catalogue_loader import load_catalogue
from flarestack.analyses.agn_cores.shared_agncores import \
agn_subset_catalogue, complete_cats_north, complete_cats_north, agn_catalogue_name, agn_subset_catalogue_north
from flarestack.core.minimisation import MinimisationHandler
from flarestack.cluster import analyse, wait_for_cluster
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
# plt.style.use('~/scratch/phdthesis.mpltstyle')
import time
import logging
import os
import psutil, resource #to get memory usage info
analyses = dict()
# Initialise Injectors/LLHs
llh_time = {
"time_pdf_name": "Steady"
}
llh_energy = {
"energy_pdf_name": "PowerLaw"
}
llh_dict = {
"llh_name": "standard_matrix",
"llh_sig_time_pdf": llh_time,
"llh_energy_pdf": llh_energy
}
def base_name(cat_key, gamma):
return "analyses/agn_cores/stacking_analysis_8yrNTsample_diff_sens_pre_unblinding/{0}/" \
"{1}/".format(cat_key, gamma)
def generate_name(cat_key, n_sources, gamma):
return base_name(cat_key, gamma) + "NrSrcs={0}/".format(n_sources)
gammas = [2.0, 2.5]
# Number of sources in the LLAGN sample
nr_brightest_sources = [15887]
# Energy bins
energies = np.logspace(2, 7, 6)
bins = list(zip(energies[:-1], energies[1:]))
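# With np.logspace(2, 7, 6) the bin edges are 1e2, 1e3, ..., 1e7 GeV, so `bins` holds the five
# energy decades spanning 100 GeV to 10 PeV referred to in the module docstring.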
all_res = dict()
for (cat_type, method) in complete_cats_north[-1:]:
unique_key = cat_type + "_" + method
print(unique_key)
gamma_dict = dict()
for gamma_index in gammas:
res = dict()
for j, nr_srcs in enumerate(nr_brightest_sources):
cat_path = agn_subset_catalogue(cat_type, method, nr_srcs)
print("Loading catalogue", cat_path, " with ", nr_srcs, "sources")
catalogue = load_catalogue(cat_path)
cat = np.load(cat_path)
print("Total flux is: ", cat['base_weight'].sum()*1e-13)
full_name = generate_name(unique_key, nr_srcs, gamma_index)
res_e_min = dict()
# scale factor of neutrino injection, tuned for each energy bin
scale_factor_per_decade = [0.2, 0.5, 1, 0.57, 0.29]
for i, (e_min, e_max) in enumerate(bins[:]):
full_name_en = full_name + 'Emin={0:.2f}'.format(e_min) + "/"
print("Full name for ", nr_srcs, " sources is", full_name_en)
# Injection parameters
injection_time = llh_time
injection_energy = dict(llh_energy)
injection_energy["gamma"] = gamma_index
injection_energy["e_min_gev"] = e_min
injection_energy["e_max_gev"] = e_max
inj_kwargs = {
"injection_energy_pdf": injection_energy,
"injection_sig_time_pdf": injection_time,
}
mh_dict = {
"name": full_name_en,
"mh_name": "large_catalogue",
"dataset": diffuse_8_year.get_seasons(), #subselection_fraction=1),
"catalogue": cat_path,
"llh_dict": llh_dict,
"inj_dict": inj_kwargs,
"n_trials": 1, #10,
# "n_steps": 15,
}
mh = MinimisationHandler.create(mh_dict)
# scale factor to tune (manually) the number of injected neutrinos
scale_factor = 3 * mh.guess_scale()/3/7/scale_factor_per_decade[i]
print("Scale Factor: ", scale_factor_per_decade[i], scale_factor)
# # # # # How to run on the cluster for sources < 3162
mh_dict["n_steps"] = 15
mh_dict["scale"] = scale_factor
analyse(mh_dict, cluster=False, n_cpu=32, n_jobs=150)
# How to run on the cluster for sources > 3162
# _n_jobs = 50
# scale_loop = np.linspace(0, scale_factor, 15)
# print(scale_loop)
# for scale in scale_loop[:4]:
# print('Running ' + str(mh_dict["n_trials"]) + ' trials with scale ' + str(scale))
# mh_dict["fixed_scale"] = scale
# # # analyse(mh_dict, cluster=False, n_cpu=35, n_jobs=10)
# if scale == 0.:
# n_jobs = _n_jobs*10
# else:
# n_jobs = _n_jobs
# print("Submitting " + str(n_jobs) + " jobs")
# analyse(mh_dict, cluster=True, n_cpu=1, n_jobs=n_jobs)
res_e_min[e_min] = mh_dict
res[nr_srcs] = res_e_min
gamma_dict[gamma_index] = res
all_res[unique_key] = gamma_dict
# wait_for_cluster()
logging.getLogger().setLevel("INFO")
for (cat_key, gamma_dict) in all_res.items():
print(cat_key, cat_key.split("_"))
agn_type = cat_key.split("_")[0]
xray_cat = cat_key.split(str(agn_type)+'_')[-1]
full_cat = load_catalogue(agn_catalogue_name(agn_type, xray_cat))
full_flux = np.sum(full_cat["base_weight"])
saturate_ratio = 0.26
# Loop on gamma
for (gamma_index, gamma_res) in (iter(gamma_dict.items())):
sens = []
sens_err_low = []
sens_err_upp = []
disc_pot = []
disc_ts_threshold = []
n_src = []
fracs = []
sens_livetime = []
disc_pots_livetime = []
sens_livetime_100GeV10PeV = []
disc_pots_livetime_100GeV10PeV = []
ratio_sens = []
ratio_disc = []
ratio_sens_100GeV10PeV = []
ratio_disc_100GeV10PeV = []
int_xray_flux_erg = []
int_xray_flux = []
guess = []
sens_n = []
disc_pot_n = []
e_min_gev = []
e_max_gev = []
base_dir = base_name(cat_key, gamma_index)
# Loop on number of sources of the AGN sample
for (nr_srcs, rh_dict_srcs) in sorted(gamma_res.items()):
print("In if loop on nr_srcs and rh_dict")
print(nr_srcs)
print(rh_dict_srcs)
print("nr_srcs in loop: ", nr_srcs)
# loop on emin and emax
for (e_min, rh_dict) in sorted(rh_dict_srcs.items()):
cat = load_catalogue(rh_dict["catalogue"])
print("e_min in loop: ", e_min)
print(" ")
print(" ")
int_xray = np.sum(cat["base_weight"] / 1e13*624.151)
int_xray_flux.append(int_xray) # GeV cm-2 s-1
int_xray_flux_erg.append(np.sum(cat["base_weight"]) / 1e13) # erg
# cm-2 s-1
fracs.append(np.sum(cat["base_weight"])/full_flux)
try:
rh = ResultsHandler(rh_dict)
print("Sens", rh.sensitivity)
print("Sens_err", rh.sensitivity_err, rh.sensitivity_err[0], rh.sensitivity_err[1])
print("Disc", rh.disc_potential)
print("Disc_TS_threshold", rh.disc_ts_threshold)
# print("Guess", rh_dict["scale"])
print("Sens (n)", rh.sensitivity * rh.flux_to_ns)
print("DP (n)", rh.disc_potential * rh.flux_to_ns)
# # guess.append(k_to_flux(rh_dict["scale"])* 2./3.)
# guess.append(k_to_flux(rh_dict["scale"])/3.)
print(rh_dict["inj_dict"], rh_dict["inj_dict"]["injection_energy_pdf"]["e_min_gev"])
e_min_gev.append(rh_dict["inj_dict"]["injection_energy_pdf"]["e_min_gev"])
e_max_gev.append(rh_dict["inj_dict"]["injection_energy_pdf"]["e_max_gev"])
# sensitivity/dp normalized per flux normalization GeV-1 cm-2 s-1
sens.append(rh.sensitivity)
sens_err_low.append(rh.sensitivity_err[0])
sens_err_upp.append(rh.sensitivity_err[1])
disc_pot.append(rh.disc_potential)
disc_ts_threshold.append(rh.disc_ts_threshold)
sens_n.append(rh.sensitivity * rh.flux_to_ns)
disc_pot_n.append(rh.disc_potential * rh.flux_to_ns)
key = "Energy Flux (GeV cm^{-2} s^{-1})" # old version: "Total Fluence (GeV cm^{-2} s^{-1})"
astro_sens, astro_disc = rh.astro_values(
rh_dict["inj_dict"]["injection_energy_pdf"])
sens_livetime.append(astro_sens[key]) # fluence=integrated over energy
disc_pots_livetime.append(astro_disc[key])
                    # Nu energy flux integrated between 100 GeV and 10 PeV,
                    # independently of the e_min_gev, e_max_gev of the injection
rh_dict["inj_dict"]["injection_energy_pdf"]["e_min_gev"] = 100
rh_dict["inj_dict"]["injection_energy_pdf"]["e_max_gev"] = 1e7
astro_sens_100GeV10PeV, astro_disc_100GeV10PeV = rh.astro_values(
rh_dict["inj_dict"]["injection_energy_pdf"])
sens_livetime_100GeV10PeV.append(astro_sens_100GeV10PeV[key]) # fluence=integrated over energy
disc_pots_livetime_100GeV10PeV.append(astro_disc_100GeV10PeV[key])
# normalized over tot xray flux
ratio_sens.append(astro_sens[key] / int_xray) # fluence
ratio_disc.append(astro_disc[key] / int_xray)
ratio_sens_100GeV10PeV.append(astro_sens_100GeV10PeV[key] / int_xray) # fluence
ratio_disc_100GeV10PeV.append(astro_disc_100GeV10PeV[key] / int_xray)
n_src.append(nr_srcs)
except OSError:
pass
# # Save arrays to file
np.savetxt(plot_output_dir(base_dir) + "data.out",
(np.array(n_src), np.array(int_xray_flux_erg),
np.array(e_min_gev), np.array(e_max_gev),
np.array(sens), np.array(sens_err_low), np.array(sens_err_upp),
np.array(disc_pot), np.array(disc_ts_threshold),
np.array(sens_livetime), np.array(disc_pots_livetime),
np.array(ratio_sens), np.array(ratio_disc),
np.array(ratio_sens)/saturate_ratio, np.array(ratio_disc)/saturate_ratio,
np.array(sens_livetime_100GeV10PeV), np.array(disc_pots_livetime_100GeV10PeV),
np.array(ratio_sens_100GeV10PeV), np.array(ratio_disc_100GeV10PeV),
np.array(ratio_sens_100GeV10PeV)/saturate_ratio, np.array(ratio_disc_100GeV10PeV)/saturate_ratio,
                    np.array(sens_n), np.array(disc_pot_n)))
# -*- coding: utf-8 -*-
'''
perceptron algorithm
to minimize misclassification error
SVM (support vector machine)
maxmize the margin (margin is defined as the distance between the separating hyperplane, and the training samples that are closest to this hyperplane)
'''
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
def scikit_basic():
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .3, random_state = 0)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
ppn = Perceptron(n_iter = 40, eta0 = .1, random_state = 0)
ppn.fit(X_train_std, y_train)
y_pred = ppn.predict(X_test_std)
# print ('Misclassified samples: %d' % (y_test != y_pred).sum())
# print ('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X = X_combined_std, y = y_combined, classifier = ppn, test_idx = range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc = 'upper left')
plt.show()
def plot_decision_regions(X, y, classifier, test_idx = None, resolution = .02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() -1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() -1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha = .4, cmap = cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot all samples
X_test, y_test = X[test_idx, :], y[test_idx]
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x = X[y == cl, 0], y = X[y == cl, 1],
alpha = .8, c = cmap(idx),
marker = markers[idx], label = cl)
# highlight test samples
if test_idx:
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0], X_test[:, 1], c = '',
alpha = 1.0, linewidth = 1, marker = 'o',
s = 55, label = 'test set')
def sigmoid(z):
return 1.0/(1.0 + np.exp(-z))
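# sigmoid(0) == 0.5, and the output approaches 1 (0) for large positive (negative) z,
# which is what the commented-out logistic plot in logistic_base() visualizes.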
class iris_data:
def __init__(self):
iris = datasets.load_iris()
self.X = iris.data[:, [2, 3]]
self.y = iris.target
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size = .3, random_state = 0)
def norm_data(self):
return self.X_train, self.y_train, self.X_test, self.y_test
def std_data(self):
sc = StandardScaler()
sc.fit(self.X_train)
self.X_train_std = sc.transform(self.X_train)
self.X_test_std = sc.transform(self.X_test)
return self.X_train_std, self.y_train, self.X_test_std, self.y_test
def combined_data(self):
self.std_data()
X_combined_std = np.vstack((self.X_train_std, self.X_test_std))
y_combined = np.hstack((self.y_train, self.y_test))
return X_combined_std, y_combined
def logistic_base():
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .3, random_state = 0)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
''' logistic plot '''
# z = np.arange( -7, 7, .1)
# phi_z = sigmoid(z)
# plt.plot(z, phi_z)
# plt.axvline(0.0, color = 'k')
# plt.axhspan(0.0, 1.0, facecolor = '1.0', alpha = 1.0, ls = 'dotted')
# plt.axhline(y = .5, ls = 'dotted', color = 'k')
# plt.yticks([0.0, .5, 1.0])
# plt.ylim(-.1, 1.1)
# plt.xlabel('z')
# plt.ylabel('$\phi (z)$')
# plt.show()
''' fit iris with logistic'''
# lr = LogisticRegression(C = 1000.0, random_state = 0)
# lr.fit(X_train_std, y_train)
# plot_decision_regions(X_combined_std, y_combined, classifier = lr, test_idx = range(105, 150))
# plt.xlabel('petal length [standardized]')
# plt.ylabel('petal width [standardized]')
# plt.legend(loc = 'upper left')
# plt.show()
# print (lr.predict_proba(X_test_std[0, :]))
'''add L2 regularization'''
weights, params = [], []
for c in np.arange(-5, 5):
lr = LogisticRegression(C = 10 ** c, random_state = 0)
lr.fit(X_train_std, y_train)
weights.append(lr.coef_[1])
print (lr.coef_[1])
params.append(10 ** c)
weights = np.array(weights)
plt.plot(params, weights[:, 0], label = 'petal length')
plt.plot(params, weights[:, 1], linestyle = '--', label = 'petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc = 'upper left')
plt.xscale('log')
plt.show()
def SVM_related():
'''solve linear problem'''
svm = SVC(kernel = 'linear', C = 1.0, random_state = 0)
ir = iris_data()
X_train_std, y_train, X_test_std, y_test = ir.std_data()
X_combined_std, y_combined = ir.combined_data()
svm.fit(X_train_std, y_train)
# plot_decision_regions(X_combined_std, y_combined, classifier = svm, test_idx = range(105, 150))
# plt.xlabel('petal length [standardized]')
# plt.ylabel('petal width [standardized]')
# plt.legend(loc = 'upper left')
# plt.show()
'''solve non-linear problem'''
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
# plt.scatter(X_xor[y_xor == 1, 0], X_xor[y_xor == 1, 1], c = 'b', marker = 'x', label = '1')
# plt.scatter(X_xor[y_xor == -1, 0], X_xor[y_xor == -1, 1], c = 'r', marker = 's', label = '-1')
# plt.ylim(-3.0)
# plt.legend()
# plt.show()
# svm = SVC(kernel = 'rbf', random_state = 0, gamma = .10, C = 10.0)
# svm.fit(X_xor, y_xor)
# plot_decision_regions(X_xor, y_xor, classifier = svm)
# plt.legend(loc = 'upper left')
# plt.show()
# for better understanding on the gamma parameter
# svm = SVC(kernel = 'rbf', random_state = 0, gamma = .2, C = 1.0)
# svm.fit(X_train_std, y_train)
# plot_decision_regions(X_combined_std, y_combined, classifier = svm, test_idx = range(105, 150))
    # plt.xlabel('petal length [standardized]')
    # plt.ylabel('petal width [standardized]')
# plt.legend(loc = 'upper left')
# plt.show()
def decision_tree_related():
ir = iris_data()
X_train, y_train, X_test, y_test = ir.norm_data()
    X_combined = np.vstack((X_train, X_test))
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
from sklearn.neighbors import kneighbors_graph
from sklearn import svm
import time
import tensorflow as tf
def del_all_flags(FLAGS):
flags_dict = FLAGS._flags()
keys_list = [keys for keys in flags_dict]
for keys in keys_list:
FLAGS.__delattr__(keys)
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def noise_power_from_snrdb(snrdb):
return 1/10.0 ** (snrdb/10.0)
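# e.g. noise_power_from_snrdb(0) == 1.0, noise_power_from_snrdb(10) == 0.1,
# noise_power_from_snrdb(20) == 0.01: each extra 10 dB of SNR lowers the noise power tenfold.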
def add_noise2feat(x,snrdb):
noise_power=sp.linalg.norm(x)*noise_power_from_snrdb(snrdb)
noise = noise_power* np.random.normal(0, 1, (np.shape(x)[1]))
return x+noise
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
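# e.g. sample_mask([0, 2], 4) -> array([ True, False,  True, False])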
def load_data(dataset_str,neighbor_list):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
if (dataset_str=='test') | (dataset_str=='breast_cancer') | (dataset_str=='ionosphere') \
| (dataset_str == 'synthetic'):
with open("data/ind.{}.test.index".format(dataset_str), 'rb') as f:
if sys.version_info > (3, 0):
test_idx_reorder=(pkl.load(f, encoding='latin1'))
else:
test_idx_reorder =(pkl.load(f))
if dataset_str=='test':
adj = nx.adjacency_matrix(graph)
else:
adj =[]
features = sp.vstack((allx)).tolil()
labels = np.vstack((ally))
test_idx_range = np.sort(test_idx_reorder)
else:
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
labels = np.vstack((ally, ty))
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
features[test_idx_reorder, :] = features[test_idx_range, :]
nbr_neighbors=neighbor_list
adj_list=np.append([adj],create_network_nearest_neighbor(features,nbr_neighbors))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
val_size= 100
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+val_size)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
# usage: mpython lig2protDist.py t1_v178a
import mdtraj as md
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sys
key=sys.argv[1]
print('loading in {} trajectory...'.format(key))
t=md.load(key+'.dcd',top=key+'.psf',stride=100)
print('assigning residue groups...')
top = t.topology
gbiAtoms = top.select("resname GBI1")
group0 = [top.atom(gbiAtoms[0]).residue.index]
### mdtraj starts reading residue 0 as the first one (Phe 88)
# so to get mdtraj values, take (protein resid)-88
group1 = list(range(11,38)) # 99-126
group2 = list(range(46,73)) # 134-161
group3 = list(range(80,104)) # 168-192
group4 = list(range(110,133)) # 198-221
print('calculating contacts from s1 helix...')
pairs1 = list(itertools.product(group0, group1))
dists1, inds1 = md.compute_contacts(t,pairs1) # inds1 is same as pairs1 here
print('calculating contacts from s2 helix...')
pairs2 = list(itertools.product(group0, group2))
dists2, inds2 = md.compute_contacts(t,pairs2)
print('calculating contacts from s3 helix...')
pairs3 = list(itertools.product(group0, group3))
dists3, inds3 = md.compute_contacts(t,pairs3)
print('calculating contacts from s4 helix...')
pairs4 = list(itertools.product(group0, group4))
dists4, inds4 = md.compute_contacts(t,pairs4)
### take relative to reference coordinates
print('doing the same with reference coordinates...')
u=md.load('t1_begin.pdb')
group0a = [u.topology.atom(u.topology.select("resname GBI1")[0]).residue.index]
# assign pairs
pairs1a = list(itertools.product(group0a, group1))
pairs2a = list(itertools.product(group0a, group2))
pairs3a = list(itertools.product(group0a, group3))
pairs4a = list(itertools.product(group0a, group4))
# compute distances
dists1a, inds1a = md.compute_contacts(u,pairs1a)
dists2a, inds2a = md.compute_contacts(u,pairs2a)
dists3a, inds3a = md.compute_contacts(u,pairs3a)
dists4a, inds4a = md.compute_contacts(u,pairs4a)
# take relative difference
rel1 = dists1-dists1a
rel2 = dists2-dists2a
rel3 = dists3-dists3a
rel4 = dists4-dists4a
print('plotting original distances...')
plt.clf()
fig = plt.figure(figsize=(12,24))
plt.subplot(4,1,1)
plt.imshow(dists1.T,cmap='jet_r',aspect='auto',interpolation='none', vmin=0.0, vmax=2.30)
plt.ylabel('residue in S1')
ax = plt.gca();
ax.set_yticks(np.arange(0, 27, 1))
import copy
import numpy as np
import random
from sklearn.utils import shuffle
from ml_utils import ActivationFunctions, LossFunctions
import time
from serializer import Serializer
class NamesToNationalityClassifier:
def __init__(self, possible_labels, alpha=0.0001, hidden_dimensions=500, l2_lambda = 0.02, momentum=0.9, num_epoche=30):
self.serializer = Serializer(possible_labels)
self.alpha = alpha
self.input_dimensions = self.serializer.input_dimensions
self.hidden_dimensions = hidden_dimensions
self.output_dimensions = self.serializer.target_dimensions
self.training_to_validation_ratio = 0.7 # This means 70% of the dataset will be used for training, and 30% is for validation
# Weight Initialization
# We are using the Xavier initialization
# Reference: https://medium.com/usf-msds/deep-learning-best-practices-1-weight-initialization-14e5c0295b94
self.weight_init_type = 'X1'
        self.W0 = np.random.randn(self.hidden_dimensions, self.hidden_dimensions) * np.sqrt(1 / self.hidden_dimensions)
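        # With this Xavier ('X1') scaling the weights get standard deviation sqrt(1/fan_in);
        # for the default 500-unit hidden layer that is sqrt(1/500) ~= 0.045, which keeps the
        # variance of the activations roughly constant from layer to layer.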
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Calculation of RDMs from unbalanced datasets, i.e. datasets with different
channels or numbers of measurements per dissimilarity
@author: heiko
"""
from collections.abc import Iterable
from copy import deepcopy
import warnings
import numpy as np
from rsatoolbox.rdm.rdms import RDMs
from rsatoolbox.rdm.rdms import concat
from rsatoolbox.util.matrix import row_col_indicator_rdm
def calc_rdm_unbalanced(dataset, method='euclidean', descriptor=None,
noise=None, cv_descriptor=None,
prior_lambda=1, prior_weight=0.1,
weighting='number', enforce_same=False):
"""
calculate a RDM from an input dataset for unbalanced datasets.
Args:
dataset (rsatoolbox.data.dataset.DatasetBase):
The dataset the RDM is computed from
method (String):
a description of the dissimilarity measure (e.g. 'Euclidean')
descriptor (String):
obs_descriptor used to define the rows/columns of the RDM
noise (numpy.ndarray):
dataset.n_channel x dataset.n_channel
precision matrix used to calculate the RDM
used only for Mahalanobis and Crossnobis estimators
defaults to an identity matrix, i.e. euclidean distance
Returns:
rsatoolbox.rdm.rdms.RDMs: RDMs object with the one RDM
"""
if descriptor is None:
dataset = deepcopy(dataset)
dataset.obs_descriptors['index'] = np.arange(dataset.n_obs)
descriptor = 'index'
if isinstance(dataset, Iterable):
rdms = []
for i_dat, dat in enumerate(dataset):
if noise is None:
rdms.append(calc_rdm_unbalanced(
dat, method=method, descriptor=descriptor,
cv_descriptor=cv_descriptor,
prior_lambda=prior_lambda, prior_weight=prior_weight,
weighting=weighting, enforce_same=enforce_same))
elif isinstance(noise, np.ndarray) and noise.ndim == 2:
rdms.append(calc_rdm_unbalanced(
dat, method=method,
descriptor=descriptor,
noise=noise,
cv_descriptor=cv_descriptor,
prior_lambda=prior_lambda, prior_weight=prior_weight,
weighting=weighting, enforce_same=enforce_same))
elif isinstance(noise, Iterable):
rdms.append(calc_rdm_unbalanced(
dat, method=method,
descriptor=descriptor,
noise=noise[i_dat],
cv_descriptor=cv_descriptor,
prior_lambda=prior_lambda, prior_weight=prior_weight,
weighting=weighting, enforce_same=enforce_same))
rdm = concat(rdms)
else:
rdm = []
weights = []
self_sim = []
if method == 'crossnobis' or method == 'poisson_cv':
if cv_descriptor is None:
if 'index' not in dataset.obs_descriptors.keys():
dataset.obs_descriptors['index'] = np.arange(dataset.n_obs)
cv_descriptor = 'index'
                warnings.warn('cv_descriptor not set, using index for now. '
                              + 'This will only remove self-similarities. '
                              + 'Effectively this assumes independent trials.')
unique_cond = set(dataset.obs_descriptors[descriptor])
for i, i_des in enumerate(unique_cond):
v, _ = calc_one_similarity(
dataset, descriptor, i_des, i_des, method=method,
noise=noise, weighting=weighting,
prior_lambda=prior_lambda,
prior_weight=prior_weight,
cv_descriptor=cv_descriptor)
self_sim.append(v)
for j, j_des in enumerate(unique_cond):
if j > i:
v, w = calc_one_similarity(
dataset, descriptor, i_des, j_des, method=method,
noise=noise, weighting=weighting,
prior_lambda=prior_lambda,
prior_weight=prior_weight,
cv_descriptor=cv_descriptor)
rdm.append(v)
weights.append(w)
row_idx, col_idx = row_col_indicator_rdm(len(unique_cond))
self_sim = np.array(self_sim)
rdm = np.array(rdm)
rdm = row_idx @ self_sim + col_idx @ self_sim - 2 * rdm
rdm = RDMs(
dissimilarities=np.array([rdm]),
dissimilarity_measure=method,
rdm_descriptors=deepcopy(dataset.descriptors))
rdm.pattern_descriptors[descriptor] = list(unique_cond)
rdm.rdm_descriptors['weights'] = [weights]
return rdm
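# Hedged usage sketch (not part of the original module): it assumes that
# rsatoolbox.data.dataset.Dataset accepts (measurements, obs_descriptors=...) and uses the
# illustrative descriptor name 'conds'. It builds a small unbalanced dataset -- unequal
# numbers of repeats per condition -- and computes a Euclidean RDM from it.
def _example_calc_rdm_unbalanced():
    from rsatoolbox.data.dataset import Dataset
    rng = np.random.default_rng(0)
    measurements = rng.standard_normal((7, 10))    # 7 observations x 10 channels
    conds = ['a', 'a', 'a', 'b', 'b', 'c', 'c']    # unbalanced: 3/2/2 repeats per condition
    data = Dataset(measurements, obs_descriptors={'conds': conds})
    return calc_rdm_unbalanced(data, method='euclidean', descriptor='conds')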
def _check_noise(noise, n_channel):
"""
checks that a noise pattern is a matrix with correct dimension
n_channel x n_channel
Args:
noise: noise input to be checked
Returns:
noise(np.ndarray): n_channel x n_channel noise precision matrix
"""
if noise is None:
pass
elif isinstance(noise, np.ndarray) and noise.ndim == 2:
assert np.all(noise.shape == (n_channel, n_channel))
elif isinstance(noise, Iterable):
for i, _ in enumerate(noise):
noise[i] = _check_noise(noise[i], n_channel)
elif isinstance(noise, dict):
for key in noise.keys():
noise[key] = _check_noise(noise[key], n_channel)
else:
raise ValueError('noise(s) must have shape n_channel x n_channel')
return noise
def calc_one_similarity_small(
dataset, descriptor, i_des, j_des, method='euclidean',
noise=None, weighting='number',
prior_lambda=1, prior_weight=0.1):
"""
finds all pairs of vectors to be compared and calculates one similarity
Args:
dataset (rsatoolbox.data.DatasetBase):
dataset to extract from
descriptor (String):
key for the descriptor defining the conditions
i_des : descriptor value
the value of the first condition
j_des : descriptor value
the value of the second condition
noise : numpy.ndarray (n_channels x n_channels), optional
the covariance or precision matrix over channels
necessary for calculation of mahalanobis distances
Returns:
(np.ndarray, np.ndarray) : (value, weight)
value are the dissimilarities
weight is the weight for the samples
"""
data_i = dataset.subset_obs(descriptor, i_des)
data_j = dataset.subset_obs(descriptor, j_des)
values = []
weights = []
for vec_i in data_i.measurements:
for vec_j in data_j.measurements:
finite = np.isfinite(vec_i) & np.isfinite(vec_j)
if noise is not None:
noise_small = noise[finite][:, finite]
else:
noise_small = None
if np.any(finite):
if weighting == 'number':
weight = np.sum(finite)
elif weighting == 'equal':
weight = 1
sim = similarity(
vec_i[finite], vec_j[finite], method,
prior_lambda=prior_lambda, prior_weight=prior_weight,
noise=noise_small) \
/ np.sum(finite)
values.append(sim)
weights.append(weight)
weights = np.array(weights)
values = np.array(values)
if np.sum(weights) > 0:
weight = np.sum(weights)
value = np.sum(weights * values) / weight
else:
value = np.nan
weight = 0
return value, weight
def calc_one_similarity(dataset, descriptor, i_des, j_des,
method='euclidean',
noise=None, weighting='number',
prior_lambda=1, prior_weight=0.1,
cv_descriptor=None):
"""
finds all pairs of vectors to be compared and calculates one distance
Args:
dataset (rsatoolbox.data.DatasetBase):
dataset to extract from
descriptor (String):
key for the descriptor defining the conditions
i_des : descriptor value
the value of the first condition
j_des : descriptor value
the value of the second condition
noise : numpy.ndarray (n_channels x n_channels), optional
the covariance or precision matrix over channels
necessary for calculation of mahalanobis distances
Returns:
(np.ndarray, np.ndarray) : (value, weight)
value is the dissimilarity
weight is the weight of the samples
"""
data_i = dataset.subset_obs(descriptor, i_des)
data_j = dataset.subset_obs(descriptor, j_des)
values = []
weights = []
for i in range(data_i.n_obs):
for j in range(data_j.n_obs):
if cv_descriptor is None:
accepted = True
else:
if (data_i.obs_descriptors[cv_descriptor][i]
== data_j.obs_descriptors[cv_descriptor][j]):
accepted = False
else:
accepted = True
if accepted:
vec_i = data_i.measurements[i]
vec_j = data_j.measurements[j]
finite = np.isfinite(vec_i) & np.isfinite(vec_j)
if np.any(finite):
if weighting == 'number':
weight = np.sum(finite)
elif weighting == 'equal':
weight = 1
sim = similarity(
vec_i[finite], vec_j[finite],
method,
noise=noise,
prior_lambda=prior_lambda,
prior_weight=prior_weight) \
/ np.sum(finite)
values.append(sim)
weights.append(weight)
weights = np.array(weights)
values = np.array(values)
if np.sum(weights) > 0:
weight = np.sum(weights)
value = np.sum(weights * values) / weight
else:
value = np.nan
weight = 0
return value, weight
def calc_one_dissimilarity_cv(dataset, descriptor, i_des, j_des,
method='euclidean',
noise=None, weighting='number',
prior_lambda=1, prior_weight=0.1,
cv_descriptor=None, enforce_same=False):
"""
finds all pairs of vectors to be compared and calculates one distance
Args:
dataset (rsatoolbox.data.DatasetBase):
dataset to extract from
descriptor (String):
key for the descriptor defining the conditions
i_des : descriptor value
the value of the first condition
j_des : descriptor value
the value of the second condition
noise : numpy.ndarray (n_channels x n_channels), optional
the covariance or precision matrix over channels
necessary for calculation of mahalanobis distances
Returns:
(np.ndarray, np.ndarray) : (value, weight)
value is the dissimilarity
weight is the weight of the samples
"""
data_i = dataset.subset_obs(descriptor, i_des)
data_j = dataset.subset_obs(descriptor, j_des)
values = []
weights = []
for i in range(data_i.n_obs):
for j in range(data_j.n_obs):
for k in range(i + 1, data_i.n_obs):
for l in range(j + 1, data_j.n_obs):
if cv_descriptor is None:
accepted = True
else:
if (data_i.obs_descriptors[cv_descriptor][i]
== data_i.obs_descriptors[cv_descriptor][k]):
accepted = False
elif (data_j.obs_descriptors[cv_descriptor][j]
== data_j.obs_descriptors[cv_descriptor][l]):
accepted = False
elif (data_i.obs_descriptors[cv_descriptor][i]
== data_j.obs_descriptors[cv_descriptor][l]):
accepted = False
elif (data_j.obs_descriptors[cv_descriptor][j]
== data_i.obs_descriptors[cv_descriptor][k]):
accepted = False
else:
accepted = True
if enforce_same:
if (data_i.obs_descriptors[cv_descriptor][i]
!= data_j.obs_descriptors[cv_descriptor][j]):
accepted = False
if (data_i.obs_descriptors[cv_descriptor][k]
!= data_j.obs_descriptors[cv_descriptor][l]):
accepted = False
if accepted:
vec_i = data_i.measurements[i]
vec_j = data_j.measurements[j]
vec_k = data_i.measurements[k]
vec_l = data_j.measurements[l]
finite = np.isfinite(vec_i) & np.isfinite(vec_j) \
& np.isfinite(vec_k) & np.isfinite(vec_l)
if np.any(finite):
if weighting == 'number':
weight = np.sum(finite)
elif weighting == 'equal':
weight = 1
dissim = dissimilarity_cv(
vec_i[finite], vec_j[finite],
vec_k[finite], vec_l[finite],
method,
noise=noise,
prior_lambda=prior_lambda,
prior_weight=prior_weight) \
/ np.sum(finite)
values.append(dissim)
weights.append(weight)
    weights = np.array(weights)
import cv2 as cv
import numpy as np
import os
import math
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
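# e.g. softmax(np.array([0.0, 1.0])) ~= [0.269, 0.731]; outputs are positive and sum to 1 along axis 0.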
def load_img(dir): ## load the images
img_name = os.listdir(dir)
img_num = len(img_name)
imgs = []
for i in range(img_num):
img = cv.imread(dir+'/'+img_name[i], 1)
img = cv.normalize(img, None, 0, 1, cv.NORM_MINMAX, cv.CV_32F)
imgs.append(img)
return imgs
def cal_saturation(src): ## compute saturation
return np.std(src, axis=2)
def cal_contrast(src): ## compute contrast
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
ker = np.float32([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
lap = abs(cv.filter2D(gray, -1, kernel=ker))
return lap
def cal_wellexposedness(src, sigma=0.2): ## compute well-exposedness
w = np.exp(-0.5 * (src - 0.5)**2 / sigma**2)
    w = np.prod(w, axis=2)
    return w
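# Hedged sketch (not from the original script): exposure-fusion style weighting usually
# multiplies the three per-pixel quality measures into one weight map per input exposure and
# then normalizes across exposures; the implicit exponents of 1 and the small epsilon are
# assumptions made here for illustration only.
def example_weight_maps(imgs):
    weights = [cal_contrast(im) * cal_saturation(im) * cal_wellexposedness(im) + 1e-12
               for im in imgs]
    total = np.sum(weights, axis=0)          # per-pixel sum over all exposures
    return [w / total for w in weights]      # per-pixel weights now sum to 1
# e.g. weight_maps = example_weight_maps(load_img('exposures_dir'))  # 'exposures_dir' is illustrative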
from __future__ import division
import itertools
import numpy as np
import sys
from collections import namedtuple
from numba import unittest_support as unittest
from numba import njit, typeof, types, typing, typeof, ir, utils, bytecode
from .support import TestCase, tag
from numba.array_analysis import EquivSet, ArrayAnalysis
from numba.compiler import Pipeline, Flags, _PipelineManager
from numba.targets import cpu, registry
from numba.numpy_support import version as numpy_version
from numba.ir_utils import remove_dead
# for parallel tests, marking that Windows with Python 2.7 is not supported
_windows_py27 = (sys.platform.startswith('win32') and
sys.version_info[:2] == (2, 7))
_32bit = sys.maxsize <= 2 ** 32
_reason = 'parfors not supported'
skip_unsupported = unittest.skipIf(_32bit or _windows_py27, _reason)
class TestEquivSet(TestCase):
"""
Test array_analysis.EquivSet.
"""
@tag('important')
def test_insert_equiv(self):
s1 = EquivSet()
s1.insert_equiv('a', 'b')
self.assertTrue(s1.is_equiv('a', 'b'))
self.assertTrue(s1.is_equiv('b', 'a'))
s1.insert_equiv('c', 'd')
self.assertTrue(s1.is_equiv('c', 'd'))
self.assertFalse(s1.is_equiv('c', 'a'))
s1.insert_equiv('a', 'c')
self.assertTrue(s1.is_equiv('a', 'b', 'c', 'd'))
self.assertFalse(s1.is_equiv('a', 'e'))
@tag('important')
def test_intersect(self):
s1 = EquivSet()
s2 = EquivSet()
r = s1.intersect(s2)
self.assertTrue(r.is_empty())
s1.insert_equiv('a', 'b')
r = s1.intersect(s2)
self.assertTrue(r.is_empty())
s2.insert_equiv('b', 'c')
r = s1.intersect(s2)
self.assertTrue(r.is_empty())
s2.insert_equiv('d', 'a')
r = s1.intersect(s2)
self.assertTrue(r.is_empty())
s1.insert_equiv('a', 'e')
s2.insert_equiv('c', 'd')
r = s1.intersect(s2)
self.assertTrue(r.is_equiv('a', 'b'))
self.assertFalse(r.is_equiv('a', 'e'))
self.assertFalse(r.is_equiv('c', 'd'))
class ArrayAnalysisTester(Pipeline):
@classmethod
def mk_pipeline(cls, args, return_type=None, flags=None, locals={},
library=None, typing_context=None, target_context=None):
if not flags:
flags = Flags()
flags.nrt = True
if typing_context is None:
typing_context = registry.cpu_target.typing_context
if target_context is None:
target_context = registry.cpu_target.target_context
return cls(typing_context, target_context, library, args, return_type,
flags, locals)
def compile_to_ir(self, func, test_idempotence=None):
"""
Populate and run compiler pipeline
"""
self.func_id = bytecode.FunctionIdentity.from_function(func)
try:
bc = self.extract_bytecode(self.func_id)
except BaseException as e:
raise e
self.bc = bc
self.lifted = ()
self.lifted_from = None
pm = _PipelineManager()
pm.create_pipeline("nopython")
if self.func_ir is None:
pm.add_stage(self.stage_analyze_bytecode, "analyzing bytecode")
pm.add_stage(self.stage_process_ir, "processing IR")
if not self.flags.no_rewrites:
if self.status.can_fallback:
pm.add_stage(
self.stage_preserve_ir, "preserve IR for fallback")
pm.add_stage(self.stage_generic_rewrites, "nopython rewrites")
pm.add_stage(
self.stage_inline_pass, "inline calls to locally defined closures")
pm.add_stage(self.stage_nopython_frontend, "nopython frontend")
pm.add_stage(self.stage_annotate_type, "annotate type")
if not self.flags.no_rewrites:
pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
func_ir_copies = []
def stage_array_analysis():
self.array_analysis = ArrayAnalysis(self.typingctx, self.func_ir,
self.type_annotation.typemap,
self.type_annotation.calltypes)
self.array_analysis.run(self.func_ir.blocks)
func_ir_copies.append(self.func_ir.copy())
if test_idempotence and len(func_ir_copies) > 1:
test_idempotence(func_ir_copies)
pm.add_stage(stage_array_analysis, "analyze array equivalences")
if test_idempotence:
# Do another pass of array analysis to test idempontence
pm.add_stage(stage_array_analysis, "analyze array equivalences")
pm.finalize()
res = pm.run(self.status)
return self.array_analysis
class TestArrayAnalysis(TestCase):
def compare_ir(self, ir_list):
outputs = []
for func_ir in ir_list:
remove_dead(func_ir.blocks, func_ir.arg_names, func_ir)
output = utils.StringIO()
func_ir.dump(file=output)
outputs.append(output.getvalue())
self.assertTrue(len(set(outputs)) == 1) # assert all outputs are equal
def _compile_and_test(self, fn, arg_tys, asserts=[], equivs=[], idempotent=True):
"""
Compile the given function and get its IR.
"""
test_pipeline = ArrayAnalysisTester.mk_pipeline(arg_tys)
test_idempotence = self.compare_ir if idempotent else lambda x:()
analysis = test_pipeline.compile_to_ir(fn, test_idempotence)
if equivs:
for func in equivs:
# only test the equiv_set of the first block
func(analysis.equiv_sets[0])
if asserts == None:
self.assertTrue(self._has_no_assertcall(analysis.func_ir))
else:
for func in asserts:
func(analysis.func_ir, analysis.typemap)
def _has_assertcall(self, func_ir, typemap, args):
msg = "Sizes of {} do not match".format(', '.join(args))
for label, block in func_ir.blocks.items():
for expr in block.find_exprs(op='call'):
fn = func_ir.get_definition(expr.func.name)
if isinstance(fn, ir.Global) and fn.name == 'assert_equiv':
typ = typemap[expr.args[0].name]
if typ.value.startswith(msg):
return True
return False
def _has_shapecall(self, func_ir, x):
for label, block in func_ir.blocks.items():
for expr in block.find_exprs(op='getattr'):
if expr.attr == 'shape':
y = func_ir.get_definition(expr.value, lhs_only=True)
z = func_ir.get_definition(x, lhs_only=True)
y = y.name if isinstance(y, ir.Var) else y
z = z.name if isinstance(z, ir.Var) else z
if y == z:
return True
return False
def _has_no_assertcall(self, func_ir):
for label, block in func_ir.blocks.items():
for expr in block.find_exprs(op='call'):
fn = func_ir.get_definition(expr.func.name)
if isinstance(fn, ir.Global) and fn.name == 'assert_equiv':
return False
return True
def with_assert(self, *args):
return lambda func_ir, typemap: self.assertTrue(
self._has_assertcall(func_ir, typemap, args))
def without_assert(self, *args):
return lambda func_ir, typemap: self.assertFalse(
self._has_assertcall(func_ir, typemap, args))
def with_equiv(self, *args):
def check(equiv_set):
n = len(args)
for i in range(n - 1):
if not equiv_set.is_equiv(args[i], args[n - 1]):
return False
return True
return lambda equiv_set: self.assertTrue(check(equiv_set))
def without_equiv(self, *args):
def check(equiv_set):
n = len(args)
for i in range(n - 1):
if equiv_set.is_equiv(args[i], args[n - 1]):
return False
return True
return lambda equiv_set: self.assertTrue(check(equiv_set))
def with_shapecall(self, x):
return lambda func_ir, s: self.assertTrue(self._has_shapecall(func_ir, x))
def without_shapecall(self, x):
return lambda func_ir, s: self.assertFalse(self._has_shapecall(func_ir, x))
def test_base_cases(self):
def test_0():
a = np.zeros(0)
b = np.zeros(1)
m = 0
n = 1
c = np.zeros((m, n))
return
self._compile_and_test(test_0, (),
equivs=[self.with_equiv('a', (0,)),
self.with_equiv('b', (1,)),
self.with_equiv('c', (0, 1))])
def test_1(n):
a = np.zeros(n)
b = np.zeros(n)
return a + b
self._compile_and_test(test_1, (types.intp,), asserts=None)
def test_2(m, n):
a = np.zeros(n)
b = np.zeros(m)
return a + b
self._compile_and_test(test_2, (types.intp, types.intp),
asserts=[self.with_assert('a', 'b')])
def test_3(n):
a = np.zeros(n)
return a + n
self._compile_and_test(test_3, (types.intp,), asserts=None)
def test_4(n):
a = np.zeros(n)
b = a + 1
c = a + 2
return a + c
self._compile_and_test(test_4, (types.intp,), asserts=None)
def test_5(n):
a = np.zeros((n, n))
m = n
b = np.zeros((m, n))
return a + b
self._compile_and_test(test_5, (types.intp,), asserts=None)
def test_6(m, n):
a = np.zeros(n)
b = np.zeros(m)
d = a + b
e = a - b
return d + e
self._compile_and_test(test_6, (types.intp, types.intp),
asserts=[self.with_assert('a', 'b'),
self.without_assert('d', 'e')])
def test_7(m, n):
a = np.zeros(n)
b = np.zeros(m)
if m == 10:
d = a + b
else:
d = a - b
return d + a
self._compile_and_test(test_7, (types.intp, types.intp),
asserts=[self.with_assert('a', 'b'),
self.without_assert('d', 'a')])
def test_8(m, n):
a = np.zeros(n)
b = np.zeros(m)
if m == 10:
d = b + a
else:
d = a + a
return b + d
self._compile_and_test(test_8, (types.intp, types.intp),
asserts=[self.with_assert('b', 'a'),
self.with_assert('b', 'd')])
def test_9(m):
A = np.ones(m)
s = 0
while m < 2:
m += 1
B = np.ones(m)
s += np.sum(A + B)
return s
self._compile_and_test(test_9, (types.intp,),
asserts=[self.with_assert('A', 'B')])
def test_10(m, n):
p = m - 1
q = n + 1
r = q + 1
A = np.zeros(p)
B = np.zeros(q)
C = np.zeros(r)
D = np.zeros(m)
s = np.sum(A + B)
t = np.sum(C + D)
return s + t
self._compile_and_test(test_10, (types.intp,types.intp,),
asserts=[self.with_assert('A', 'B'),
self.without_assert('C', 'D')])
T = namedtuple("T", ['a','b'])
def test_namedtuple(n):
r = T(n, n)
return r[0]
self._compile_and_test(test_namedtuple, (types.intp,),
equivs=[self.with_equiv('r', ('n', 'n'))],)
def test_shape(A):
(m, n) = A.shape
B = np.ones((m, n))
return A + B
self._compile_and_test(test_shape, (types.Array(types.intp, 2, 'C'),),
asserts=None)
def test_cond(l, m, n):
A = np.ones(l)
B = np.ones(m)
C = np.ones(n)
if l == m:
r = np.sum(A + B)
else:
r = 0
if m != n:
s = 0
else:
s = np.sum(B + C)
t = 0
if l == m:
if m == n:
t = np.sum(A + B + C)
return r + s + t
self._compile_and_test(test_cond, (types.intp, types.intp, types.intp),
asserts=None)
def test_assert_1(m, n):
assert(m == n)
A = np.ones(m)
B = np.ones(n)
return np.sum(A + B)
self._compile_and_test(test_assert_1, (types.intp, types.intp),
asserts=None)
def test_assert_2(A, B):
assert(A.shape == B.shape)
return np.sum(A + B)
self._compile_and_test(test_assert_2, (types.Array(types.intp, 1, 'C'),
types.Array(types.intp, 1, 'C'),),
asserts=None)
self._compile_and_test(test_assert_2, (types.Array(types.intp, 2, 'C'),
types.Array(types.intp, 2, 'C'),),
asserts=None)
# expected failure
with self.assertRaises(AssertionError) as raises:
self._compile_and_test(test_assert_2, (types.Array(types.intp, 1, 'C'),
types.Array(types.intp, 2, 'C'),),
asserts=None)
msg = "Dimension mismatch"
self.assertIn(msg, str(raises.exception))
def test_stencilcall(self):
from numba import stencil
@stencil
def kernel_1(a):
return 0.25 * (a[0,1] + a[1,0] + a[0,-1] + a[-1,0])
def test_1(n):
a = np.ones((n,n))
b = kernel_1(a)
return a + b
self._compile_and_test(test_1, (types.intp,),
equivs=[self.with_equiv('a', 'b')],
asserts=[self.without_assert('a', 'b')])
def test_2(n):
a = np.ones((n,n))
b = np.ones((n+1,n+1))
kernel_1(a, out=b)
return a
self._compile_and_test(test_2, (types.intp,),
equivs=[self.without_equiv('a', 'b')])
@stencil(standard_indexing=('c',))
def kernel_2(a, b, c):
return a[0,1,0] + b[0,-1,0] + c[0]
def test_3(n):
a = np.arange(64).reshape(4,8,2)
b = np.arange(64).reshape(n,8,2)
u = np.zeros(1)
v = kernel_2(a, b, u)
return v
# standard indexed arrays are not considered in size equivalence
self._compile_and_test(test_3, (types.intp,),
equivs=[self.with_equiv('a', 'b', 'v'),
self.without_equiv('a', 'u')],
asserts=[self.with_assert('a', 'b')])
def test_slice(self):
def test_1(m, n):
A = np.zeros(m)
B = np.zeros(n)
s = np.sum(A + B)
C = A[1:m-1]
D = B[1:n-1]
t = np.sum(C + D)
return s + t
self._compile_and_test(test_1, (types.intp,types.intp,),
asserts=[self.with_assert('A', 'B'),
self.without_assert('C', 'D')],
idempotent=False)
def test_2(m):
A = np.zeros(m)
B = A[0:m-3]
C = A[1:m-2]
D = A[2:m-1]
E = B + C
return D + E
self._compile_and_test(test_2, (types.intp,),
asserts=[self.without_assert('B', 'C'),
self.without_assert('D', 'E')],
idempotent=False)
def test_3(m):
A = np.zeros((m,m))
B = A[0:m-2,0:m-2]
C = A[1:m-1,1:m-1]
E = B + C
return E
self._compile_and_test(test_3, (types.intp,),
asserts=[self.without_assert('B', 'C')],
idempotent=False)
def test_4(m):
A = np.zeros((m,m))
B = A[0:m-2,:]
C = A[1:m-1,:]
E = B + C
return E
self._compile_and_test(test_4, (types.intp,),
asserts=[self.without_assert('B', 'C')],
idempotent=False)
def test_5(m,n):
A = np.zeros(m)
B = np.zeros(m)
B[0:m-2] = A[1:m-1]
C = np.zeros(n)
D = A[1:m-1]
C[0:n-2] = D
# B and C are not necessarily of the same size because we can't
# derive m == n from (m-2) % m == (n-2) % n
return B + C
self._compile_and_test(test_5, (types.intp,types.intp),
asserts=[self.without_assert('B', 'A'),
self.with_assert('C', 'D'),
self.with_assert('B', 'C')],
idempotent=False)
def test_6(m):
A = np.zeros((m,m))
B = A[0:m-2,:-1]
C = A[1:m-1,:-1]
E = B + C
return E
self._compile_and_test(test_6, (types.intp,),
asserts=[self.without_assert('B', 'C')],
idempotent=False)
def test_7(m):
A = np.zeros((m,m))
B = A[0:m-2,-3:-1]
C = A[1:m-1,-4:-2]
E = B + C
return E
self._compile_and_test(test_7, (types.intp,),
asserts=[self.without_assert('B', 'C')],
idempotent=False)
def test_8(m):
A = np.zeros((m,m))
B = A[:m-2,0:]
C = A[1:-1,:]
E = B + C
return E
self._compile_and_test(test_8, (types.intp,),
asserts=[self.without_assert('B', 'C')],
idempotent=False)
def test_numpy_calls(self):
def test_zeros(n):
a = np.zeros(n)
b = np.zeros((n, n))
c = np.zeros(shape=(n, n))
self._compile_and_test(test_zeros, (types.intp,),
equivs=[self.with_equiv('a', 'n'),
self.with_equiv('b', ('n', 'n')),
self.with_equiv('b', 'c')])
def test_0d_array(n):
a = np.array(1)
b = np.ones(2)
return a + b
self._compile_and_test(test_0d_array, (types.intp,),
equivs=[self.without_equiv('a', 'b')],
asserts=[self.without_shapecall('a')])
def test_ones(n):
a = np.ones(n)
b = np.ones((n, n))
c = np.ones(shape=(n, n))
self._compile_and_test(test_ones, (types.intp,),
equivs=[self.with_equiv('a', 'n'),
self.with_equiv('b', ('n', 'n')),
self.with_equiv('b', 'c')])
def test_empty(n):
a = np.empty(n)
b = np.empty((n, n))
c = np.empty(shape=(n, n))
self._compile_and_test(test_empty, (types.intp,),
equivs=[self.with_equiv('a', 'n'),
self.with_equiv('b', ('n', 'n')),
self.with_equiv('b', 'c')])
def test_eye(n):
a = np.eye(n)
b = np.eye(N=n)
c = np.eye(N=n, M=n)
d = np.eye(N=n, M=n + 1)
self._compile_and_test(test_eye, (types.intp,),
equivs=[self.with_equiv('a', ('n', 'n')),
self.with_equiv('b', ('n', 'n')),
self.with_equiv('b', 'c'),
self.without_equiv('b', 'd')])
def test_identity(n):
a = np.identity(n)
self._compile_and_test(test_identity, (types.intp,),
equivs=[self.with_equiv('a', ('n', 'n'))])
def test_diag(n):
a = np.identity(n)
b = np.diag(a)
c = np.diag(b)
d = np.diag(a, k=1)
self._compile_and_test(test_diag, (types.intp,),
equivs=[self.with_equiv('b', ('n',)),
self.with_equiv('c', ('n', 'n'))],
asserts=[self.with_shapecall('d'),
self.without_shapecall('c')])
def test_array_like(a):
b = np.empty_like(a)
c = np.zeros_like(a)
d = np.ones_like(a)
e = np.full_like(a, 1)
f = np.asfortranarray(a)
self._compile_and_test(test_array_like, (types.Array(types.intp, 2, 'C'),),
equivs=[
self.with_equiv('a', 'b', 'd', 'e', 'f')],
asserts=[self.with_shapecall('a'),
self.without_shapecall('b')])
def test_reshape(n):
a = np.ones(n * n)
b = a.reshape((n, n))
return a.sum() + b.sum()
self._compile_and_test(test_reshape, (types.intp,),
equivs=[self.with_equiv('b', ('n', 'n'))],
asserts=[self.without_shapecall('b')])
def test_transpose(m, n):
a = np.ones((m, n))
b = a.T
c = a.transpose()
# Numba njit cannot compile explicit transpose call!
# c = np.transpose(b)
self._compile_and_test(test_transpose, (types.intp, types.intp),
equivs=[self.with_equiv('a', ('m', 'n')),
self.with_equiv('b', ('n', 'm')),
self.with_equiv('c', ('n', 'm'))])
def test_transpose_3d(m, n, k):
a = np.ones((m, n, k))
b = a.T
c = a.transpose()
d = a.transpose(2,0,1)
dt = a.transpose((2,0,1))
e = a.transpose(0,2,1)
et = a.transpose((0,2,1))
# Numba njit cannot compile explicit transpose call!
# c = np.transpose(b)
self._compile_and_test(test_transpose_3d, (types.intp, types.intp, types.intp),
equivs=[self.with_equiv('a', ('m', 'n', 'k')),
self.with_equiv('b', ('k', 'n', 'm')),
self.with_equiv('c', ('k', 'n', 'm')),
self.with_equiv('d', ('k', 'm', 'n')),
self.with_equiv('dt', ('k', 'm', 'n')),
self.with_equiv('e', ('m', 'k', 'n')),
self.with_equiv('et', ('m', 'k', 'n'))])
def test_random(n):
a0 = np.random.rand(n)
a1 = np.random.rand(n, n)
b0 = np.random.randn(n)
b1 = np.random.randn(n, n)
c0 = np.random.ranf(n)
c1 = np.random.ranf((n, n))
c2 = np.random.ranf(size=(n, n))
d0 = np.random.random_sample(n)
d1 = np.random.random_sample((n, n))
d2 = np.random.random_sample(size=(n, n))
e0 = np.random.sample(n)
e1 = np.random.sample((n, n))
e2 = np.random.sample(size=(n, n))
f0 = np.random.random(n)
f1 = np.random.random((n, n))
f2 = np.random.random(size=(n, n))
g0 = np.random.standard_normal(n)
g1 = np.random.standard_normal((n, n))
g2 = np.random.standard_normal(size=(n, n))
h0 = np.random.chisquare(10, n)
h1 = np.random.chisquare(10, (n, n))
h2 = np.random.chisquare(10, size=(n, n))
i0 = np.random.weibull(10, n)
i1 = np.random.weibull(10, (n, n))
i2 = np.random.weibull(10, size=(n, n))
j0 = np.random.power(10, n)
j1 = np.random.power(10, (n, n))
j2 = np.random.power(10, size=(n, n))
k0 = np.random.geometric(0.1, n)
k1 = np.random.geometric(0.1, (n, n))
k2 = np.random.geometric(0.1, size=(n, n))
l0 = np.random.exponential(10, n)
l1 = np.random.exponential(10, (n, n))
l2 = np.random.exponential(10, size=(n, n))
m0 = np.random.poisson(10, n)
m1 = np.random.poisson(10, (n, n))
m2 = np.random.poisson(10, size=(n, n))
            n0 = np.random.rayleigh(10, n)
"""
Retrain the YOLO model for your own dataset.
"""
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, yolo_loss
from yolo3.utils import get_random_data
class YoloModel():
def __init__(self):
self.JPGPaths = 'D://VOCdevkit/VOC2020/JPEGImages/train.jpg'
self.TXTPaths = 'D://VOCdevkit/VOC2020/ImageSets/Main/train.txt'
self.XMLPaths = 'D://VOCdevkit/VOC2020/Annotations/%s.xml'
self.classes = ["大巴车", "公交车", "绿色渣土车", "红色渣土车", "灰色渣土车", "蓝色渣土车", "危险品罐车", "环卫车", "厢式货车", "水泥搅拌车", "工程车"]
self.annotation_path = '2020_train.txt'
self.log_dir = 'logs/000/'
self.classes_path = 'dabache,gongjiaoche,greenzhatuche,redzhatuche,grayzhatuche,bluezhatuche,weixianpinche,huanweiche,xiangshihuoche,shuinijiaobanche,gongchengche'
self.anchors_path = "10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326"
        self.weights_path = 'logs/000/ep115-loss10.940-val_loss11.115.h5' # put the latest .h5 checkpoint here
        self.get_annotation() # convert the annotation file paths
self.anchors = self.get_anchors()
self.num_anchors = len(self.anchors)
self.class_names = self.get_classes()
self.num_classes = len(self.class_names)
self.input_shape = (416, 416)
        self.val_split = 0.1 # test-set/training-set split ratio
self.batch_size = 2 # note that more GPU memory is required after unfreezing the body
def _main(self):
model = self.create_model(freeze_body=2, weights_path=self.weights_path) # make sure you know what you freeze
logging = TensorBoard(log_dir=self.log_dir)
checkpoint = ModelCheckpoint(self.log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
with open(self.annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*self.val_split)
num_train = len(lines) - num_val
if True:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Unfreeze all of the layers.')
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, self.batch_size))
model.fit_generator(self.data_generator_wrapper(lines[:num_train], self.batch_size, self.input_shape, self.anchors, self.num_classes),
steps_per_epoch=max(1, num_train//self.batch_size),
validation_data=self.data_generator_wrapper(lines[num_train:], self.batch_size, self.input_shape, self.anchors, self.num_classes),
validation_steps=max(1, num_val//self.batch_size),
epochs=200,
initial_epoch=100,
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model.save_weights(self.log_dir + 'trained_weights_final.h5')
# Further training if needed.
def get_classes(self):
'''loads the classes'''
class_names = self.classes_path
class_names =[str(x) for x in class_names.split(',')]
return class_names
def get_anchors(self):
'''loads the anchors from a file'''
anchors = self.anchors_path #
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
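    # The default anchor string above parses into a (9, 2) array of (width, height) pairs,
    # i.e. the standard nine YOLOv3 anchors, three per detection scale.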
def create_model(self, load_pretrained=True, freeze_body=2, weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = self.input_shape # multiple of 32, hw
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
self.num_anchors//3, self.num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, self.num_anchors//3, self.num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(self.num_anchors, self.num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': self.anchors, 'num_classes': self.num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(self, annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
            yield [image_data, *y_true], np.zeros(batch_size)
#!/usr/bin/env python
import pytest
import os
import shutil
import json
import numpy as np
import cv2
import sys
import pandas as pd
from plotnine import ggplot
from plantcv import plantcv as pcv
import plantcv.learn
import plantcv.parallel
import plantcv.utils
# Import matplotlib and use a null Template to block plotting to screen
# This will let us test debug = "plot"
import matplotlib
import dask
from dask.distributed import Client
PARALLEL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parallel_data")
TEST_TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".cache")
TEST_IMG_DIR = "images"
TEST_IMG_DIR2 = "images_w_date"
TEST_SNAPSHOT_DIR = "snapshots"
TEST_PIPELINE = os.path.join(PARALLEL_TEST_DATA, "plantcv-script.py")
META_FIELDS = {"imgtype": 0, "camera": 1, "frame": 2, "zoom": 3, "lifter": 4, "gain": 5, "exposure": 6, "id": 7}
VALID_META = {
# Camera settings
"camera": {
"label": "camera identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"imgtype": {
"label": "image type",
"datatype": "<class 'str'>",
"value": "none"
},
"zoom": {
"label": "camera zoom setting",
"datatype": "<class 'str'>",
"value": "none"
},
"exposure": {
"label": "camera exposure setting",
"datatype": "<class 'str'>",
"value": "none"
},
"gain": {
"label": "camera gain setting",
"datatype": "<class 'str'>",
"value": "none"
},
"frame": {
"label": "image series frame identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"lifter": {
"label": "imaging platform height setting",
"datatype": "<class 'str'>",
"value": "none"
},
# Date-Time
"timestamp": {
"label": "datetime of image",
"datatype": "<class 'datetime.datetime'>",
"value": None
},
# Sample attributes
"id": {
"label": "image identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"plantbarcode": {
"label": "plant barcode identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"treatment": {
"label": "treatment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"cartag": {
"label": "plant carrier identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Experiment attributes
"measurementlabel": {
"label": "experiment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Other
"other": {
"label": "other identifier",
"datatype": "<class 'str'>",
"value": "none"
}
}
METADATA_COPROCESS = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_VIS_ONLY = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_NIR_ONLY = {
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
# Set the temp directory for dask
dask.config.set(temporary_directory=TEST_TMPDIR)
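# Dask worker spill/temporary files are therefore written under the shared test cache directory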
# ##########################
# Tests setup function
# ##########################
def setup_function():
if not os.path.exists(TEST_TMPDIR):
os.mkdir(TEST_TMPDIR)
# ##############################
# Tests for the parallel subpackage
# ##############################
def test_plantcv_parallel_workflowconfig_save_config_file():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_save_config_file")
os.mkdir(cache_dir)
# Define output path/filename
template_file = os.path.join(cache_dir, "config.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Save template file
config.save_config(config_file=template_file)
assert os.path.exists(template_file)
def test_plantcv_parallel_workflowconfig_import_config_file():
# Define input path/filename
config_file = os.path.join(PARALLEL_TEST_DATA, "workflow_config_template.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# import config file
config.import_config(config_file=config_file)
assert config.cluster == "LocalCluster"
def test_plantcv_parallel_workflowconfig_validate_config():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_validate_config")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
# Validate config
assert config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_startdate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_startdate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.start_date = "2020-05-10"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_enddate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_enddate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.end_date = "2020-05-10"
config.timestampformat = "%Y%m%d"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_metadata_terms():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_metadata_terms")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set an incorrect metadata term
config.filename_metadata.append("invalid")
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_filename_metadata():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_filename_metadata")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Do not set required filename_metadata
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_cluster():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_cluster")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set invalid cluster type
config.cluster = "MyCluster"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_metadata_parser_snapshots():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_snapshots_coimg():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots_coimg", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "FAKE"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014"
config.end_date = "2014"
    config.timestampformat = '%Y'  # no date in the filename, so the date-range check and date_format are ignored
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
config.include_all_subdirs = False
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == expected
def test_plantcv_parallel_metadata_parser_regex():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.delimiter = r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)'
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
def test_plantcv_parallel_metadata_parser_images_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR"}
config.start_date = "1970-01-01 00_00_00"
config.end_date = "1970-01-01 00_00_00"
config.timestampformat = "%Y-%m-%d %H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{4}-\d{2}-\d{2} \d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_no_default_dates():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_no_default_dates", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV", "id": "117770"}
config.start_date = None
config.end_date = None
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_check_date_range_wrongdateformat():
start_date = 10
end_date = 10
img_time = '2010-10-10'
with pytest.raises(SystemExit, match=r'does not match format'):
date_format = '%Y%m%d'
_ = plantcv.parallel.check_date_range(
start_date, end_date, img_time, date_format)
def test_plantcv_parallel_metadata_parser_snapshot_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshot_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_fail_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_fail_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"cartag": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_NIR_ONLY
def test_plantcv_parallel_metadata_parser_images_with_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_with_frame", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame",
"output.json")
config.filename_metadata = ["imgtype", "camera", "X", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_camera():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame", "output.json")
config.filename_metadata = ["imgtype", "X", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'none',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'none',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_job_builder_single_image():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_single_image")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
jobs = plantcv.parallel.job_builder(meta=METADATA_VIS_ONLY, config=config)
image_name = list(METADATA_VIS_ONLY.keys())[0]
result_file = os.path.join(cache_dir, image_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', METADATA_VIS_ONLY[image_name]['path'], '--outdir',
cache_dir, '--result', result_file, '--writeimg', '--other', 'on']
if len(expected) != len(jobs[0]):
assert False
else:
        assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_job_builder_coprocess():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_coprocess")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
config.coprocess = "NIR"
jobs = plantcv.parallel.job_builder(meta=METADATA_COPROCESS, config=config)
img_names = list(METADATA_COPROCESS.keys())
vis_name = img_names[0]
vis_path = METADATA_COPROCESS[vis_name]['path']
result_file = os.path.join(cache_dir, vis_name + '.txt')
nir_name = img_names[1]
coresult_file = os.path.join(cache_dir, nir_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', vis_path, '--outdir', cache_dir, '--result', result_file,
'--coresult', coresult_file, '--writeimg', '--other', 'on']
if len(expected) != len(jobs[0]):
assert False
else:
        assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_multiprocess_create_dask_cluster_local():
client = plantcv.parallel.create_dask_cluster(cluster="LocalCluster", cluster_config={})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster():
client = plantcv.parallel.create_dask_cluster(cluster="HTCondorCluster", cluster_config={"cores": 1,
"memory": "1GB",
"disk": "1GB"})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster_invalid_cluster():
with pytest.raises(ValueError):
_ = plantcv.parallel.create_dask_cluster(cluster="Skynet", cluster_config={})
def test_plantcv_parallel_convert_datetime_to_unixtime():
unix_time = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m-%d")
assert unix_time == 0
def test_plantcv_parallel_convert_datetime_to_unixtime_bad_strptime():
with pytest.raises(SystemExit):
_ = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m")
def test_plantcv_parallel_multiprocess():
image_name = list(METADATA_VIS_ONLY.keys())[0]
    # The 'path' entry in the metadata already includes the image filename
    image_path = METADATA_VIS_ONLY[image_name]['path']
result_file = os.path.join(TEST_TMPDIR, image_name + '.txt')
jobs = [['python', TEST_PIPELINE, '--image', image_path, '--outdir', TEST_TMPDIR, '--result', result_file,
'--writeimg', '--other', 'on']]
# Create a dask LocalCluster client
client = Client(n_workers=1)
plantcv.parallel.multiprocess(jobs, client=client)
assert os.path.exists(result_file)
def test_plantcv_parallel_process_results():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
# Assert that the output JSON file matches the expected output JSON file
result_file = open(os.path.join(cache_dir, "appended_results.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "appended_results.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_new_output():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_new_output")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'new_result.json'))
# Assert output matches expected values
result_file = open(os.path.join(cache_dir, "new_result.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "new_result.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_valid_json():
# Test when the file is a valid json file but doesn't contain expected keys
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(PARALLEL_TEST_DATA, "valid.json"))
def test_plantcv_parallel_process_results_invalid_json():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_invalid_json")
os.mkdir(cache_dir)
# Move the test data to the tmp directory
shutil.copytree(os.path.join(PARALLEL_TEST_DATA, "bad_results"), os.path.join(cache_dir, "bad_results"))
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(cache_dir, "bad_results"),
json_file=os.path.join(cache_dir, "bad_results", "invalid.txt"))
# ####################################################################################################################
# ########################################### PLANTCV MAIN PACKAGE ###################################################
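# The matplotlib "Template" backend is a no-op renderer, so tests that set debug = "plot"
# do not open figure windows (see the matplotlib import note at the top of this file)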
matplotlib.use('Template')
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
HYPERSPECTRAL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hyperspectral_data")
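# Filenames of the image, array, and matrix fixtures bundled with the test suite; they are
# resolved against the data directories above inside the individual tests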
HYPERSPECTRAL_DATA = "darkReference"
HYPERSPECTRAL_WHITE = "darkReference_whiteReference"
HYPERSPECTRAL_DARK = "darkReference_darkReference"
HYPERSPECTRAL_HDR = "darkReference.hdr"
HYPERSPECTRAL_MASK = "darkReference_mask.png"
HYPERSPECTRAL_DATA_NO_DEFAULT = "darkReference2"
HYPERSPECTRAL_HDR_NO_DEFAULT = "darkReference2.hdr"
HYPERSPECTRAL_DATA_APPROX_PSEUDO = "darkReference3"
HYPERSPECTRAL_HDR_APPROX_PSEUDO = "darkReference3.hdr"
HYPERSPECTRAL_HDR_SMALL_RANGE = {'description': '{[HEADWALL Hyperspec III]}', 'samples': '800', 'lines': '1',
'bands': '978', 'header offset': '0', 'file type': 'ENVI Standard',
'interleave': 'bil', 'sensor type': 'Unknown', 'byte order': '0',
'default bands': '159,253,520', 'wavelength units': 'nm',
'wavelength': ['379.027', '379.663', '380.3', '380.936', '381.573', '382.209']}
FLUOR_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "photosynthesis_data")
FLUOR_IMG = "PSII_PSD_supopt_temp_btx623_22_rep1.DAT"
TEST_COLOR_DIM = (2056, 2454, 3)
TEST_GRAY_DIM = (2056, 2454)
TEST_BINARY_DIM = TEST_GRAY_DIM
TEST_INPUT_COLOR = "input_color_img.jpg"
TEST_INPUT_GRAY = "input_gray_img.jpg"
TEST_INPUT_GRAY_SMALL = "input_gray_img_small.jpg"
TEST_INPUT_BINARY = "input_binary_img.png"
# Image from http://www.libpng.org/pub/png/png-OwlAlpha.html
# This image may be used, edited and reproduced freely.
TEST_INPUT_RGBA = "input_rgba.png"
TEST_INPUT_BAYER = "bayer_img.png"
TEST_INPUT_ROI_CONTOUR = "input_roi_contour.npz"
TEST_INPUT_ROI_HIERARCHY = "input_roi_hierarchy.npz"
TEST_INPUT_CONTOURS = "input_contours.npz"
TEST_INPUT_OBJECT_CONTOURS = "input_object_contours.npz"
TEST_INPUT_OBJECT_HIERARCHY = "input_object_hierarchy.npz"
TEST_VIS = "VIS_SV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR = "NIR_SV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_VIS_TV = "VIS_TV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR_TV = "NIR_TV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_INPUT_MASK = "input_mask_binary.png"
TEST_INPUT_MASK_OOB = "mask_outbounds.png"
TEST_INPUT_MASK_RESIZE = "input_mask_resize.png"
TEST_INPUT_NIR_MASK = "input_nir.png"
TEST_INPUT_FDARK = "FLUO_TV_dark.png"
TEST_INPUT_FDARK_LARGE = "FLUO_TV_DARK_large"
TEST_INPUT_FMIN = "FLUO_TV_min.png"
TEST_INPUT_FMAX = "FLUO_TV_max.png"
TEST_INPUT_FMASK = "FLUO_TV_MASK.png"
TEST_INPUT_GREENMAG = "input_green-magenta.jpg"
TEST_INPUT_MULTI = "multi_ori_image.jpg"
TEST_INPUT_MULTI_MASK = "multi_ori_mask.jpg"
TEST_INPUT_MULTI_OBJECT = "roi_objects.npz"
TEST_INPUT_MULTI_CONTOUR = "multi_contours.npz"
TEST_INPUT_ClUSTER_CONTOUR = "clusters_i.npz"
TEST_INPUT_MULTI_HIERARCHY = "multi_hierarchy.npz"
TEST_INPUT_VISUALIZE_CONTOUR = "roi_objects_visualize.npz"
TEST_INPUT_VISUALIZE_HIERARCHY = "roi_obj_hierarchy_visualize.npz"
TEST_INPUT_VISUALIZE_CLUSTERS = "clusters_i_visualize.npz"
TEST_INPUT_VISUALIZE_BACKGROUND = "visualize_background_img.png"
TEST_INPUT_GENOTXT = "cluster_names.txt"
TEST_INPUT_GENOTXT_TOO_MANY = "cluster_names_too_many.txt"
TEST_INPUT_CROPPED = 'cropped_img.jpg'
TEST_INPUT_CROPPED_MASK = 'cropped-mask.png'
TEST_INPUT_MARKER = 'seed-image.jpg'
TEST_INPUT_SKELETON = 'input_skeleton.png'
TEST_INPUT_SKELETON_PRUNED = 'input_pruned_skeleton.png'
TEST_FOREGROUND = "TEST_FOREGROUND.jpg"
TEST_BACKGROUND = "TEST_BACKGROUND.jpg"
TEST_PDFS = "naive_bayes_pdfs.txt"
TEST_PDFS_BAD = "naive_bayes_pdfs_bad.txt"
TEST_VIS_SMALL = "setaria_small_vis.png"
TEST_MASK_SMALL = "setaria_small_mask.png"
TEST_VIS_COMP_CONTOUR = "setaria_composed_contours.npz"
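# Expected acute/homology point coordinates for the small setaria test image; only the
# array shape is compared against pcv.acute_vertex output below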
TEST_ACUTE_RESULT = np.asarray([[[119, 285]], [[151, 280]], [[168, 267]], [[168, 262]], [[171, 261]], [[224, 269]],
[[246, 271]], [[260, 277]], [[141, 248]], [[183, 194]], [[188, 237]], [[173, 240]],
[[186, 260]], [[147, 244]], [[163, 246]], [[173, 268]], [[170, 272]], [[151, 320]],
[[195, 289]], [[228, 272]], [[210, 272]], [[209, 247]], [[210, 232]]])
TEST_VIS_SMALL_PLANT = "setaria_small_plant_vis.png"
TEST_MASK_SMALL_PLANT = "setaria_small_plant_mask.png"
TEST_VIS_COMP_CONTOUR_SMALL_PLANT = "setaria_small_plant_composed_contours.npz"
TEST_SAMPLED_RGB_POINTS = "sampled_rgb_points.txt"
TEST_TARGET_IMG = "target_img.png"
TEST_TARGET_IMG_WITH_HEXAGON = "target_img_w_hexagon.png"
TEST_TARGET_IMG_TRIANGLE = "target_img copy.png"
TEST_SOURCE1_IMG = "source1_img.png"
TEST_SOURCE2_IMG = "source2_img.png"
TEST_TARGET_MASK = "mask_img.png"
TEST_TARGET_IMG_COLOR_CARD = "color_card_target.png"
TEST_SOURCE2_MASK = "mask2_img.png"
TEST_TARGET_MATRIX = "target_matrix.npz"
TEST_SOURCE1_MATRIX = "source1_matrix.npz"
TEST_SOURCE2_MATRIX = "source2_matrix.npz"
TEST_MATRIX_B1 = "matrix_b1.npz"
TEST_MATRIX_B2 = "matrix_b2.npz"
TEST_TRANSFORM1 = "transformation_matrix1.npz"
TEST_MATRIX_M1 = "matrix_m1.npz"
TEST_MATRIX_M2 = "matrix_m2.npz"
TEST_S1_CORRECTED = "source_corrected.png"
TEST_SKELETON_OBJECTS = "skeleton_objects.npz"
TEST_SKELETON_HIERARCHIES = "skeleton_hierarchies.npz"
TEST_THERMAL_ARRAY = "thermal_img.npz"
TEST_THERMAL_IMG_MASK = "thermal_img_mask.png"
TEST_INPUT_THERMAL_CSV = "FLIR2600.csv"
PIXEL_VALUES = "pixel_inspector_rgb_values.txt"
# ##########################
# Tests for the main package
# ##########################
@pytest.mark.parametrize("debug", ["print", "plot"])
def test_plantcv_debug(debug, tmpdir):
from plantcv.plantcv._debug import _debug
# Create a test tmp directory
img_outdir = tmpdir.mkdir("sub")
pcv.params.debug = debug
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
_debug(visual=img, filename=os.path.join(img_outdir, TEST_INPUT_COLOR))
assert True
@pytest.mark.parametrize("datatype,value", [[list, []], [int, 2], [float, 2.2], [bool, True], [str, "2"], [dict, {}],
[tuple, ()], [None, None]])
def test_plantcv_outputs_add_observation(datatype, value):
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=datatype, value=value, label=[])
assert outputs.observations["default"]["test"]["value"] == value
def test_plantcv_outputs_add_observation_invalid_type():
# Create output instance
outputs = pcv.Outputs()
with pytest.raises(RuntimeError):
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=list, value=np.array([2]), label=[])
def test_plantcv_outputs_save_results_json_newfile(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_json_existing_file(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "data_results.txt")
shutil.copyfile(os.path.join(TEST_DATA, "data_results.txt"), outfile)
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_csv(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.csv")
testfile = os.path.join(TEST_DATA, "data_results.csv")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='string', trait='string variable', method='string', scale='none',
datatype=str, value="string", label="none")
outputs.add_observation(sample='default', variable='boolean', trait='boolean variable', method='boolean',
scale='none', datatype=bool, value=True, label="none")
outputs.add_observation(sample='default', variable='list', trait='list variable', method='list',
scale='none', datatype=list, value=[1, 2, 3], label=[1, 2, 3])
outputs.add_observation(sample='default', variable='tuple', trait='tuple variable', method='tuple',
scale='none', datatype=tuple, value=(1, 2), label=(1, 2))
outputs.add_observation(sample='default', variable='tuple_list', trait='list of tuples variable',
method='tuple_list', scale='none', datatype=list, value=[(1, 2), (3, 4)], label=[1, 2])
outputs.save_results(filename=outfile, outformat="csv")
with open(outfile, "r") as fp:
results = fp.read()
with open(testfile, "r") as fp:
test_results = fp.read()
assert results == test_results
def test_plantcv_transform_warp_smaller():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    bimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Re-threshold after resizing because interpolation does not strictly preserve binary values
    bimg_small = cv2.resize(bimg, (200, 300))
    bimg_small[bimg_small > 0] = 255
    mrow, mcol = bimg_small.shape
    vrow, vcol, vdepth = img.shape
    pcv.params.debug = None
    mask_warped = pcv.transform.warp(bimg_small, img[:, :, 2],
                                     pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                     refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    pcv.params.debug = 'plot'
    mask_warped_plot = pcv.transform.warp(bimg_small, img[:, :, 2],
                                          pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                          refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    assert np.count_nonzero(mask_warped) == 93142
    assert np.count_nonzero(mask_warped_plot) == 93142
def test_plantcv_transform_warp_larger():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    gimg_large = cv2.resize(gimg, (5000, 7000))
    mrow, mcol = gimg_large.shape
    vrow, vcol, vdepth = img.shape
    pcv.params.debug = 'print'
    mask_warped_print = pcv.transform.warp(gimg_large, img,
                                           pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                           refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    assert np.sum(mask_warped_print) == 83103814
def test_plantcv_transform_warp_rgbimgerror():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    gimg_large = cv2.resize(gimg, (5000, 7000))
    mrow, mcol = gimg_large.shape
    vrow, vcol, vdepth = img.shape
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img, img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
def test_plantcv_transform_warp_4ptserror():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    mrow, mcol, _ = img.shape
    vrow, vcol, vdepth = img.shape
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 0], img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (0, vrow - 1)])
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 1], img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 2], img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1), (0, vrow - 1)])
def test_plantcv_acute():
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
_ = pcv.acute(obj=np.array(([[213, 190]], [[83, 61]], [[149, 246]])), win=84, thresh=192, mask=mask)
_ = pcv.acute(obj=np.array(([[3, 29]], [[31, 102]], [[161, 63]])), win=148, thresh=56, mask=mask)
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
# Test with debug = None
pcv.params.debug = None
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
homology_pts = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
    assert all(i == j for i, j in zip(np.shape(homology_pts), (29, 1, 2)))
def test_plantcv_acute_vertex():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, label="prefix")
_ = pcv.acute_vertex(obj=[], win=5, thresh=15, sep=5, img=img)
_ = pcv.acute_vertex(obj=[], win=.01, thresh=.01, sep=1, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
# Test with debug = None
pcv.params.debug = None
acute = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(np.shape(acute), np.shape(TEST_ACUTE_RESULT)))
pcv.outputs.clear()
def test_plantcv_acute_vertex_bad_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(result, [0, ("NA", "NA")]))
pcv.outputs.clear()
def test_plantcv_analyze_bound_horizontal():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300, label="prefix")
pcv.outputs.clear()
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100)
_ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(pcv.outputs.observations["default"]) == 7
def test_plantcv_analyze_bound_horizontal_grayscale_image():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(np.shape(boundary_img1)) == 3
def test_plantcv_analyze_bound_horizontal_neg_y():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug=None, line position that will trigger -y
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=-1000)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=0)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=2056)
assert pcv.outputs.observations['default']['height_above_reference']['value'] == 713
def test_plantcv_analyze_bound_vertical():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
def test_plantcv_analyze_bound_vertical_grayscale_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
pcv.outputs.clear()
def test_plantcv_analyze_bound_vertical_neg_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug="plot", line position that will trigger -x
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=2454)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 441
def test_plantcv_analyze_bound_vertical_small_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug='plot', line position that will trigger -x, and two channel object
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1)
assert pcv.outputs.observations['default']['width_right_reference']['value'] == 441
def test_plantcv_analyze_color():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = "print"
# pcv.params.debug = "print"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
# Test with debug = "plot"
# pcv.params.debug = "plot"
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = None
# pcv.params.debug = None
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='rgb')
assert pcv.outputs.observations['default']['hue_median']['value'] == 84.0
def test_plantcv_analyze_color_incorrect_image():
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img_binary, mask=mask, hist_plot_type=None)
def test_plantcv_analyze_color_bad_hist_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='bgr')
def test_plantcv_analyze_color_incorrect_hist_plot_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="bgr")
def test_plantcv_analyze_nir():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=img, mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_nir_16bit():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=np.uint16(img), mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_object():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
pcv.outputs.clear()
assert len(obj_images) != 0
def test_plantcv_analyze_object_grayscale_input():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 1
def test_plantcv_analyze_object_zero_slope():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:11, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[11, 10]], [[12, 10]], [[13, 10]], [[14, 10]], [[15, 10]], [[16, 10]],
[[17, 10]], [[18, 10]], [[19, 10]], [[20, 10]], [[21, 10]], [[22, 10]], [[23, 10]],
[[24, 10]], [[25, 10]], [[26, 10]], [[27, 10]], [[28, 10]], [[29, 10]], [[30, 10]],
[[31, 10]], [[32, 10]], [[33, 10]], [[34, 10]], [[35, 10]], [[36, 10]], [[37, 10]],
[[38, 10]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]], [[34, 10]],
[[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]], [[27, 10]],
[[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]], [[20, 10]],
[[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]], [[13, 10]],
[[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2d():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[0:5, 45:49, 0] = 255
img[0:5, 0:5, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[45, 1]], [[45, 2]], [[45, 3]], [[45, 4]], [[46, 4]], [[47, 4]], [[48, 4]],
[[48, 3]], [[48, 2]], [[48, 1]], [[47, 1]], [[46, 1]], [[1, 1]], [[1, 2]],
[[1, 3]], [[1, 4]], [[2, 4]], [[3, 4]], [[4, 4]], [[4, 3]], [[4, 2]],
[[4, 1]], [[3, 1]], [[2, 1]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2e():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:15, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[10, 11]], [[10, 12]], [[10, 13]], [[10, 14]], [[11, 14]], [[12, 14]],
[[13, 14]], [[14, 14]], [[15, 14]], [[16, 14]], [[17, 14]], [[18, 14]], [[19, 14]],
[[20, 14]], [[21, 14]], [[22, 14]], [[23, 14]], [[24, 14]], [[25, 14]], [[26, 14]],
[[27, 14]], [[28, 14]], [[29, 14]], [[30, 14]], [[31, 14]], [[32, 14]], [[33, 14]],
[[34, 14]], [[35, 14]], [[36, 14]], [[37, 14]], [[38, 14]], [[39, 14]], [[39, 13]],
[[39, 12]], [[39, 11]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]],
[[34, 10]], [[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]],
[[27, 10]], [[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]],
[[20, 10]], [[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]],
[[13, 10]], [[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_small_contour():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_contour = [np.array([[[0, 0]], [[0, 50]], [[50, 50]], [[50, 0]]], dtype=np.int32)]
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert obj_images is None
def test_plantcv_analyze_thermal_values():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_thermal_values")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
# img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_THERMAL_IMG_MASK), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_THERMAL_ARRAY), encoding="latin1")
img = contours_npz['arr_0']
pcv.params.debug = None
thermal_hist = pcv.analyze_thermal_values(thermal_array=img, mask=mask, histplot=True)
assert thermal_hist is not None and pcv.outputs.observations['default']['median_temp']['value'] == 33.20922
def test_plantcv_apply_mask_white():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_white")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_black():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_black")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
hyper_array = pcv.hyperspectral.read_data(filename=spectral_filename)
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img_stacked, mask=img, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
masked_array = pcv.apply_mask(img=hyper_array.array_data, mask=img, mask_color="black")
assert np.mean(masked_array) == 13.97111260224949
def test_plantcv_apply_mask_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="wite")
def test_plantcv_auto_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=(20, 10), padding_y=(20, 10), color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], color='image')
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=2000, padding_y=2000, color='image')
# Test with debug = None
pcv.params.debug = None
cropped = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=20, padding_y=20, color='black')
x, y, z = np.shape(img1)
x1, y1, z1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='white')
x, y = np.shape(gray_img)
x1, y1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_bad_color_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='wite')
def test_plantcv_auto_crop_bad_padding_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x="one", padding_y=20, color='white')
def test_plantcv_canny_edge_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.canny_edge_detect(img=rgb_img, mask=mask, mask_color='white')
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.canny_edge_detect(img=img, thickness=2)
_ = pcv.canny_edge_detect(img=img)
# Test with debug = None
pcv.params.debug = None
edge_img = pcv.canny_edge_detect(img=img)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(edge_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(edge_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_canny_edge_detect_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color="gray")
def test_plantcv_closing():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.closing(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
_ = pcv.closing(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.closing(bin_img)
assert np.sum(filtered_img) == 16261860
def test_plantcv_closing_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.closing(rgb_img)
def test_plantcv_cluster_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, show_grid=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
hierachy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierachy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
clusters_i, contours, hierachy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_splitimg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
hierachy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
obj_hierarchy = hierachy['arr_0']
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file=None, filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=[[0]], contours=[],
hierarchy=np.array([[[1, -1, -1, -1]]]))
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file='multi', filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None, filenames=cluster_names)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None,
filenames=cluster_names_too_many)
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_cluster_contours_splitimg_grayscale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg_grayscale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
hierachy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
obj_hierarchy = hierachy['arr_0']
pcv.params.debug = None
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_color_palette():
# Return a color palette
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_random():
# Return a color palette in random order
pcv.params.color_sequence = "random"
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_saved():
# Return a color palette that was saved
pcv.params.saved_color_scale = [[0, 0, 0], [255, 255, 255]]
colors = pcv.color_palette(num=2, saved=True)
assert colors == [[0, 0, 0], [255, 255, 255]]
def test_plantcv_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50)
def test_plantcv_crop_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50, 4)
def test_plantcv_crop_position_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_three_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_three_channel, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_color():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE))
mask_non_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="top", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=45, y=2, v_pos="top", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_bad_input_x():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=-1, y=-1, v_pos="top", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_vpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="below", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_hpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="starboard")
def test_plantcv_dilate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_dilate")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
dilate_img = pcv.dilate(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(dilate_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(dilate_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_dilate_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.dilate(img, 1, 1)
def test_plantcv_erode():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_erode")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
erode_img = pcv.erode(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(erode_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(erode_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_erode_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.erode(img, 1, 1)
def test_plantcv_distance_transform():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_distance_transform")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = None
pcv.params.debug = None
distance_transform_img = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(distance_transform_img), np.shape(mask)))
def test_plantcv_fatal_error():
# Verify that the fatal_error function raises a RuntimeError
with pytest.raises(RuntimeError):
pcv.fatal_error("Test error")
def test_plantcv_fill():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill(bin_img=img, size=63632)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.fill(bin_img=img, size=63632)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill(bin_img=img, size=63632)
# Assert that the output image has the dimensions of the input image
# assert all([i == j] for i, j in zip(np.shape(fill_img), TEST_BINARY_DIM))
assert np.sum(fill_img) == 0
def test_plantcv_fill_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill(bin_img=img, size=1)
def test_plantcv_fill_holes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill_holes(bin_img=img)
pcv.params.debug = "plot"
_ = pcv.fill_holes(bin_img=img)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill_holes(bin_img=img)
assert np.sum(fill_img) > np.sum(img)
def test_plantcv_fill_holes_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill_holes(bin_img=img)
def test_plantcv_find_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = None
pcv.params.debug = None
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_find_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_flip():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_flip")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.flip(img=img, direction="horizontal")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.flip(img=img, direction="vertical")
_ = pcv.flip(img=img_binary, direction="vertical")
# Test with debug = None
pcv.params.debug = None
flipped_img = pcv.flip(img=img, direction="horizontal")
    assert all(i == j for i, j in zip(np.shape(flipped_img), TEST_COLOR_DIM))
def test_plantcv_flip_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.flip(img=img, direction="vert")
def test_plantcv_gaussian_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_gaussian_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
_ = pcv.gaussian_blur(img=img_color, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = None
pcv.params.debug = None
gaussian_img = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
imgavg = np.average(img)
gavg = np.average(gaussian_img)
assert gavg != imgavg
def test_plantcv_get_kernel_cross():
kernel = pcv.get_kernel(size=(3, 3), shape="cross")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_rectangle():
kernel = pcv.get_kernel(size=(3, 3), shape="rectangle")
assert (kernel == np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])).all()
def test_plantcv_get_kernel_ellipse():
kernel = pcv.get_kernel(size=(3, 3), shape="ellipse")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_bad_input_size():
with pytest.raises(ValueError):
_ = pcv.get_kernel(size=(1, 1), shape="ellipse")
def test_plantcv_get_kernel_bad_input_shape():
with pytest.raises(RuntimeError):
_ = pcv.get_kernel(size=(3, 1), shape="square")
def test_plantcv_get_nir_sv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR)
assert nirpath == nirpath1
def test_plantcv_get_nir_tv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS_TV)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR_TV)
assert nirpath == nirpath1
def test_plantcv_hist_equalization():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = None
pcv.params.debug = None
hist = pcv.hist_equalization(gray_img=img)
histavg = np.average(hist)
imgavg = np.average(img)
assert histavg != imgavg
def test_plantcv_hist_equalization_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), 1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.hist_equalization(gray_img=img)
def test_plantcv_image_add():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_add")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = None
pcv.params.debug = None
added_img = pcv.image_add(gray_img1=img1, gray_img2=img2)
    assert all(i == j for i, j in zip(np.shape(added_img), TEST_BINARY_DIM))
def test_plantcv_image_subtract():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_sub")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = 'print'
_ = pcv.image_subtract(img1, img2)
# Test with debug = "plot"
pcv.params.debug = 'plot'
_ = pcv.image_subtract(img1, img2)
# Test with debug = None
pcv.params.debug = None
new_img = pcv.image_subtract(img1, img2)
assert np.array_equal(new_img, np.zeros(np.shape(new_img), np.uint8))
def test_plantcv_image_subtract_fail():
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
# test
with pytest.raises(RuntimeError):
_ = pcv.image_subtract(img1, img2)
def test_plantcv_invert():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_invert")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.invert(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.invert(gray_img=img)
# Test with debug = None
pcv.params.debug = None
inverted_img = pcv.invert(gray_img=img)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(inverted_img), TEST_BINARY_DIM))
# -*- coding: utf-8 -*-
"""
This module contains the Hyperheuristic class.
Created on Thu Jan 9 15:36:43 2020
@author: <NAME> (jcrvz.github.io), e-mail: <EMAIL>
"""
import numpy as np
import scipy.stats as st
from customhys.metaheuristic import Metaheuristic
from customhys import tools as jt
from datetime import datetime
import json
from os.path import exists as _check_path
from os import makedirs as _create_path
class Hyperheuristic:
"""
    This is the Hyperheuristic class; each object corresponds to a hyper-heuristic process implemented with a heuristic
    collection from Operators, used to build metaheuristics through the Metaheuristic module.
"""
def __init__(self, heuristic_space='default.txt', problem=None, parameters=None, file_label='', weights_array=None):
"""
        Create a hyper-heuristic process using an operator collection as heuristic space.
:param str heuristic_space: Optional.
The heuristic space or search space collection. It could be a string indicating the file name, assuming it
is located in the folder ``./collections/``, or a list with tuples (check the default collection
``./collections/default.txt'``) just like ``operators.build_operators`` generates. The default is
'default.txt'.
:param dict problem:
This is a dictionary containing the 'function' that maps a 1-by-D array of real values to a real value,
'is_constrained' flag that indicates the solution is inside the search space, and the 'boundaries' (a tuple
with two lists of size D). These two lists correspond to the lower and upper limits of domain, such as:
``boundaries = (lower_boundaries, upper_boundaries)``
**Note:** Dimensions (D) of search domain are read from these boundaries. The problem can be obtained from
the ``benchmark_func`` module.
:param dict parameters:
Parameters to implement the hyper-heuristic procedure, the following fields must be provided: 'cardinality',
'num_iterations', 'num_agents', 'num_replicas', 'num_steps', 'stagnation_percentage', 'max_temperature', and
'cooling_rate'. The default is showing next:
parameters = {cardinality=3, # Max. numb. of SOs in MHs, lvl:1
num_iterations=100, # Iterations a MH performs, lvl:1
num_agents=30, # Agents in population, lvl:1
num_replicas=50, # Replicas per each MH, lvl:2
num_steps=100, # Trials per HH step, lvl:2
stagnation_percentage=0.3, # Stagnation percentage, lvl:2
max_temperature=200, # Initial temperature (SA), lvl:2
cooling_rate=0.05} # Cooling rate (SA), lvl:2
**Note:** Level (lvl) flag corresponds to the heuristic level of the parameter. lvl:1 concerns to mid-level
heuristics like metaheuristics, and lvl:2 to high-level heuristics like hyper-heuristics.
:param str file_label: Optional.
Tag or label for saving files. The default is ''.
:param numpy.array weights_array: Optional.
Weights of the search operators, if there is a-priori information about them. The default is None.
"""
# Read the heuristic space
if isinstance(heuristic_space, list):
self.heuristic_space = heuristic_space
elif isinstance(heuristic_space, str):
with open('collections/' + heuristic_space, 'r') as operators_file:
self.heuristic_space = [eval(line.rstrip('\n')) for line in operators_file]
else:
raise HyperheuristicError('Invalid heuristic_space')
# Assign default values
if parameters is None:
parameters = dict(cardinality=3, # Max. numb. of SOs in MHs, lvl:1
num_iterations=100, # Iterations a MH performs, lvl:1
num_agents=30, # Agents in population, lvl:1
num_replicas=50, # Replicas per each MH, lvl:2
num_steps=100, # Trials per HH step, lvl:2
stagnation_percentage=0.3, # Stagnation percentage, lvl:2
max_temperature=200, # Initial temperature (SA), lvl:2
cooling_rate=0.05) # Cooling rate (SA), lvl:2
# Read the problem
if problem:
self.problem = problem
else:
raise HyperheuristicError('Problem must be provided')
# Read the heuristic space size
self.num_operators = len(self.heuristic_space)
# Read the weights (if it is entered)
self.weights_array = weights_array
# Initialise other parameters
self.parameters = parameters
self.file_label = file_label
def run(self):
"""
        Run the hyper-heuristic based on Simulated Annealing (SA) to find the best metaheuristic. Each metaheuristic is
        run 'num_replicas' times to obtain statistics and, from those, its performance. Once the process ends, it returns:
- solution: The sequence of search operators that compose the metaheuristic.
- performance: The metric value defined in ``get_performance``.
- encoded_solution: The sequence of indices that correspond to the search operators.
- historicals: A dictionary of information from each step. Its keys are: 'step', 'encoded_solution',
'solution', 'performances', and 'details'. The latter, 'details', is also a dictionary which contains
information about each replica carried out with the metaheuristic. Its fields are 'historical' (each
iteration that the metaheuristic has performed), 'fitness', 'positions', and 'statistics'.
:returns: solution (list), performance (float), encoded_solution (list)
"""
# Read the cardinality (which is the maximum allowed one)
max_cardinality = self.parameters['cardinality']
def obtain_neighbour_solution(sol=None):
"""
This method selects a neighbour candidate solution for a given candidate solution ``sol``. To do so, it
            adds, deletes, or perturbs a randomly chosen operator index from the current sequence. If this sequence
is None, the method returns a new 1-cardinality sequence at random.
:param list sol: Optional.
Sequence of heuristic indices (or encoded solution). The default is None, which means that there is no
current sequence, so an initial one is required.
:return: list.
"""
if sol is None:
# Create a new 1-MH from scratch by using a weights array (if so)
encoded_neighbour = np.random.choice(self.num_operators, 1, replace=False, p=self.weights_array)
elif isinstance(sol, np.ndarray):
current_cardinality = len(sol)
                # First read the available actions. Those could be 'Add', 'Del', and 'Per'
if current_cardinality >= max_cardinality:
available_options = ['Del', 'Per']
elif current_cardinality <= 1:
available_options = ['Add', 'Per']
else:
available_options = ['Add', 'Del', 'Per']
# Decide (randomly) which action to do
action = np.random.choice(available_options)
# Perform the corresponding action
if action == 'Add':
# Select an operator excluding the ones in the current solution
new_operator = np.random.choice(np.setdiff1d(np.arange(self.num_operators), sol))
# Select where to add such an operator, since ``operator_location`` value represents:
# 0 - left side of the first operator
# 1 - right side of the first operator or left side of the second one,
# ..., and so forth.
#
# | operator 1 | operator 2 | operator 3 | ... | operator N |
# 0 <--------> 1 <--------> 2 <--------> 3 <-- ... --> N-1 <---------> N
operator_location = np.random.randint(current_cardinality + 1)
# Add the selected operator
encoded_neighbour = np.array((*sol[:operator_location], new_operator, *sol[operator_location:]))
elif action == 'Del':
# Delete an operator randomly selected
encoded_neighbour = np.delete(sol, np.random.randint(current_cardinality))
else:
# Copy the current solution
encoded_neighbour = np.copy(sol)
                    # Perturb a randomly selected operator, excluding the ones already in the sequence
encoded_neighbour[np.random.randint(current_cardinality)] = np.random.choice(
np.setdiff1d(np.arange(self.num_operators), sol))
else:
raise HyperheuristicError('Invalid type of current solution!')
# Decode the neighbour solution
neighbour = [self.heuristic_space[index] for index in encoded_neighbour]
# Return the neighbour sequence and its decoded equivalent
return encoded_neighbour, neighbour
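        # Illustration (added for clarity, not in the original code): with num_operators = 10,
        # max_cardinality = 3 and a current encoded solution [4, 7], the possible moves are
        #   'Add' -> e.g. [4, 2, 7]  (a new index inserted at a random position)
        #   'Del' -> e.g. [4]        (one index removed at random)
        #   'Per' -> e.g. [4, 9]     (one index replaced by an index not already in the sequence)
        # At cardinality 1 only 'Add'/'Per' are available; at max_cardinality only 'Del'/'Per'.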
def obtain_temperature(step_val, function='boltzmann'):
"""
Return the updated temperature according to a defined scheme ``function``.
:param int step_val:
Step (or iteration) value of the current state of the hyper-heuristic search.
:param str function: Optional.
Mechanism for updating the temperature. It can be 'exponential', 'fast', or 'boltzmann'. The default
is 'boltzmann'.
:return: float
"""
if function == 'exponential':
return self.parameters['max_temperature'] * np.power(1 - self.parameters['cooling_rate'], step_val)
elif function == 'fast':
return self.parameters['max_temperature'] / step_val
else: # boltzmann
return self.parameters['max_temperature'] / np.log(step_val + 1)
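        # Worked example (illustrative, using the default parameters): with max_temperature = 200
        # and cooling_rate = 0.05, at step_val = 10 the three schedules give
        #   exponential: 200 * (1 - 0.05)**10 ~= 119.7
        #   fast:        200 / 10             = 20.0
        #   boltzmann:   200 / ln(11)         ~= 83.4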
# Acceptance function
def check_acceptance(delta, temp, function='exponential'):
"""
Return a flag indicating if the current performance value can be accepted according to the ``function``.
:param float delta:
Energy change for determining the acceptance probability.
:param float temp:
Temperature value for determining the acceptance probability.
:param str function: Optional.
                Function for determining the acceptance probability. It can be 'exponential' or 'boltzmann'. The default
                is 'exponential'.
:return: bool
"""
if function == 'exponential':
                return (delta <= 0) or (np.random.rand() < np.exp(-delta / temp))
else: # boltzmann
                return (delta <= 0) or (np.random.rand() < 1. / (1. + np.exp(delta / temp)))
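        # Worked example (illustrative): a worsening candidate with delta = 2.0 at temperature 83.4
        # is still accepted with probability exp(-2.0 / 83.4) ~= 0.98 under the 'exponential'
        # (Metropolis) rule; as the temperature decreases, the same delta is rejected more often.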
# Create the initial solution
current_encoded_solution, current_solution = obtain_neighbour_solution()
# Evaluate this solution
current_performance, current_details = self.evaluate_metaheuristic(current_solution)
# Initialise the best solution and its performance
best_encoded_solution = np.copy(current_encoded_solution)
best_performance = current_performance
# Initialise historical register
# historicals = dict(encoded_solution=best_encoded_solution, performance=best_performance,
# details=current_details)
# Save this historical register, step = 0
_save_step(0, dict(encoded_solution=best_encoded_solution, performance=best_performance,
details=current_details), self.file_label)
# Print the first status update, step = 0
print('{} :: Step: {}, Perf: {}, e-Sol: {}'.format(self.file_label, 0, best_performance, best_encoded_solution))
# Step, stagnation counter and its maximum value
step = 0
stag_counter = 0
max_stag = round(self.parameters['stagnation_percentage'] * self.parameters['num_steps'])
# Perform the annealing simulation as hyper-heuristic process
while (step <= self.parameters['num_steps']) and (stag_counter <= max_stag):
step += 1
# Generate a neighbour solution (just indices-codes)
candidate_encoded_solution, candidate_solution = obtain_neighbour_solution(current_encoded_solution)
# Evaluate this candidate solution
candidate_performance, candidate_details = self.evaluate_metaheuristic(candidate_solution)
# Determine the energy (performance) change
delta_energy = candidate_performance - current_performance
# Update temperature
temperature = obtain_temperature(step)
# Accept the current solution via Metropolis criterion
if check_acceptance(delta_energy, temperature):
# Update the current solution and its performance
current_encoded_solution = np.copy(candidate_encoded_solution)
current_solution = np.copy(candidate_solution)
current_performance = candidate_performance
# if delta_energy > 0:
# print('{} :: Step: {}, Perf: {}, e-Sol: {} [Accepted]'.format(
# self.file_label, step, current_performance, current_encoded_solution))
            # If the candidate solution is strictly better than the current best solution
if candidate_performance < best_performance:
# Update the best solution and its performance
best_encoded_solution = np.copy(candidate_encoded_solution)
best_solution = np.copy(candidate_solution)
best_performance = candidate_performance
# Reset the stagnation counter
stag_counter = 0
# Save this information
_save_step(step, {
'encoded_solution': best_encoded_solution,
'performance': best_performance,
'details': candidate_details
}, self.file_label)
# Print update
print('{} :: Step: {}, Perf: {}, e-Sol: {}'.format(
self.file_label, step, best_performance, best_encoded_solution))
else:
# Update the stagnation counter
stag_counter += 1
# Return the best solution found and its details
return best_solution, best_performance, best_encoded_solution
def evaluate_metaheuristic(self, search_operators):
"""
Evaluate the current sequence of ``search_operators`` as a metaheuristic. This process is repeated
``parameters['num_replicas']`` times and, then, the performance is determined. In the end, the method returns
the performance value and the details for all the runs. These details are ``historical_data``, ``fitness_data``,
``position_data``, and ``fitness_stats``.
:param list search_operators:
Sequence of search operators. These must be in the tuple form (decoded version). Check the ``metaheuristic``
module for further information.
:return: float, dict
"""
# Initialise the historical registers
historical_data = list()
fitness_data = list()
position_data = list()
# Run the metaheuristic several times
for rep in range(1, self.parameters['num_replicas'] + 1):
# Call the metaheuristic
mh = Metaheuristic(self.problem, search_operators, self.parameters['num_agents'],
self.parameters['num_iterations'])
# Run this metaheuristic
mh.run()
# Store the historical values from this run
historical_data.append(mh.historical)
# Read and store the solution obtained
_temporal_position, _temporal_fitness = mh.get_solution()
fitness_data.append(_temporal_fitness)
position_data.append(_temporal_position)
# print('-- MH: {}, fitness={}'.format(rep, _temporal_fitness))
# Determine a performance metric once finish the repetitions
fitness_stats = self.get_statistics(fitness_data)
# Return the performance value and the corresponding details
return self.get_performance(fitness_stats), dict(
historical=historical_data, fitness=fitness_data, positions=position_data, statistics=fitness_stats)
def brute_force(self):
"""
This method performs a brute force procedure solving the problem via all the available search operators without
integrating a high-level search method. So, each search operator is used as a 1-cardinality metaheuristic.
Results are directly saved as json files
:return: None.
"""
# Apply all the search operators in the collection as 1-cardinality MHs
for operator_id in range(self.num_operators):
# Read the corresponding operator
operator = [self.heuristic_space[operator_id]]
# Evaluate it within the metaheuristic structure
operator_performance, operator_details = self.evaluate_metaheuristic(operator)
# Save information
_save_step(operator_id, {
'encoded_solution': operator_id,
'performance': operator_performance,
'statistics': operator_details['statistics']
}, self.file_label)
# Print update
print('{} :: Operator {} of {}, Perf: {}'.format(
self.file_label, operator_id + 1, self.num_operators, operator_performance))
def basic_metaheuristics(self):
"""
This method performs a brute force procedure solving the problem via all the predefined metaheuristics in
'./collections/basicmetaheuristics.txt'. Many of them are 1-cardinality MHs but other are 2-cardinality ones.
This process does not require a high-level search method. Results are directly saved as json files.
:return: None.
"""
        # Apply all the predefined basic metaheuristics in the collection (1- or 2-cardinality MHs)
for operator_id in range(self.num_operators):
operator = self.heuristic_space[operator_id]
# Read the corresponding operator
if isinstance(operator, tuple):
operator = [operator]
# Evaluate it within the metaheuristic structure
operator_performance, operator_details = self.evaluate_metaheuristic(operator)
# Save information
_save_step(operator_id, {
'encoded_solution': operator_id,
'performance': operator_performance,
'statistics': operator_details['statistics']
}, self.file_label)
# Print update
print('{} :: BasicMH {} of {}, Perf: {}'.format(
self.file_label, operator_id + 1, self.num_operators, operator_performance))
@staticmethod
def get_performance(statistics):
"""
Return the performance from fitness values obtained from running a metaheuristic several times. This method uses
the Median and Interquartile Range values for such a purpose:
performance = Med{fitness values} + IQR{fitness values}
**Note:** If an alternative formula is needed, check the commented options.
        :param dict statistics: Dictionary of fitness statistics, as returned by ``get_statistics``.
        :return: performance value
        :rtype: float
"""
# TODO: Verify if using conditional for choosing between options is not cost computing
# return statistics['Med'] # Option 1
# return statistics['Avg'] + statistics['Std'] # Option 2
return statistics['Med'] + statistics['IQR'] # Option 3
# return statistics['Avg'] + statistics['Std'] + statistics['Med'] + statistics['IQR'] # Option 4
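        # Worked example (illustrative): for fitness values [1, 2, 2, 3, 10], Med = 2 and IQR = 1
        # (Q1 = 2, Q3 = 3 with linear interpolation), so the reported performance is 2 + 1 = 3.
        # The outlier (10) barely affects the metric, which is the appeal of Med + IQR over Avg + Std.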
@staticmethod
def get_statistics(raw_data):
"""
        Return statistics from all the fitness values found after running a metaheuristic several times. The computed
statistics are ``nob`` (number of observations), ``Min`` (minimum), ``Max`` (maximum), ``Avg`` (average),
``Std`` (standard deviation), ``Skw`` (skewness), ``Kur`` (kurtosis), ``IQR`` (interquartile range),
``Med`` (median), and ``MAD`` (Median absolute deviation).
:param list raw_data:
List of the fitness values.
:return: dict
"""
# Get descriptive statistics
dst = st.describe(raw_data)
# Store statistics
return dict(nob=dst.nobs,
Min=dst.minmax[0],
Max=dst.minmax[1],
Avg=dst.mean,
Std=np.std(raw_data),
Skw=dst.skewness,
Kur=dst.kurtosis,
IQR=st.iqr(raw_data),
                    Med=np.median(raw_data),
                    MAD=st.median_abs_deviation(raw_data))
"""Console script for zalando_classification."""
import sys
import click
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_probability as tfp
import pandas as pd
from gpdre import GaussianProcessDensityRatioEstimator
from gpdre.benchmarks import SugiyamaKrauledatMuellerDensityRatioMarginals
from gpdre.datasets import make_classification_dataset
from gpdre.base import MLPDensityRatioEstimator, LogisticRegressionDensityRatioEstimator
from gpdre.external.rulsif import RuLSIFDensityRatioEstimator
from gpdre.external.kliep import KLIEPDensityRatioEstimator
from gpdre.external.kmm import KMMDensityRatioEstimator
from gpdre.initializers import KMeans
from gpflow.models import SVGP
from gpflow.kernels import Matern52
from sklearn.linear_model import LogisticRegression
from pathlib import Path
K.set_floatx("float64")
# shortcuts
tfd = tfp.distributions
# sensible defaults
SUMMARY_DIR = "logs/"
SEED = 8888
dataset_seed = 8888
num_features = 2
num_samples = 1000
num_train = 500
num_test = 500
num_inducing_points = 300
optimizer = "adam"
epochs = 2000
batch_size = 100
buffer_size = 1000
jitter = 1e-6
num_seeds = 10
# properties of the distribution
props = {
"mean": tfd.Distribution.mean,
"mode": tfd.Distribution.mode,
"median": lambda d: d.distribution.quantile(0.5),
# "sample": tfd.Distribution.sample, # single sample
}
def class_posterior(x1, x2):
return 0.5 * (1 + tf.tanh(x1 - tf.nn.relu(-x2)))
def metric(X_train, y_train, X_test, y_test, sample_weight=None,
random_state=None):
model = LogisticRegression(C=1.0, random_state=random_state)
model.fit(X_train, y_train, sample_weight=sample_weight)
return model.score(X_test, y_test)
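# Usage sketch (added for illustration): ``metric`` scores a plain logistic regression on the test
# split, optionally reweighting training points by an estimated density ratio r(x) ~ p_test(x) / p_train(x):
#   acc_uniform  = metric(X_train, y_train, X_test, y_test, random_state=0)
#   acc_weighted = metric(X_train, y_train, X_test, y_test,
#                         sample_weight=r.ratio(X_train).numpy(), random_state=0)
# This mirrors the 'uniform' and 'exact' baselines computed in ``main`` below.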
@click.command()
@click.argument("name")
@click.option("--summary-dir", default=SUMMARY_DIR,
type=click.Path(file_okay=False, dir_okay=True),
help="Summary directory.")
@click.option("-s", "--seed", default=SEED, type=int, help="Random seed")
def main(name, summary_dir, seed):
summary_path = Path(summary_dir).joinpath("sugiyama")
summary_path.mkdir(parents=True, exist_ok=True)
r = SugiyamaKrauledatMuellerDensityRatioMarginals()
rows = []
for seed in range(num_seeds):
# (X_train, y_train), (X_test, y_test) = r.train_test_split(X, y, seed=seed)
(X_train, y_train), (X_test, y_test) = r.make_covariate_shift_dataset(
num_test, num_train, class_posterior_fn=class_posterior, threshold=0.5,
seed=seed)
X, s = make_classification_dataset(X_test, X_train)
# Uniform
acc = metric(X_train, y_train, X_test, y_test, random_state=seed)
rows.append(dict(weight="uniform", acc=acc,
seed=seed, dataset_seed=seed))
# Exact
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=r.ratio(X_train).numpy(), random_state=seed)
rows.append(dict(weight="exact", acc=acc,
seed=seed, dataset_seed=seed))
# RuLSIF
r_rulsif = RuLSIFDensityRatioEstimator(alpha=1e-6)
r_rulsif.fit(X_test, X_train)
sample_weight = np.maximum(1e-6, r_rulsif.ratio(X_train))
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=sample_weight, random_state=seed)
rows.append(dict(weight="rulsif", acc=acc,
seed=seed, dataset_seed=seed))
# KLIEP
# sigmas = [0.1, 0.25, 0.5, 0.75, 1.0]
        sigmas = list(np.maximum(0.25 * np.arange(5), 0.1))
# Tutorial "Regresion Basica: Predecir eficiencia de gasolina"
# https://www.tensorflow.org/tutorials/keras/regression?hl=es-419
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np # noqa: E402
# from scipy import stats # noqa: E402
import matplotlib.pyplot as plt # noqa: E402
import pandas as pd # noqa: E402
# import tensorflow as tf # noqa: E402
from keras import layers, backend # noqa: E402
from keras.utils.vis_utils import plot_model # noqa: E402
from tensorflow import keras # noqa: E402
from keras_visualizer import visualizer # noqa: E402
output_dir = ""
def c_mae(p_true, p_pred):
"""
https://stackoverflow.com/questions/69240680/how-to-get-mean-absolute-errors-mae-for-deep-learning-model
:param p_true: original target values
:param p_pred: predicted values
:return: MAE
"""
    return np.mean(np.abs(p_pred - p_true))
def c_mse(p_true, p_pred):
"""
https://stackoverflow.com/questions/69240680/how-to-get-mean-absolute-errors-mae-for-deep-learning-model
:param p_true: original target values
:param p_pred: predicted values
:return: MSE
"""
return np.mean(np.square(p_pred - p_true))
def c_determination(p_true, p_pred):
"""
Original posted in: https://jmlb.github.io/ml/2017/03/20/CoeffDetermination_CustomMetric4Keras/
:param p_true: original target values
:param p_pred: predicted values
:return: R^2 coefficient
"""
ss_res = np.sum(np.square(p_true - p_pred))
    ss_tot = np.sum(np.square(p_true - np.mean(p_true)))  # total sum of squares around the mean of the true values
return 1 - ss_res / (ss_tot + backend.epsilon())
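# Quick check (illustrative): with p_true = p_pred = np.array([1., 2., 3.]), ss_res = 0 and the
# function returns ~1.0; predicting the constant mean value for every sample returns ~0.0.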
def preprocess_data(p_x, p_stats):
"""
Make data standard/normal.
More info: https://dataakkadian.medium.com/standardization-vs-normalization-da7a3a308c64
https://www.kdnuggets.com/2020/04/data-transformation-standardization-normalization.html
:param p_x: input data
:param p_stats: input data statistics
:return: standardized and normalized data
"""
p_standardized = (p_x - p_stats["mean"]) / p_stats["std"] # mean = 0, std = 1
p_normalized = (p_x - p_stats["min"]) / (p_stats["max"] - p_stats["min"]) # range 0-1
return p_standardized, p_normalized
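# Worked example (illustrative): for a column with values [0, 5, 10], ``describe()`` gives
# mean = 5, std = 5 (sample std), min = 0, max = 10, so
#   standardized -> [-1.0, 0.0, 1.0]
#   normalized   -> [ 0.0, 0.5, 1.0]
# Further below only the normalized variant is fed to the network; the standardized one is kept
# for the comparison plots.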
def build_rnn(p_input, p_output):
"""
Build Keras Recurrent Neural Network model
:param p_input: input size
:param p_output: output size
:return: model
"""
n_inter = np.abs(p_input - p_output) / 2
p_model = keras.Sequential([
layers.GRU(int(p_output + np.ceil(n_inter)), activation='linear', input_shape=(2, p_input)),
layers.Dense(p_output, activation='relu')
])
p_model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae', 'mse'])
print_model(p_model)
return p_model
def build_ffnn(p_input, p_output):
"""
Build Keras Feed-Forward Neural Network model
:param p_input: input size
:param p_output: output size
:return: model
"""
n_inter = int(np.abs(p_input - p_output) / 2)
p_model = keras.Sequential([
        layers.Dense(int(p_output + n_inter), activation='linear', input_shape=[p_input]),
layers.Dense(p_output, activation='relu')
])
# optimizer = tf.keras.optimizers.RMSprop(0.001)
optimizer = keras.optimizers.Adam(learning_rate=0.001)
p_model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])
print_model(p_model)
return p_model
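# Sizing example (illustrative): with p_input = 10 features and p_output = 2 targets, both builders
# put 2 + |10 - 2| / 2 = 6 units in the hidden layer, i.e. halfway between input and output width.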
def print_model(p_model):
"""
Print Keras model to node graph
:param p_model: Keras model
:return:
"""
try:
plot_model(p_model, to_file=output_dir + "/model_scheme.png", show_shapes=True,
show_layer_names=True)
visualizer(p_model, filename=output_dir + "/model_graph", format='png')
except Exception as ex:
print(ex)
class PrintDot(keras.callbacks.Callback):
"""
Display training progress by printing a single dot for each completed epoch
"""
@staticmethod
def on_epoch_end(epoch, _logs):
print('') if epoch % 100 == 0 else 0
print('.', end='')
def plot_history(p_history, p_k):
"""
Plot epoch history (MAE and MSE)
:param p_history: input registers
:param p_k: climatic zone
:return:
"""
hist = pd.DataFrame(p_history.history)
hist['epoch'] = p_history.epoch
fig, (ax1, ax2) = plt.subplots(1, 2)
plt.setp(ax1, xlabel="Epoch", ylabel="Mean Abs Error [PCI]")
plt.setp(ax2, xlabel="Epoch", ylabel="Mean Square Error [$PCI^2$]")
ax1.plot(hist['epoch'], hist['mae'], label='Train Error')
ax1.plot(hist['epoch'], hist['val_mae'], label='Val Error')
ax1.legend()
ax1.set_xlim([min(hist['epoch']), max(hist['epoch'])])
ax1.set_ylim([min(hist['val_mae']), max(hist['val_mae'])])
ax2.plot(hist['epoch'], hist['mse'], label='Train Error')
ax2.plot(hist['epoch'], hist['val_mse'], label='Val Error')
ax2.legend()
ax2.set_xlim([min(hist['epoch']), max(hist['epoch'])])
ax2.set_ylim([min(hist['val_mse']), max(hist['val_mse'])])
plt.tight_layout()
plt.savefig(output_dir + "/model_history_" + p_k + ".png")
plt.clf()
def plot_data(p_1, p_2, p_3, p_4, p_5, p_6, p_k):
"""
    :param p_1: raw training data
    :param p_2: raw test data
    :param p_3: standardized training data
    :param p_4: standardized test data
    :param p_5: normalized training data
    :param p_6: normalized test data
    :param p_k: climatic zone label (used in the output file name)
    :return:
"""
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2)
plt.setp(ax1, xlabel="Train (PCI)", ylabel="Count")
plt.setp(ax2, xlabel="Test (PCI)", ylabel="Count")
plt.setp(ax3, xlabel="Train standard (PCI)", ylabel="Count")
plt.setp(ax4, xlabel="Test standard (PCI)", ylabel="Count")
plt.setp(ax5, xlabel="Train normal (PCI)", ylabel="Count")
plt.setp(ax6, xlabel="Test normal (PCI)", ylabel="Count")
n, bins, _ = ax1.hist(p_1["PCI"], bins=25, rwidth=0.8)
# density = stats.gaussian_kde(p_4["PCI"])
# ax4.plot(bins, density(bins) * max(n) / max(density(bins)), "r-")
n, bins, _ = ax2.hist(p_2["PCI"], bins=25, rwidth=0.8)
n, bins, _ = ax3.hist(p_3["PCI"], bins=25, rwidth=0.8)
n, bins, _ = ax4.hist(p_4["PCI"], bins=25, rwidth=0.8)
n, bins, _ = ax5.hist(p_5["PCI"], bins=25, rwidth=0.8)
n, bins, _ = ax6.hist(p_6["PCI"], bins=25, rwidth=0.8)
plt.tight_layout()
plt.savefig(output_dir + "/model_train_" + p_k + ".png")
plt.clf()
def plot_evaluation(p_test_predictions, p_test_labels, p_k):
"""
Plot error evaluation
:param p_test_predictions: input predicted data
:param p_test_labels: input actual data
:param p_k: climatic zone
:return:
"""
fig, (ax1, ax2) = plt.subplots(1, 2)
plt.setp(ax1, xlabel="True Values [PCI_F]", ylabel="Predictions [PCI_F]")
plt.setp(ax2, xlabel="Prediction Error [PCI_F]", ylabel="Count")
# print(np.shape(test_labels["PCI_F"]), np.shape(test_predictions))
ax1.scatter(p_test_labels["PCI_F"], p_test_predictions, c="r", marker="2")
ax1.plot([0, 100], [0, 100])
ax1.set_xlim([0, 100])
ax1.set_ylim([0, 100])
# https://stackoverflow.com/questions/27872723/is-there-a-clean-way-to-generate-a-line-histogram-chart-in-python
error = p_test_predictions - p_test_labels["PCI_F"]
n, bins, _ = ax2.hist(error, bins=25, rwidth=0.8, color="blue")
# density = stats.gaussian_kde(error)
# plt.plot(bins, density(bins) * max(n) / max(density(bins)), "r-")
plt.tight_layout()
plt.savefig(output_dir + "/model_evaluation_" + p_k + ".png")
plt.clf()
def model_parameters(model, p_columns, p_k):
"""
Generate a summary of weights or NN parameters
:param model: input model
:param p_columns: input columns
:param p_k: climatic zone
:return:
"""
with open(output_dir + "/weight_" + p_k + ".txt", "w",
encoding="utf-8") as txt_file:
for i, row in enumerate(model.layers[0].get_weights()[0]):
value = p_columns[i] + ", " + np.array2string(row[0])
txt_file.write("".join(value) + "\n")
# a_file = open(output_dir + "/summary_" + k + ".txt", "w",
# encoding="utf-8")
# for row in model.summary():
# np.savetxt(a_file, row)
# a_file.close()
def main(p_output, p_table, p_columns, p_targets, keras_model="ffnn", n_tests=1):
"""
Main function
:param p_output: output directory
:param p_table: input table
:param p_columns: input columns
:param p_targets: input targets
    :param keras_model: Keras model type, either 'ffnn' (default) or 'rnn'
    :param n_tests: number of training repetitions per climatic zone
    :return:
"""
global output_dir
output_dir = p_output
p_columns.extend(p_targets)
dataset_raw = pd.read_csv(p_table, sep=";", encoding="unicode_escape", low_memory=False)
# Filter desired columns
dataset_raw = dataset_raw[p_columns + ["CLIMATIC ZONE"]]
# Delete unknown data
dataset = dataset_raw.dropna()
print("- Original size:", np.shape(dataset_raw)[0], "rows\n- After drop NA:", np.shape(dataset)[0], "rows")
climatic_zones = [
"ALL",
"LLUVIOSA - CÁLIDA",
"LLUVIOSA - MEDIA",
"LLUVIOSA - TEMPLADA",
"LLUVIOSA - FUERA DE RANGO",
"POCO LLUVIOSA - CÁLIDA",
"POCO LLUVIOSA - MEDIA",
"POCO LLUVIOSA - TEMPLADA",
"POCO LLUVIOSA - FUERA DE RANGO",
]
train_perc = 0.7
array_results = []
for zone in climatic_zones:
n_mae = 0
n_mse = 0
n_det = 0
dataset_cz = dataset
if zone != "ALL":
dataset_cz = dataset[dataset["CLIMATIC ZONE"] == zone]
print("Number of rows:", np.shape(dataset_cz)[0], "rows")
dataset_cz.pop("CLIMATIC ZONE")
# dataset_cz.pop("AADT_CUM")
# dataset_cz.pop("AADTT_CUM")
# dataset_cz.pop("KESAL_CUM")
if np.shape(dataset_cz)[0] > 1:
# Convert data to float
dataset_cz = dataset_cz.applymap(str).replace([","], ["."], regex=True).applymap(float)
# Divide dataset in train and test groups
train_dataset = dataset_cz.sample(frac=train_perc)
test_dataset = dataset_cz.drop(train_dataset.index)
# General statistics
train_stats = train_dataset.describe()
[train_stats.pop(x) for x in p_targets]
train_stats = train_stats.transpose()
# Objective value
train_labels = pd.concat([train_dataset.pop(x) for x in p_targets], axis=1)
test_labels = pd.concat([test_dataset.pop(x) for x in p_targets], axis=1)
# Normalising data
# normed_train_data = preprocess_data(train_dataset, train_stats)[0].fillna(0) # Standardization
# normed_test_data = preprocess_data(test_dataset, train_stats)[0].fillna(0)
normed_train_data = preprocess_data(train_dataset, train_stats)[1].fillna(0) # Normalization
normed_test_data = preprocess_data(test_dataset, train_stats)[1].fillna(0)
plot_data(train_dataset, test_dataset,
preprocess_data(train_dataset, train_stats)[0].fillna(0),
preprocess_data(test_dataset, train_stats)[0].fillna(0),
preprocess_data(train_dataset, train_stats)[1].fillna(0),
preprocess_data(test_dataset, train_stats)[1].fillna(0), zone)
for n in range(0, n_tests):
print("[[%s (%d/%d)]]" % (zone, n + 1, n_tests))
# Keras model
if keras_model == "rnn":
model = build_rnn(len(normed_train_data.keys()), len(p_targets))
else:
model = build_ffnn(len(normed_train_data.keys()), len(p_targets))
# model_parameters(model, p_columns, k)
# The patience parameter is the number of epochs to wait for improvement before stopping
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=100)
# Number of rows accessed in each epoch
batch_size = np.shape(normed_train_data)[0] if np.shape(normed_train_data)
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
import rvo2
class NavRVO2Env_all(gym.Env):
"""
What's new for the new environment:
Added 8 pedestrians initialized at the 4 corners ([-0.7,-0.7], [0.7,-0.7], [0.7,0.7], [-0.7,0.7])
of a square centered at the origin, 2 pedestrians per corner. They walk roughly
diagonally towards the opposite side (the exact direction is randomized). After they exit the square,
they will be re-initialized at the corners.
robot state:
'px', 'py', 'vx', 'vy', 'gx', 'gy'
0 1 2 3 4 5
pedestrian state:
'px1', 'py1', 'vx1', 'vy1'
6 7 8 9
"""
def __init__(self, task={}):
super(NavRVO2Env_all, self).__init__()
self._num_ped = 8
self._self_dim = 6
self._ped_dim = 4
self._num_agent = self._num_ped + 1 # ped_num + robot_num
self._state_dim = self._self_dim + self._num_ped * self._ped_dim # robot_state_dim + ped_num * ped_state_dim
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(self._state_dim,), dtype=np.float32)
self.action_space = spaces.Box(low=-0.1, high=0.1,
shape=(2,), dtype=np.float32)
self._done = False
self._task = task
self._goal = task.get('goal', np.array([0., 0.], dtype=np.float32))
self._default_robot_state = np.array([0., 0., 0., 0., self._goal[0], self._goal[1]], dtype=np.float32)
self._state = self._default_robot_state.copy()
self.seed()
self._ped_radius = 0.15
self._ped_speed = task.get('ped_speed', np.zeros(self._num_ped, dtype=np.float32))
self._ped_direc = task.get('ped_direc', np.zeros(self._num_ped, dtype=np.float32))
self._entering_corner = np.float32(0.7)
self._default_ped_states = self._entering_corner * np.array([[-1,-1], [1,-1], [1,1], [-1,1]])
self._default_ped_states = np.vstack((self._default_ped_states, self._default_ped_states)) # 8 ped
self._ped_states = self._default_ped_states.copy()
self._ped_list = []
self._simulator = self.init_simulator()
for i in range(self._num_ped): # Extrating values from simulator and init self._state
ai = self._ped_list[i]
ai_vel = self._simulator.getAgentVelocity(ai)
ai_pos = self._simulator.getAgentPosition(ai)
self._state = np.append(self._state, np.append([ai_pos[0], ai_pos[1]], [ai_vel[0], ai_vel[1]]))
def init_simulator(self):
# Initializing RVO2 simulator && add agents to self._ped_list
self._ped_list = []
timeStep = 1.
neighborDist = self._ped_radius # safe-radius to observe states
maxNeighbors = 8
timeHorizon = 2.0
timeHorizonObst = timeHorizon
radius = 0.05 # size of the agent
maxSpeed = 0.2
sim = rvo2.PyRVOSimulator(timeStep, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst, radius, maxSpeed)
for i in range(self._num_ped):
ai = sim.addAgent((self._default_ped_states[i,0], self._default_ped_states[i,1]))
self._ped_list.append(ai)
vx = self._ped_speed[i] * np.cos(self._ped_direc[i])
vy = self._ped_speed[i] * np.sin(self._ped_direc[i])
sim.setAgentPrefVelocity(ai, (vx, vy))
return sim
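# Usage sketch of the rvo2 calls this environment relies on (comments only; these
# mirror the calls already used in update_simulator()/step(), nothing new is assumed):
#   sim = self.init_simulator()
#   sim.setAgentPrefVelocity(agent_id, (vx, vy))   # desired velocity for one agent
#   sim.doStep()                                   # advance the simulation by timeStep (1.0 here)
#   px, py = sim.getAgentPosition(agent_id)        # read back the updated position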
def check_and_clip_ped_states(self):
# update simlator when an agent gets out of boundary
ai_list = []
for i in range(self._num_ped):
if any(abs(self._ped_states[i,:]) >= self._entering_corner + 0.001):
self._ped_states[[i, i], [0, 1]] = self._default_ped_states[i,:]
self._ped_direc[i] = np.arctan2(-self._ped_states[i,1], -self._ped_states[i,0]) + np.random.uniform(-np.pi/4, np.pi/4, size=(1,1))
ai_list.append(i)
if ai_list:
self.update_simulator(ai_list)
return ai_list
def print_rvo2_states(self):
print("Printing agent-states from rvo-2")
for i in range(self._num_ped):
ai = self._ped_list[i]
print("Agent", ai,": pos=", self._simulator.getAgentPosition(ai), ", vel=", self._simulator.getAgentVelocity(ai))
def print_ped_states(self):
print("Printing agent-states from self._ped_states")
for i in range(self._num_ped):
print("Agent", i,": pos=", self._ped_states[i])
def print_robot_state(self):
print("Robot: pos=", self._state[0:2])
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_tasks(self, num_tasks):
# tasks includes goals and various goal_pos and ped_direcs
goal_range = [-1., -0.8, 0.8, 1.]
rand_temp = self.np_random.uniform(goal_range[0]-goal_range[1], goal_range[3]-goal_range[2], size=(num_tasks,))
rand_temp = rand_temp + goal_range[2] * np.sign(rand_temp) # there's a chance that 0 is sampled, but that's okay
free_axis = np.random.randint(2, size=num_tasks)
goals = np.zeros((num_tasks, 2), dtype=np.float32)
goals[range(num_tasks),free_axis] = rand_temp
goals[range(num_tasks),1-free_axis] = self.np_random.uniform(-1., 1., size=(num_tasks,))
ped_speeds = self.np_random.uniform(0.03, 0.15, size=(num_tasks, self._num_ped))
ped_direc = np.arctan2(-self._ped_states[:,1], -self._ped_states[:,0])
ram_direcs = self.np_random.uniform(-np.pi/4, np.pi/4, size=(num_tasks, self._num_ped)) # 8 pedestrians
ped_direcs = ram_direcs + ped_direc
tasks = [{'goal': goal, 'ped_speed': ped_speed, 'ped_direc': ped_direc} for goal, ped_speed, ped_direc in zip(goals, ped_speeds, ped_direcs)]
return tasks
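# Shape of one sampled task (comments only; the numbers are illustrative, not from a real run):
#   {'goal': array([0.93, -0.41]),   # one coordinate pushed into +/-[0.8, 1.0], the other uniform in [-1, 1]
#    'ped_speed': array of 8 speeds drawn from [0.03, 0.15],
#    'ped_direc': array of 8 headings pointing roughly at the origin, jittered by +/- pi/4}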
def reset_task(self, task):
self._task = task
self._goal = task['goal']
self._ped_speed = task['ped_speed']
self._ped_direc = task['ped_direc']
self.update_simulator(self._ped_list)
def update_simulator(self, ai_list=[]):
if ai_list: #only update agents in ai_list
for ai in ai_list:
self._simulator.setAgentPosition(ai, (self._ped_states[ai,0], self._ped_states[ai,1]))
vx = self._ped_speed[ai] * np.cos(self._ped_direc[ai])
vy = self._ped_speed[ai] * np.sin(self._ped_direc[ai])
self._simulator.setAgentVelocity(ai, (vx, vy))
self._simulator.setAgentPrefVelocity(ai, (vx, vy))
else: # update all agents from _ped_states
for ai in self._ped_list:
self._simulator.setAgentPosition(ai, (self._ped_states[ai,0], self._ped_states[ai,1]))
vx = self._ped_speed[ai] * np.cos(self._ped_direc[ai])
vy = self._ped_speed[ai] * np.sin(self._ped_direc[ai])
self._simulator.setAgentVelocity(ai, (vx, vy))
self._simulator.setAgentPrefVelocity(ai, (vx, vy))
# print("ped i velocity = ", self._simulator.getAgentVelocity(ai)
def assert_sim_and_states(self):
for i in self._ped_list:
if self._ped_states[i, 0] != self._simulator.getAgentPosition(i)[0]:
print("Error: X for agent ", i, ": state = ", self._ped_states[i, 0], ", sim = ", self._simulator.getAgentPosition(i)[0])
return False
if self._ped_states[i, 1] != self._simulator.getAgentPosition(i)[1]:
print("Error: Y for agent ", i, ": state = ", self._ped_states[i, 1], ", sim = ", self._simulator.getAgentPosition(i)[1])
return False
return True
def update_ped_states(self):
# Update ped_states from simulator
for i in self._ped_list:
self._ped_states[i, 0] = self._simulator.getAgentPosition(i)[0]
self._ped_states[i, 1] = self._simulator.getAgentPosition(i)[1]
def reset(self, env=True):
self._done = False
self._state = self._default_robot_state.copy()
# self._ped_histories = []
self._ped_states = self._default_ped_states.copy()
try:
ped_direc = self._task.get('ped_direc', np.zeros(self._num_ped, dtype=np.float32))
except:
ped_direc = np.zeros(self._num_ped, dtype=np.float32)
for i in range(self._num_ped):
vx = self._ped_speed[i] * np.cos(ped_direc[i])
vy = self._ped_speed[i] * np.sin(ped_direc[i])
self._state = np.append(self._state, np.append(self._default_ped_states[i], [vx, vy]))
self._simulator = self.init_simulator()
return self._state
def step(self, action):
action = np.clip(action, -0.1, 0.1)
try:
assert self.action_space.contains(action)
except AssertionError as error:
print("AssertionError: action is {}".format(action))
self._state[0:2] = self._state[0:2] + action
self._state[2:4] = action
self._state[4:6] = self._goal
dx = self._state[0] - self._goal[0]
dy = self._state[1] - self._goal[1]
# Update agents' state
self._simulator.doStep()
self.update_ped_states()
self.check_and_clip_ped_states() # ensure all agents are within the boundary: reset to default pos if necessary
mid_point = self._goal/2.
real_ped_state = self._ped_states + mid_point
# update self._state
for i in range(self._num_ped):
ai_velocity = self._simulator.getAgentVelocity(self._ped_list[i])
self._state[self._self_dim+i*self._ped_dim: self._self_dim+i*self._ped_dim+4] = np.append(real_ped_state[i,:], [ai_velocity[0], ai_velocity[1]])
# Calculate rewards
dist_reward = -np.sqrt(dx ** 2 + dy ** 2)
weight_ped = 0.2
weight_colli = 1.5
col_reward = 0.
for i in range(self._num_ped):
dist_ped_i = np.sqrt((real_ped_state[i,0] - self._state[0]) ** 2 + (real_ped_state[i,1] - self._state[1]) ** 2)
if (dist_ped_i < self._ped_radius): # safe distance to penalize the robot
col_reward = col_reward + (dist_ped_i - self._ped_radius) * weight_ped
if (dist_ped_i < 0.05): # collision with an agent
col_reward = col_reward + (-1) * weight_colli
all_reward = np.array([dist_reward+col_reward, dist_reward, col_reward])
if self._done:
done = True
self._done = False
elif ((np.abs(dx) < 0.1) and (np.abs(dy)
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import pathlib
from Constantes import class_names, batch_size, img_height, img_width
#--------------------------------------------------------------------------------------------------------------------------------------------
'''Download the image folder from https://uniprivado.s3.amazonaws.com/imagenes_modelo.zip:
imagenes_modelo
fuego
no_fuego
'''
data_dir = pathlib.Path(r"D:\Sistema\Descargas\imagenes_modelo")
#--------------------------------------------------------------------------------------------------------------------------------------------
'''The images are processed, setting their dimensions and splitting off
20% for validation and 80% for training'''
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
class_names = train_ds.class_names
print("Clasificación del modelo: ",class_names)
#--------------------------------------------------------------------------------------------------------------------------------------------
'''Visualize random samples'''
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
# Notice the pixel values are now in `[0,1]`.
print(np.min(first_image))
import random
import numpy as np
from discretizations.DiscretizationScheme import DiscretizationScheme
from problems.Problem import Problem
class Flp(Problem):
def __init__(self, instanceName, instancePath, populationSize, discretizationScheme, repairType):
super().__init__(instanceName, instancePath)
self.populationSize = populationSize
self.discretizationScheme = discretizationScheme
self.repairType = repairType
self.costs = np.array([[]], dtype=np.int32)
self.customerBudgets = np.array([], dtype=np.int32)
self.facilities = 0
self.customers = 0
self.openFacilities = 0
self.repairsQuantity = 0
self.globalOptimum = self.getGlobalOptimum()
self.population = np.random.uniform(low=-1.0, high=1.0, size=(self.populationSize, self.costs.shape[0]))
self.binMatrix = np.random.randint(low=0, high=2, size=(self.populationSize, self.costs.shape[0]))
self.fitness = np.zeros(self.populationSize)
self.solutionsRanking = np.zeros(self.populationSize)
self.weightConstraints = np.array([], dtype=np.float64)
def getGlobalOptimum(self):
instances = {
'FLPr_100_40_01': [0, 2245],
'FLPr_100_40_02': [1, 2259],
'FLPr_100_40_03': [2, 2019],
'FLPr_100_40_04': [3, 1533],
'FLPr_100_40_05': [4, 2386],
'FLPr_100_40_06': [5, 1960],
'FLPr_100_40_07': [6, 2179],
'FLPr_100_40_08': [7, 2139],
'FLPr_100_40_09': [8, 1895],
'FLPr_100_40_10': [9, 2209],
'FLPr_100_100_01': [10, 2235],
'FLPr_100_100_02': [11, 2240],
'FLPr_100_100_03': [12, 1923],
'FLPr_100_100_04': [13, 2133],
'FLPr_100_100_05': [14, 2099],
'FLPr_100_100_06': [15, 2237],
'FLPr_100_100_07': [16, 1888],
'FLPr_100_100_08': [17, 1825],
'FLPr_100_100_09': [18, 1767],
'FLPr_100_100_10': [19, 2368]
}
for instanceName in instances:
if instanceName in self.instanceName:
return instances[instanceName][1]
return None
def readInstance(self):
file = open(self.instancePath, 'r')
# Read dimensions
line = file.readline().split()
self.facilities = int(line[0]) # m
self.customers = int(line[1]) # n
self.openFacilities = int(line[2]) # p
# Read costs
self.costs = np.zeros((self.facilities, self.customers), dtype=np.int32)
for j in range(0, self.facilities):
line = file.readline().split()
self.costs[j] = np.array(line, dtype=np.int32)
# print('self.costs:', self.costs)
# print('self.costs.shape:', self.costs.shape)
# print('self.costs.shape[0]:', self.costs.shape[0])
# Read constraints (?)
line = file.readline().split()
self.customerBudgets = np.array(line, dtype=np.int32)
# print('self.customerBudgets:', self.customerBudgets)
# print('self.customerBudgets.shape:', self.customerBudgets.shape)
file.close()
self.refreshComputedAttributes()
def refreshComputedAttributes(self):
self.population = np.random.uniform(low=-1.0, high=1.0, size=(self.populationSize, self.costs.shape[0]))
self.binMatrix = np.random.randint(low=0, high=2, size=(self.populationSize, self.costs.shape[0]))
self.fitness = np.zeros(self.populationSize)
self.solutionsRanking = np.zeros(self.populationSize)
self.weightConstraints = 1 / np.sum(self.customerBudgets) # <<<
def process(self, *args, **kwargs):
# print('----------START process----------')
# Binarización de 2 pasos
self.binMatrix = DiscretizationScheme(
self.population, self.binMatrix, self.solutionsRanking, self.discretizationScheme['transferFunction'],
self.discretizationScheme['binarizationOperator']
).binarize()
# print('self.binMatrix:', self.binMatrix)
for solution in range(self.binMatrix.shape[0]):
openFacilities = np.sum(self.binMatrix[solution])
# print(f'[antes] solution: {solution} - open facilities: {openFacilities}')
# print('[antes] self.binMatrix[solution]:', self.binMatrix[solution])
if openFacilities > self.openFacilities: # if more than p facilities are open
potentialCloseFacilities = np.where(self.binMatrix[solution] == 1)[0]
randIndexes = random.sample(set(potentialCloseFacilities), k=(openFacilities - self.openFacilities))
self.binMatrix[solution][randIndexes] = 0
elif openFacilities < self.openFacilities: # if fewer than p facilities are open
potentialOpenNewFacilities = np.where(self.binMatrix[solution] == 0)[0]
randIndexes = random.sample(set(potentialOpenNewFacilities), k=(self.openFacilities - openFacilities))
self.binMatrix[solution][randIndexes] = 1
# openFacilities = np.sum(self.binMatrix[solution])
# print(f'[despues] solution: {solution} - open facilities: {openFacilities}')
# print('[despues] self.binMatrix[solution]:', self.binMatrix[solution])
facilitiesSelectedCosts = np.zeros((self.openFacilities, self.customers), dtype=np.int32)
# -*- coding: utf-8 -*-
"""
TODO: deprecate this file eventually when the general model and dataset structure
is fully set up
"""
# utils.py
# provides utilities for learning a neural network model
from __future__ import absolute_import, division, print_function
import time
import numpy as np
from six.moves import cPickle as pickle # NOQA
import utool as ut
import six
from wbia_cnn import net_strs
print, rrr, profile = ut.inject2(__name__)
# VERBOSE_CNN = ut.get_argflag(('--verbose-cnn', '--verbcnn')) or ut.VERBOSE
VERBOSE_CNN = ut.get_module_verbosity_flags('cnn')[0] or ut.VERBOSE
RANDOM_SEED = None
# RANDOM_SEED = 42
def checkfreq(freqlike_, count):
# checks frequency of param, also handles the case where it is specified
# as a bool. does not trigger on 0
if ut.is_int(freqlike_):
return (count % freqlike_) == (freqlike_ - 1)
else:
return freqlike_ is True
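# Quick examples (comments only): with an int frequency the check fires on the last
# count of each period, while a bare True fires every time.
#   checkfreq(10, 9)   -> True   (also on counts 19, 29, ...)
#   checkfreq(10, 10)  -> False
#   checkfreq(True, 3) -> True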
def get_gpu_memory():
"""
References:
https://groups.google.com/forum/#!topic/theano-users/2EdclcmZazU
https://gist.github.com/matpalm/9c0c7c6a6f3681a0d39d
CommandLine:
python -m wbia_cnn.utils --test-get_gpu_memory
Example:
>>> # ENABLE_DOCTEST
>>> from wbia_cnn.utils import * # NOQA
>>> result = get_gpu_memory()
>>> print(result)
"""
import theano
return theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
def _update(kwargs, key, value):
# if key not in kwargs.keys():
if key not in kwargs:
kwargs[key] = value
def testdata_imglist(shape=(32, 32, 3)):
"""
Returns 4 colored 32x32 test images: one with structured increasing numbers,
one with the lines of a cartoon face, and two complex images of people.
CommandLine:
python -m wbia_cnn.utils --test-testdata_imglist --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia_cnn.utils import * # NOQA
>>> (img_list, width, height, channels) = testdata_imglist()
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> pt.imshow(img_list[0], pnum=(2, 2, 1))
>>> pt.imshow(img_list[1], pnum=(2, 2, 2))
>>> pt.imshow(img_list[2], pnum=(2, 2, 3))
>>> pt.imshow(img_list[3], pnum=(2, 2, 4))
>>> ut.show_if_requested()
"""
import vtool as vt
x = 32
height, width, channels = shape
img0 = np.arange(x ** 2 * 3, dtype=np.uint8).reshape(x, x, 3)
img1 = vt.imread(ut.grab_test_imgpath('jeff.png'))
img2 = vt.imread(ut.grab_test_imgpath('carl.jpg'))
img3 = vt.imread(ut.grab_test_imgpath('lena.png'))
img_list = [
vt.padded_resize(img, (width, height)) for img in [img0, img1, img2, img3]
]
return img_list, width, height, channels
def convert_cv2_images_to_theano_images(img_list):
r"""
Converts b01c to bc01
Converts a list of cv2-style images into a single numpy array of nonflat
theano-style images.
h=height, w=width, b=batchid, c=channel
Args:
img_list (list of ndarrays): a list of numpy arrays with shape [h, w, c]
Returns:
data: in the shape [b, (c x h x w)]
CommandLine:
python -m wbia_cnn.utils --test-convert_cv2_images_to_theano_images
Example:
>>> # ENABLE_DOCTEST
>>> from wbia_cnn.utils import * # NOQA
>>> import vtool as vt
>>> # build test data
>>> # execute function
>>> img_list, width, height, channels = testdata_imglist()
>>> data = convert_cv2_images_to_theano_images(img_list)
>>> data[0].reshape(3, 32, 32)[:, 0:2, 0:2]
>>> subset = (data[0].reshape(3, 32, 32)[:, 0:2, 0:2])
>>> #result = str(np.transpose(subset, (1, 2, 0)))
>>> result = str(subset).replace('\n', '')
>>> print(result)
[[[ 0 3] [ 96 99]] [[ 1 4] [ 97 100]] [[ 2 5] [ 98 101]]]
"""
# [img.shape for img in img_list]
# format to [b, c, h, w]
if len(img_list.shape) == 3:
# ensure 4 dimensions
img_list = img_list.reshape(img_list.shape + (1,))
shape_list = [img.shape for img in img_list]
assert ut.allsame(shape_list)
theano_style_imgs = [np.transpose(img, (2, 0, 1))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to calculate frequency spectra."""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from .base import mp_root, cross_gtis, create_gti_mask
from .base import common_name, _empty, _assign_value_if_none
from .rebin import const_rebin
from .io import sort_files, get_file_type, load_lcurve, save_pds
from .io import MP_FILE_EXTENSION
import numpy as np
import logging
import warnings
from multiprocessing import Pool
import os
def _wrap_fun_cpds(arglist):
f1, f2, outname, kwargs = arglist
try:
return calc_cpds(f1, f2, outname=outname, **kwargs)
except Exception as e:
warnings.warn(str(e))
def _wrap_fun_pds(argdict):
fname = argdict["fname"]
argdict.pop("fname")
try:
return calc_pds(fname, **argdict)
except Exception as e:
warnings.warn(str(e))
def fft(lc, bintime):
"""A wrapper for the fft function. Just numpy for now.
Parameters
----------
lc : array-like
bintime : float
Returns
-------
freq : array-like
ft : array-like
the Fourier transform.
"""
nbin = len(lc)
ft = np.fft.fft(lc)
freqs = np.fft.fftfreq(nbin, bintime)
return freqs.astype(np.double), ft
def leahy_pds(lc, bintime):
r"""Calculate the power density spectrum.
Calculates the Power Density Spectrum a la Leahy+1983, ApJ 266, 160,
given the lightcurve and its bin time.
Assumes no gaps are present! Beware!
Parameters
----------
lc : array-like
the light curve
bintime : array-like
the bin time of the light curve
Returns
-------
freqs : array-like
Frequencies corresponding to PDS
pds : array-like
The power density spectrum
"""
nph = sum(lc)
# Checks must be done before. At this point, only good light curves have to
# be provided
assert (nph > 0), 'Invalid interval. Light curve is empty'
freqs, ft = fft(lc, bintime)
# I'm pretty sure there is a faster way to do this.
pds = np.absolute(ft.conjugate() * ft) * 2. / nph
good = freqs >= 0
freqs = freqs[good]
pds = pds[good]
return freqs, pds
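# Minimal usage sketch (comments only; the Poisson light curve is an assumption):
#   lc = np.random.poisson(100, 8192)         # white-noise light curve, 8192 bins
#   freqs, pds = leahy_pds(lc, bintime=0.1)
#   # for pure Poisson noise the Leahy powers scatter around 2 (chi^2 with 2 d.o.f.)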
def welch_pds(time, lc, bintime, fftlen, gti=None, return_all=False):
r"""Calculate the PDS, averaged over equal chunks of data.
Calculates the Power Density Spectrum \'a la Leahy (1983), given the
lightcurve and its bin time, over equal chunks of length fftlen, and
returns the average of all PDSs, or the sum PDS and the number of chunks
Parameters
----------
time : array-like
Central times of light curve bins
lc : array-like
Light curve
bintime : float
Bin time of the light curve
fftlen : float
Length of each FFT
gti : [[g0_0, g0_1], [g1_0, g1_1], ...]
Good time intervals. Defaults to
[[time[0] - bintime/2, time[-1] + bintime/2]]
Returns
-------
return_str : object, optional
An Object containing all values below.
f : array-like
array of frequencies corresponding to PDS bins
pds : array-like
the values of the PDS
epds : array-like
the uncertainties on the PDS values
npds : int
the number of summed PDSs (if normalize is False)
ctrate : float
the average count rate in the two lcs
dynpds : array-like, optional
dynepds : array-like, optional
dynctrate : array-like, optional
times : array-like, optional
Other parameters
----------------
return_all : bool
if True, return everything, including the dynamical PDS
"""
gti = _assign_value_if_none(
gti, [[time[0] - bintime / 2, time[-1] + bintime / 2]])
start_bins, stop_bins = \
decide_spectrum_lc_intervals(gti, fftlen, time)
results = _empty()
if return_all:
results.dynpds = []
results.edynpds = []
results.dynctrate = []
results.times = []
pds = 0
npds = len(start_bins)
mask = np.zeros(len(lc), dtype=np.bool)
for start_bin, stop_bin in zip(start_bins, stop_bins):
l = lc[start_bin:stop_bin]
t0 = time[start_bin]
try:
assert np.sum(l) != 0, \
'Interval starting at time %.7f is bad. Check GTIs' % t0
f, p = leahy_pds(l, bintime)
except Exception as e:
warnings.warn(str(e))
npds -= 1
continue
if return_all:
results.dynpds.append(p)
results.edynpds.append(p)
results.dynctrate.append(np.mean(l) / bintime)
results.times.append(time[start_bin])
pds += p
mask[start_bin:stop_bin] = True
pds /= npds
epds = pds / np.sqrt(npds)
ctrate = np.mean(lc[mask]) / bintime
results.f = f
results.pds = pds
results.epds = epds
results.npds = npds
results.ctrate = ctrate
return results
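# Usage sketch (comments only; a 1-s binned curve split into 512-s FFTs, values assumed):
#   res = welch_pds(time, lc, bintime=1., fftlen=512., gti=gti)
#   res.f, res.pds, res.epds, res.npds, res.ctrate   # averaged spectrum plus bookkeeping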
def leahy_cpds(lc1, lc2, bintime):
"""Calculate the cross power density spectrum.
Calculates the Cross Power Density Spectrum, normalized similarly to the
PDS in Leahy+1983, ApJ 266, 160., given the lightcurve and its bin time.
Assumes no gaps are present! Beware!
Parameters
----------
lc1 : array-like
The first light curve
lc2 : array-like
The light curve
bintime : array-like
The bin time of the light curve
Returns
-------
freqs : array-like
Frequencies corresponding to PDS
cpds : array-like
The cross power density spectrum
cpdse : array-like
The error on the cross power density spectrum
pds1 : array-like
The power density spectrum of the first light curve
pds2 : array-like
The power density spectrum of the second light curve
"""
assert len(lc1) == len(lc2), 'Light curves MUST have the same length!'
nph1 = sum(lc1)
nph2 = sum(lc2)
# Checks must be done before. At this point, only good light curves have to
# be provided
assert (nph1 > 0 and nph2 > 0), ('Invalid interval. At least one light '
'curve is empty')
freqs, ft1 = fft(lc1, bintime)
freqs, ft2 = fft(lc2, bintime)
pds1 = np.absolute(ft1.conjugate() * ft1) * 2. / nph1
pds2 = np.absolute(ft2.conjugate() * ft2) * 2. / nph2
pds1e = np.copy(pds1)
pds2e = np.copy(pds2)
# The "effective" count rate is the geometrical mean of the count rates
# of the two light curves
nph = np.sqrt(nph1 * nph2)
# I'm pretty sure there is a faster way to do this.
if nph != 0:
cpds = ft1.conjugate() * ft2 * 2. / nph
else:
cpds = np.zeros(len(freqs))
# Justification in timing paper! (Bachetti et al. arXiv:1409.3248)
# This only works for cospectrum. For the cross spectrum, I *think*
# it's irrelevant
cpdse = np.sqrt(pds1e * pds2e) / np.sqrt(2.)
good = freqs >= 0
freqs = freqs[good]
cpds = cpds[good]
cpdse = cpdse[good]
return freqs, cpds, cpdse, pds1, pds2
def welch_cpds(time, lc1, lc2, bintime, fftlen, gti=None, return_all=False):
"""Calculate the CPDS, averaged over equal chunks of data.
Calculates the Cross Power Density Spectrum normalized like PDS, given the
lightcurve and its bin time, over equal chunks of length fftlen, and
returns the average of all PDSs, or the sum PDS and the number of chunks
Parameters
----------
time : array-like
Central times of light curve bins
lc1 : array-like
Light curve 1
lc2 : array-like
Light curve 2
bintime : float
Bin time of the light curve
fftlen : float
Length of each FFT
gti : [[g0_0, g0_1], [g1_0, g1_1], ...]
Good time intervals. Defaults to
[[time[0] - bintime/2, time[-1] + bintime/2]]
Returns
-------
return_str : object
An Object containing all return values below
f : array-like
array of frequencies corresponding to PDS bins
cpds : array-like
the values of the CPDS
ecpds : array-like
the uncertainties on the CPDS values
ncpds : int
the number of summed PDSs (if normalize is False)
ctrate : float
the average count rate in the two lcs
dyncpds : array-like, optional
dynecpds : array-like, optional
dynctrate : array-like, optional
times : array-like, optional
Other parameters
----------------
return_all : bool
if True, return everything, including the dynamical PDS
"""
gti = _assign_value_if_none(
gti, [[time[0] - bintime / 2, time[-1] + bintime / 2]])
start_bins, stop_bins = \
decide_spectrum_lc_intervals(gti, fftlen, time)
cpds = 0
ecpds = 0
npds = len(start_bins)
mask = np.zeros(len(lc1), dtype=np.bool)
results = _empty()
if return_all:
results.dyncpds = []
results.edyncpds = []
results.dynctrate = []
results.times = []
cpds = 0
ecpds = 0
npds = len(start_bins)
for start_bin, stop_bin in zip(start_bins, stop_bins):
l1 = lc1[start_bin:stop_bin]
l2 = lc2[start_bin:stop_bin]
t0 = time[start_bin]
try:
assert np.sum(l1) != 0 and np.sum(l2) != 0, \
'Interval starting at time %.7f is bad. Check GTIs' % t0
f, p, pe, p1, p2 = leahy_cpds(l1, l2, bintime)
except Exception as e:
warnings.warn(str(e))
npds -= 1
continue
cpds += p
ecpds += pe ** 2
if return_all:
results.dyncpds.append(p)
results.edyncpds.append(pe)
results.dynctrate.append(
np.sqrt(np.mean(l1)*np.mean(l2)) / bintime)
results.times.append(time[start_bin])
mask[start_bin:stop_bin] = True
cpds /= npds
ecpds = np.sqrt(ecpds) / npds
ctrate = np.sqrt(np.mean(lc1[mask])*np.mean(lc2[mask])) / bintime
results.f = f
results.cpds = cpds
results.ecpds = ecpds
results.ncpds = npds
results.ctrate = ctrate
return results
def rms_normalize_pds(pds, pds_err, source_ctrate, back_ctrate=None):
"""Normalize a Leahy PDS with RMS normalization ([1]_, [2]_).
Parameters
----------
pds : array-like
The Leahy-normalized PDS
pds_err : array-like
The uncertainties on the PDS values
source_ctrate : float
The source count rate
back_ctrate: float, optional
The background count rate
Returns
-------
pds : array-like
the RMS-normalized PDS
pds_err : array-like
the uncertainties on the PDS values
References
----------
.. [1] Belloni & Hasinger 1990, A&A, 230, 103
.. [2] Miyamoto+1991, ApJ, 383, 784
"""
back_ctrate = _assign_value_if_none(back_ctrate, 0)
factor = (source_ctrate + back_ctrate) / source_ctrate ** 2
return pds * factor, pds_err * factor
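# Worked example (comments only): with a 100 ct/s source over a 20 ct/s background,
# factor = (100 + 20) / 100**2 = 0.012, so a Leahy power of 2 maps to
# 0.024 (rms/mean)^2 / Hz.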
def decide_spectrum_intervals(gtis, fftlen):
"""Decide the start times of PDSs.
Start each FFT/PDS/cospectrum from the start of a GTI, and stop before the
next gap.
Only use for events! This will give problems with binned light curves.
Parameters
----------
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
fftlen : float
Length of the chunks
Returns
-------
spectrum_start_times : array-like
List of starting times to use in the spectral calculations.
"""
spectrum_start_times = np.array([], dtype=np.longdouble)
for g in gtis:
if g[1] - g[0] < fftlen:
logging.info("GTI at %g--%g is Too short. Skipping." %
(g[0], g[1]))
continue
newtimes = np.arange(g[0], g[1] - fftlen, np.longdouble(fftlen),
dtype=np.longdouble)
spectrum_start_times = \
np.append(spectrum_start_times,
newtimes)
assert len(spectrum_start_times) > 0, \
"No GTIs are equal to or longer than fftlen. " + \
"Choose shorter fftlen (MPfspec -f <fftlen> <options> <filename>)"
return spectrum_start_times
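# Worked example (comments only): for a single GTI [0, 1000] s and fftlen = 256 s,
# np.arange(0, 1000 - 256, 256) gives start times [0, 256, 512]; the 232-s tail
# after 768 s is shorter than fftlen and is dropped.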
def decide_spectrum_lc_intervals(gtis, fftlen, time):
"""Similar to decide_spectrum_intervals, but dedicated to light curves.
In this case, it is necessary to specify the time array containing the
times of the light curve bins.
Returns start and stop bins of the intervals to use for the PDS
Parameters
----------
gtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
fftlen : float
Length of the chunks
time : array-like
Times of light curve bins
"""
bintime = time[1] - time[0]
nbin = np.long(fftlen / bintime)
if time[-1] < np.min(gtis) or time[0] > np.max(gtis):
raise ValueError("Invalid time interval for the given GTIs")
spectrum_start_bins = np.array([], dtype=np.long)
for g in gtis:
if g[1] - g[0] < fftlen:
logging.info("GTI at %g--%g is Too short. Skipping." %
(g[0], g[1]))
continue
startbin = np.argmin(np.abs(time - g[0]))
stopbin = np.argmin(np.abs(time - g[1]))
if time[stopbin] - time[startbin] + bintime < fftlen:
logging.info("T. int. at %g--%g is Too short. Skipping." %
(time[startbin], time[stopbin]))
continue
if stopbin == nbin - 1:
# Corner case
newbins = [startbin]
else:
newbins = np.arange(startbin, stopbin - nbin + 1, nbin,
dtype=np.long)
spectrum_start_bins = \
np.append(spectrum_start_bins,
newbins)
if len(spectrum_start_bins) == 0:
raise ValueError(
"No GTIs or data intervals are equal to or longer than fftlen. " + \
"Choose shorter fftlen (MPfspec -f <fftlen> <options> <filename>)")
return spectrum_start_bins, spectrum_start_bins + nbin
def calc_pds(lcfile, fftlen,
save_dyn=False,
bintime=1,
pdsrebin=1,
normalization='Leahy',
back_ctrate=0.,
noclobber=False,
outname=None):
"""Calculate the PDS from an input light curve file.
Parameters
----------
lcfile : str
The light curve file
fftlen : float
The length of the chunks over which FFTs will be calculated, in seconds
Other Parameters
----------------
save_dyn : bool
If True, save the dynamical power spectrum
bintime : float
The bin time. If different from that of the light curve, a rebinning is
performed
pdsrebin : int
Rebin the PDS of this factor.
normalization : str
'Leahy' or 'rms'
back_ctrate : float
The non-source count rate
noclobber : bool
If True, do not overwrite existing files
outname : str
If speficied, output file name. If not specified or None, the new file
will have the same root as the input light curve and the '_pds' suffix
"""
root = mp_root(lcfile)
outname = _assign_value_if_none(outname, root + '_pds' + MP_FILE_EXTENSION)
if noclobber and os.path.exists(outname):
print('File exists, and noclobber option used. Skipping')
return
logging.info("Loading file %s..." % lcfile)
lcdata = load_lcurve(lcfile)
time = lcdata['time']
mjdref = lcdata['MJDref']
try:
lc = lcdata['lccorr']
except:
lc = lcdata['lc']
dt = lcdata['dt']
gti = lcdata['GTI']
instr = lcdata['Instr']
tctrate = lcdata['total_ctrate']
if bintime <= dt:
bintime = dt
else:
lcrebin = np.rint(bintime / dt)
bintime = lcrebin * dt
logging.info("Rebinning lc by a factor %d" % lcrebin)
time, lc, dum = \
const_rebin(time, lc, lcrebin, normalize=False)
results = welch_pds(time, lc, bintime, fftlen, gti, return_all=True)
freq = results.f
pds = results.pds
epds = results.epds
npds = results.npds
ctrate = results.ctrate
freq, pds, epds = const_rebin(freq[1:], pds[1:], pdsrebin,
epds[1:])
if normalization == 'rms':
logging.info('Applying %s normalization' % normalization)
pds, epds = \
rms_normalize_pds(pds, epds,
source_ctrate=ctrate,
back_ctrate=back_ctrate)
for ic, pd in enumerate(results.dynpds):
ep = results.edynpds[ic].copy()
ct = results.dynctrate[ic].copy()
pd, ep = rms_normalize_pds(pd, ep, source_ctrate=ct,
back_ctrate=back_ctrate)
results.edynpds[ic][:] = ep
results.dynpds[ic][:] = pd
outdata = {'time': time[0], 'pds': pds, 'epds': epds, 'npds': npds,
'fftlen': fftlen, 'Instr': instr, 'freq': freq,
'rebin': pdsrebin, 'norm': normalization, 'ctrate': ctrate,
'total_ctrate': tctrate,
'back_ctrate': back_ctrate, 'MJDref': mjdref}
if 'Emin' in lcdata.keys():
outdata['Emin'] = lcdata['Emin']
outdata['Emax'] = lcdata['Emax']
if 'PImin' in lcdata.keys():
outdata['PImin'] = lcdata['PImin']
outdata['PImax'] = lcdata['PImax']
logging.debug(repr(results.dynpds))
if save_dyn:
outdata["dynpds"] = np.array(results.dynpds)[:, 1:]
outdata["edynpds"] = np.array(results.edynpds)[:, 1:]
outdata["dynctrate"] = | np.array(results.dynctrate) | numpy.array |
from reader import process_feature, read_data
import random
import numpy as np
import tensorflow as tf
from model import RNNModel
from sklearn import model_selection
from sklearn.metrics import roc_curve, auc, f1_score, average_precision_score
import os
import logging
import argparse
import matplotlib.pyplot as plt
from tqdm import tqdm
from operator import attrgetter
import math
def get_feature_label(data, length_limit = math.inf):
data = list(filter(lambda d: d.seq is not None, data))
for i in range(len(data)):
if not hasattr(data[i], 'features'):
data[i].get_feature()
length = np.array(list(map(lambda x : x.length, data)), dtype = np.int32)
# print(length)
max_length = min(np.max(length), length_limit)
feature_dim = data[0].feature_dim
batch_size = len(data)
x = np.zeros([batch_size, max_length, feature_dim])
y = np.zeros([batch_size, 1])
for i in range(len(data)):
length[i] = min(length[i], length_limit)
s = np.random.randint(data[i].length - length[i] + 1)
x[i, : length[i], :] = data[i].features[s : s + length[i], :]
y[i, 0] = data[i].label
return x, y, length
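# Shape sketch (comments only; a feature_dim of 40 is assumed for illustration):
#   x, y, length = get_feature_label(batch_of_64, length_limit=1000)
#   x.shape -> (64, 1000, 40)   zero-padded beyond each sequence's true length
#   y.shape -> (64, 1)          binary labels
#   length  -> (64,)            true (possibly clipped) lengths passed alongside x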
def val(model, data, batch_size = 64):
model.init_streaming()
for i in range(0, len(data), batch_size):
model.val(*get_feature_label(data[i:i+batch_size], length_limit = 1000))
return model.get_summaries()
# class SimpleLengthModel:
# threshold_length = 1000
# @classmethod
# def data_filter(cls, data):
# return data.length <= cls.threshold_length
def batch_data_provider(data, batch_size):
data_label = []
for l in (0, 1):
data_label.append(list(filter(lambda d : d.label == l, data)))
samples_label = [int(batch_size / 2), int(batch_size / 2) ]
while True:
yield random.sample(data_label[0], samples_label[0]) + random.sample(data_label[1], samples_label[1])
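# Behaviour sketch (comments only): every yielded batch is class-balanced,
# half label-0 and half label-1 examples.
#   provider = batch_data_provider(train_data, batch_size=64)
#   batch = next(provider)   # 32 negatives + 32 positives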
def train(train_data, val_data, steps = 6000, val_per_steps = 300, checkpoint_per_steps=100, batch_size = 64, learning_rate = 0.01, **kwargs):
global args
# train_data = list(filter(SimpleLengthModel.data_filter, train_data))
# val_data = list(filter(SimpleLengthModel.data_filter, val_data))
model = RNNModel(feature_dims=train_data[0].feature_dim, model_dir=args.output_dir, **kwargs)
if args.checkpoint is not None:
model.restore(args.checkpoint)
data_provider = batch_data_provider(train_data, batch_size=batch_size)
for t in range(0, steps):
x, y, length = get_feature_label(next(data_provider), length_limit=1000)
result = model.train(x, y, length, learning_rate)
logging.info("step = {}: {}".format(model.global_step, result))
if model.global_step % val_per_steps == 0:
result = val(model, val_data)
model.init_streaming()
logging.info("validation for step = {}: {}".format(model.global_step, result))
if model.global_step % checkpoint_per_steps == 0:
model.save_checkpoint()
logging.info("save checkpoint at {}".format(model.global_step))
if model.global_step % 2000 == 0:
learning_rate *= 0.2
logging.info("current learning rate = {}".format(learning_rate))
def test(data, batch_size=64, filename='roc.png', **kwargs):
global args
assert args.checkpoint is not None
model = RNNModel(feature_dims=data[0].feature_dim, model_dir=args.output_dir, **kwargs)
model.restore(args.checkpoint)
data = list(filter(lambda d: d.seq is not None, data))
for i in tqdm(range(0, len(data), batch_size)):
x, y, length = get_feature_label(data[i:i+batch_size], length_limit=10000)
predictions = model.predict(x, length)
for l,p in zip(data[i:i+batch_size], predictions):
l.prediction = p
# if SimpleLengthModel.data_filter(data[i]):
# x, y, length = get_feature_label(data[i:i+batch_size], length_limit=1000)
# predictions = model.predict(x, length)
# for l,p in zip(data[i:i+batch_size], predictions):
# l.prediction = p
# else:
# for l in data[i:i+batch_size]:
# l.prediction = 1 + l.length / 100000.0
predictions = list(map(attrgetter('prediction'), data))
labels = list(map(attrgetter('label'), data))
plot_roc(predictions, labels, filename=filename)
def baseline(data, filename):
labels = list(map(attrgetter('label'), data))
predictions = list(map(attrgetter('length'), data))
plot_roc(predictions, labels, filename=filename)
def plot_roc(predictions, labels, plot_samples = 50, filename='roc.png'):
global args
predictions = np.array(predictions)
import numpy as np
f1_alpha=1000 #set alpha of f1
f3_epsilon=1e-6 #set epsilon of f3
f45_q=10**8
def f1(x): #ellipsoid function
dim=x.shape[0] #dimension number
result=0
for i in range(dim):
result+=f1_alpha**(i/(dim-1))*x[i]**2
return result
def g1(x):
dim=x.shape[0]
result=np.zeros(dim)
for i in range(dim):
result[i]=2*(f1_alpha**(i/(dim-1)))*x[i]
return result
def h1(x):
dim=x.shape[0]
result=np.zeros((dim,dim))
for i in range(dim):
result[i,i]=2*(f1_alpha**(i/(dim-1)))
return result
f2 = lambda x: (1-x[0])**2+100*(x[1]-x[0]**2)**2; #Rosenbrok function
g2 = lambda x: np.array([-400*x[0]*(x[1]-x[0]**2)-2*(1-x[0]), 200*(x[1]-x[0]**2)])
h2 = lambda x: np.array([[2+1200*x[0]**2-400*x[1], -400*x[0]], [-400*x[0], 200]])
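# Sanity-check sketch (comments only): at the Rosenbrock minimum x* = (1, 1)
#   f2(np.array([1., 1.])) -> 0.0
#   g2(np.array([1., 1.])) -> zero gradient
#   h2(np.array([1., 1.])) -> [[802., -400.], [-400., 200.]], a positive definite Hessian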
f3 = lambda x: np.log(f3_epsilon+f1(x)) #log ellipsoid function
def g3(x):
dim=x.shape[0]
result=np.zeros(dim)
import sys
sys.path.append('../nvm')
import numpy as np
import pickle
from activator import tanh_activator
from coder import Coder
from op_net import arith_ops
class CHL_Net:
def __init__(self, Ns, feedback=False, split_learn=False, biases=True):
self.sizes = [N for N in Ns]
self.feedback = feedback
self.split_learn = split_learn
self.biases = biases
self.W, self.B = [],[]
self.G = []
for i in range(len(Ns)-1):
size, prior_size = Ns[i+1], Ns[i]
self.W.append(2 * np.random.random((size, prior_size)) - 1)
self.B.append(2 * np.random.random((size, 1)) - 1)
self.G.append(np.random.normal(0.0, 1.0, ((size, prior_size))))
self.activity = [np.zeros((size,1))
import numpy as np
import pytest
from cgn.regop import IdentityOperator
from cgn import LinearConstraint, Parameter, Problem
def test_problem():
# Initialize parameters
n1 = 20
n2 = 50
x = Parameter(start=np.zeros(n1), name="x")
y = Parameter(start=np.zeros(n2), name="y")
x.beta = 0.384
y.beta = 32.2
x.lb = np.zeros(n1)
x.mean = np.random.randn(n1)
y.mean = np.random.randn(n2)
x.regop = IdentityOperator(dim=n1)
y.regop = IdentityOperator(dim=n2)
# Initialize constraints
a = np.ones((1, n1 + n2))
b = np.ones((1,))
eqcon = LinearConstraint(parameters=[x, y], a=a, b=b, ctype="eq")
c = np.eye(n2)
d = np.ones(n2)
incon = LinearConstraint(parameters=[y], a=c, b=d, ctype="ineq")
scale = 0.1
# Initialize misfit function and Jacobian
def fun(x1, x2):
return np.square(np.concatenate((x1, x2), axis=0))
def jac(x1, x2):
return 2 * np.diagflat(np.concatenate((x1, x2), axis=0))
problem = Problem(parameters=[x, y], fun=fun, jac=jac, constraints=[eqcon, incon], scale=scale)
# Check that the correct problem was initialized
assert isinstance(problem.q, IdentityOperator)
assert problem.nparams == 2
assert problem.n == n1 + n2
def test_constraints_must_depend_on_problem_parameters():
n1 = 20
n2 = 50
n3 = 1
x = Parameter(start=np.zeros(n1), name="x")
y = Parameter(start=np.zeros(n2), name="y")
z = Parameter(start=np.zeros(n3), name="z")
a1 = np.random.randn(n1 + n2 + n3, n1 + n2 + n3)
import unittest
import numpy
from pyscf import lib
einsum = lib.einsum
lib.numpy_helper.EINSUM_MAX_SIZE, bak = 0, lib.numpy_helper.EINSUM_MAX_SIZE
def tearDownModule():
lib.numpy_helper.EINSUM_MAX_SIZE = bak
class KnownValues(unittest.TestCase):
def test_d_d(self):
a = numpy.random.random((7,1,3,4))
b = numpy.random.random((2,4,5,7))
c0 = numpy.einsum('abcd,fdea->cebf', a, b)
c1 = einsum('abcd,fdea->cebf', a, b)
self.assertTrue(c0.dtype == c1.dtype)
self.assertTrue(abs(c0-c1).max() < 1e-14)
def test_c_c(self):
a = numpy.random.random((7,1,3,4)).astype(numpy.float32)
b = numpy.random.random((2,4,5,7)).astype(numpy.float32)
c0 = numpy.einsum('abcd,fdea->cebf', a, b)
c1 = einsum('abcd,fdea->cebf', a, b)
self.assertTrue(c0.dtype == c1.dtype)
self.assertTrue(abs(c0-c1).max() < 1e-5)
def test_c_d(self):
a = numpy.random.random((7,1,3,4)).astype(numpy.float32) + 0j
b = numpy.random.random((2,4,5,7)).astype(numpy.float32)
c0 = numpy.einsum('abcd,fdea->cebf', a, b)
c1 = einsum('abcd,fdea->cebf', a, b)
self.assertTrue(c0.dtype == c1.dtype)
self.assertTrue(abs(c0-c1).max() < 1e-5)
def test_d_z(self):
a = numpy.random.random((7,1,3,4))
b = numpy.random.random((2,4,5,7)) + 0j
c0 = numpy.einsum('abcd,fdea->cebf', a, b)
c1 = einsum('abcd,fdea->cebf', a, b)
self.assertTrue(c0.dtype == c1.dtype)
self.assertTrue(abs(c0-c1).max() < 1e-14)
def test_z_z(self):
a = numpy.random.random((7,1,3,4)) + 0j
b = numpy.random.random((2,4,5,7)) + 0j
c0 = numpy.einsum('abcd,fdea->cebf', a, b)
c1 = einsum('abcd,fdea->cebf', a, b)
self.assertTrue(c0.dtype == c1.dtype)
self.assertTrue(abs(c0-c1).max() < 1e-14)
def test_d_dslice(self):
a = numpy.random.random((7,1,3,4))
b = numpy.random.random((2,4,5,7))
c0 = numpy.einsum('abcd,fdea->cebf', a, b[:,:,1:3,:])
c1 = einsum('abcd,fdea->cebf', a, b[:,:,1:3,:])
self.assertTrue(c0.dtype == c1.dtype)
self.assertTrue(abs(c0-c1).max() < 1e-14)
def test_d_dslice1(self):
a = numpy.random.random((7,1,3,4))
b = numpy.random.random((2,4,5,7))
c0 = numpy.einsum('abcd,fdea->cebf', a[:4].copy(), b[:,:,:,2:6])
c1 = einsum('abcd,fdea->cebf', a[:4].copy(), b[:,:,:,2:6])
self.assertTrue(c0.dtype == c1.dtype)
self.assertTrue(abs(c0-c1).max() < 1e-14)
def test_dslice_d(self):
a = numpy.random.random((7,1,3,4))
b = numpy.random.random((2,4,5,7))