from lists_and_dicts import *
from methods_and_classes import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import ticker
import seaborn as sns
# global parameters
pd.options.display.float_format = '{:,.2f}'.format
plt.style.use('my_style')
stock = pd.read_csv('data/stock_transactions.csv')
new = stock['alt_product_group & cc'].str.split(n=0, expand=True)
stock['cc'] = new[0]
stock['alternate_product_group'] = new[1]
stock['cost_value'] = stock['unit_cost'] * stock['quantity']
stock = stock.drop(columns=['alt_product_group & cc'])
num_invalid_apg = pd.isnull(stock['alternate_product_group']).sum()
print(f'You have {num_invalid_apg} records with an invalid "alternate_product_group" after preprocessing')
num_invalid_cc = pd.isnull(pd.to_numeric(stock['cc'], errors='coerce')).sum()
print(f'You have {num_invalid_cc} records with an invalid "cc" after preprocessing')
# transaction type 1: debit raw materials
chemical_sum = summarise_stock(stock, 1, 48, '50', 'quantity', np.sum)
oil_sum = summarise_stock(stock, 1, 40, '55', 'quantity', np.sum)
plastics_sum = summarise_stock(stock, 1, 60, '60', 'quantity', np.sum)
paper_sum = summarise_stock(stock, 1, 41, '80', 'quantity', np.sum)
tuffa_sum = summarise_stock(stock, 1, 51, '85', 'quantity', np.sum)
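# note: summarise_stock comes from the local methods_and_classes module (not shown here);
# judging from the calls above it presumably filters `stock` by transaction type, cc and
# alternate_product_group and aggregates the chosen column with the supplied function.
# This is an assumption about the helper, not its actual definition.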
# concatenate
raw_mats_one = pd.DataFrame()
from T2GEORES import geometry as geometry
import numpy as np
import re as re
import subprocess
import datetime
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import shutil
import os
import itertools
import json
import shapefile
import pylab as plb
import math
import sys
from scipy.spatial import ConvexHull
from scipy.interpolate import griddata
from scipy.spatial.distance import cdist
from scipy.spatial import cKDTree
import pandas as pd
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
import pyvista as pv
import vtk
import sqlite3
import geopandas as gpd
import string
from lloydRelax import Field
class py2amesh:
"""It creates a mesh based on well positions and irregular blocks
The main characteristics are:
-It generates the mesh based on defined boundaries
-The inner section is called wellfield and can contain elements with hexagonal or square shape
-The wellfield can be delimited by a shapefile or by fixed squared boundaries.
-It allows generating elements along a line to represent structures.
-The Voronoi elements are generated by AMESH.
-The element names cannot contain two consecutive zeros.
-The well blocks are the first on the list.
-It creates the input files to work on Steinar (RockEditor).
-It can export a defined layer in shapefile format.
-It can plot a selected layer (it is recommended to use the function plot_voronoi() to plot).
-Two json files are generated with the correlative block for each well, which can be tracked during all the modelling steps.
Parameters
----------
filename : str
File name with well feedzone location
filepath : str
Path of input files
Xmin : float
Minimum X coordinate for the grid
Xmax : float
Maximum X coordinate for the grid
Ymin : float
Minimum Y coordinate for the grid
Ymax : float
Maximum Y coordinate for the grid
toler : float
AMESH parameter
layers : dictionary
Name (correlative) and thickness of every layer on the model, keyword on input_dictionary
layer_to_plot : int
In case it is specified, a Voronoi plot will be performed
x_space : float
Horizontal distance between elements for the outerfield
y_space : float
Vertical distance between elements for the outerfield
radius_criteria: float
Minimum distance between well location and a regular element
x_from_boarder: float
Horizontal distance from the first element to the east border
y_from_boarder: float
Vertical distance from the first element to the south border
x_gap_min: float
Minimum X coordinate on the grid for the wellfield
x_gap_max: float
Maximum X coordinate on the grid for the wellfield
x_gap_space: float
Horizontal distance between elements for the wellfield (inner field)
y_gap_min: float
Minimum Y coordinate on the grid for the wellfield
y_gap_max: float
Maximum Y coordinate on the grid for the wellfield
y_gap_space: float
Vertical distance between elements for the wellfield (inner field)
plot_names: bool
If true it plots the name of the blocks from the selected layer to plot
plot_centers: bool
If true it plots the centers of the blocks from the selected layer to plot
z0_level: float
Reference level (elevation) for all the grid, keyword on input_dictionary
mesh_creation: bool
If true the mesh is created
plot_layer: bool
If true it plots the selected layer
to_steinar: bool
If true it creates the input files for steinar
to_GIS: bool
If true it generates a shapefile of the selected layer
plot_all_GIS: bool
If true it generates a shapefile of all layers
from_leapfrog: bool
If true, it reads the Leapfrog files ../mesh/from_leapfrog/LF_geometry.dat and ../mesh/from_leapfrog/LF_t2.dat; however, the symbology used in Leapfrog is lost and neither the regular mesh nor the wells are used. The mesh is created using AMESH only
line_file: str
It defines the path and name of a line file that can represent a fault or other structure on the mesh. The input file must be in csv format with the header: ID,X,Y. ID refers to the same
structure, thus more than one structure can be defined in a single file.
fault_distance: float
In case a line_file is defined, some parallel elements will be created at a defined distance
with_polygon: bool
If true a shapefile will be read to define the wellfield.
polygon_shape: str
The shapefile defines the wellfield boundaries. The shape must not contain any cavity
set_inac_from_poly: bool
If true all the elements on the outside of the shapefile are defined as inactive
set_inac_from_inner:bool
If true all the elements on the outerfield are defined as inactive
rotate: bool
If true it rotates the mesh a defined angle
angle: float
Angle in degrees
inner_mesh_type: string
Type of mesh on the inner part of the mesh, it could be 'honeycomb' or 'regular'
Returns
-------
file
eleme: list of blocks from the grid
file
conne : list of connections on the grid
shapefile
mesh_{field}_layer_{layer} : shapefile of a defined (or all) layer including rock distribution
plot
Voronoi plot (in case it is specified)
Attention
---------
A copy of AMESH must be on the path or directory
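Examples
--------
The class is normally driven through mesh_creation_func, which maps the keys of
input_mesh_dictionary onto the parameters listed above. A direct call is sketched
below with a hypothetical keyword dictionary (py2amesh_kwargs is not defined in
this module and only illustrates the pattern):

>>> blocks = py2amesh(**py2amesh_kwargs)
>>> amesh_log = blocks.input_file_to_amesh()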
"""
def __init__(self,filename,filepath,Xmin,Xmax,Ymin,Ymax,\
toler,layers,layer_to_plot,x_space,y_space,radius_criteria,\
x_from_boarder,y_from_boarder,\
x_gap_min,x_gap_max,x_gap_space,y_gap_min,y_gap_max,y_gap_space,\
plot_names,plot_centers,z0_level,plot_all_GIS,from_leapfrog,line_file,fault_distance,with_polygon,polygon_shape,set_inac_from_poly,set_inac_from_inner,rotate,angle,inner_mesh_type,\
distance_points,fault_rows,relaxation_times,points_around_well,distance_points_around_well):
self.filename=filename
self.filepath=filepath
self.layers=layers
self.number_of_layer=len(layers)
self.Xmin=Xmin
self.Xmax=Xmax
self.Ymin=Ymin
self.Ymax=Ymax
self.z0_level=z0_level
self.layer_to_plot=layer_to_plot
self.radius_criteria=radius_criteria
self.x_space=x_space
self.y_space=y_space
self.x_from_boarder=x_from_boarder
self.y_from_boarder=y_from_boarder
self.z=0
self.delf_rock="101" #Layer index
self.filename_out="in"
self.toler=toler
self.x_gap_min=x_gap_min
self.x_gap_max=x_gap_max
self.x_gap_space=x_gap_space
self.y_gap_min=y_gap_min
self.y_gap_max=y_gap_max
self.y_gap_space=y_gap_space
self.plot_names=plot_names
self.plot_centers=plot_centers
self.plot_all_GIS=plot_all_GIS
self.from_leapfrog=from_leapfrog
self.line_file=line_file
self.fault_distance=fault_distance
self.with_polygon=with_polygon
self.set_inac_from_poly=set_inac_from_poly
self.set_inac_from_inner=set_inac_from_inner
self.rotate=rotate
self.angle=angle
self.inner_mesh_type=inner_mesh_type
self.polygon_shape=polygon_shape
self.distance_points=distance_points
self.fault_rows=fault_rows
self.relaxation_times=relaxation_times
self.points_around_well=points_around_well
self.distance_points_around_well=distance_points_around_well
if self.with_polygon:
shape = shapefile.Reader(polygon_shape)
#first feature of the shapefile
feature = shape.shapeRecords()[0]
points = feature.shape.__geo_interface__
self.polygon=[]
for n in points:
for v in points[n]:
if n=='coordinates':
self.polygon.append([v[0],v[1]]) # (GeoJSON format)
#Read border used to clip and write into the 'in' files
borders=gpd.read_file('../../GIS/reservoir/reservoir_limits_1_pol.shp')
border_points=[]
for line in borders.iterrows():
pointList = line[1].geometry.exterior.coords.xy
for point in zip(pointList[0],pointList[1]):
border_points.append([point[0],point[1]])
self.polygon_external=border_points[::-1][0:-1]
self.color_dict = {1:[['AA','AB','AC','AD','AE','AF','AG'],'ROCK1','red'],\
2:[['BA','BB','BC','BD','BE','BF','BG'],'ROCK2','white'],\
3:[['CA','CB','CC','CD','CE','CF','CG'],'ROCK3','yellow'],\
4:[['DA','DB','DC','DD','DE','DF','DG'],'ROCK4','blue'],\
5:[['EA','EB','EC','ED','EE','EF','EG'],'ROCK5','green'],\
6:[['FA','FB','FC','FD','FE','FF','FG'],'ROCK6','purple'],\
7:[['GA','GB','GC','GD','GE','GF','GG'],'ROCK7','#ff69b4'],\
8:[['HA','HB','HC','HD','HE','HF','HG'],'ROCK8','darkorange'],\
9:[['IA','IB','IC','ID','IE','IF','IG'],'ROCK9','cyan'],\
10:[['JA','JB','JC','JD','JE','JF','JG'],'ROK10','magenta'],\
11:[['KA','KB','KC','KD','KE','KF','KG'],'ROK11','#faebd7'],\
12:[['LA','LB','LC','LD','LE','LF','LG'],'ROK12','#2e8b57'],\
13:[['MA','MB','MC','MD','ME','MF','MG'],'ROK13','#eeefff'],\
14:[['NA','NB','NC','ND','NE','NF','NG'],'ROK14','#da70d6'],\
15:[['OA','OB','OC','OD','OE','OF','OG'],'ROK15','#ff7f50'],\
16:[['PA','PB','PC','PD','PE','PF','PG'],'ROK16','#cd853f'],\
17:[['QA','QB','QC','QD','QE','QF','QG'],'ROK17','#bc8f8f'],\
18:[['RA','RB','RC','RD','RE','RF','RG'],'ROK18','#5f9ea0'],\
19:[['SA','SB','SC','SD','SE','SF','SG'],'ROK19','#daa520'],
20:[['TA','TB','TC','TD','TE','TF','TG'],'ROK20','#daa520'],
21:[['UA','UB','UC','UD','UE','UF','UG'],'ROK21','#daa520'],
22:[['VA','VB','VC','VD','VE','VF','VG'],'ROK22','#daa520'],
23:[['WA','WB','WC','WD','WE','WF','WG'],'ROK23','#daa520'],
24:[['XA','XB','XC','XD','XE','XF','XG'],'ROK24','#daa520'],
25:[['YA','YB','YC','YD','YE','YF','YG'],'ROK25','#daa520'],
26:[['ZA','ZB','ZC','ZD','ZE','ZF','ZG'],'ROK26','#daa520']}
self.rock_dict={}
prof_cont=0
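#The loop below places each layer's reference elevation at the layer midpoint.
#Illustrative example (hypothetical values): with z0_level=500 and layers=[100,200],
#layer 1 gets z_real=500-50=450 and layer 2 gets z_real=500-(50+100+50)=300.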
for jk in range(1,len(layers)+1):
if jk==1:
prof_cont=layers[jk-1]*0.5
z_real=z0_level-prof_cont
elif jk>1:
prof_cont=prof_cont+layers[jk-1]*0.5+layers[jk-2]*0.5
z_real=z0_level-prof_cont
self.rock_dict[jk]=[self.color_dict[jk][0][0],self.color_dict[jk][1],\
self.color_dict[jk][2],self.color_dict[jk][0][0],z_real,self.layers[jk-1]]
def regular_mesh(self):
"""Genera malla regular en en toda la extension de la region definida por Xmin,Xmax,Ymin y Ymax
"""
x_regular=range(self.Xmin+self.x_from_boarder,self.Xmax+self.x_space-self.x_from_boarder,self.x_space)
y_regular=range(self.Ymin+self.y_from_boarder,self.Ymax+self.y_space-self.y_from_boarder,self.y_space)
x_regular_small=range(self.x_gap_min,self.x_gap_max+self.x_gap_space,self.x_gap_space)
y_regular_small=range(self.y_gap_min,self.y_gap_max+self.y_gap_space,self.y_gap_space)
self.mesh_array=[]
for nx in x_regular:
for ny in y_regular:
if ((nx<self.x_gap_min) or (nx>self.x_gap_max)) or ((ny<self.y_gap_min) or (ny>self.y_gap_max)):
self.mesh_array.append([nx,ny])
#Small polygon area must be here
for nxx in x_regular_small:
cnt=0
for nyy in y_regular_small:
if [nxx,nyy] not in self.mesh_array:
if self.inner_mesh_type=='honeycomb':
if cnt%2==0:
self.mesh_array.append([nxx,nyy])
else:
self.mesh_array.append([nxx+self.x_gap_space/2,nyy])
elif self.inner_mesh_type=='regular':
self.mesh_array.append([nxx,nyy])
cnt+=1
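#If requested, the mesh is rotated about (Xmin,Ymin) using the standard 2D rotation
#x'=x*cos(a)-y*sin(a), y'=x*sin(a)+y*cos(a), where a is the angle in radians and
#x,y are the coordinates measured relative to (Xmin,Ymin).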
if self.rotate:
angle=self.angle
for pair in range(len(self.mesh_array)):
x1=self.mesh_array[pair][0]-self.Xmin
y1=self.mesh_array[pair][1]-self.Ymin
self.mesh_array[pair][0]=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+self.Xmin
self.mesh_array[pair][1]=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+self.Ymin
return np.array(self.mesh_array)
def check_in_out(self,position,point,source):
"""Verifica si un punto de la malla del campo cercano esta dentro o fuera del poligo definido por el shapefile de entrada o del campo cercano
"""
if position=='internal':
polygon=self.polygon
elif position=='external':
polygon=self.polygon_external
boolean=False
if source=='shapefile':
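#The loop below is essentially a horizontal ray-casting test: for each polygon edge
#whose y-range brackets the point, the x of the edge at the point's y is obtained from
#the fitted line and crossings to the right of the point are counted; cnt==1 is
#interpreted as "inside", which implicitly assumes a simple, convex-like outline.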
cnt=0
for n in range(len(polygon)):
if n+1!=len(polygon):
m,b=plb.polyfit([polygon[n][0],polygon[n+1][0]],[polygon[n][1],polygon[n+1][1]],1)
val_range=[polygon[n][1],polygon[n+1][1]]
elif n+1==len(polygon):
m,b=plb.polyfit([polygon[-1][0],polygon[0][0]],[polygon[-1][1],polygon[0][1]],1)
val_range=[polygon[-1][1],polygon[0][1]]
x=(point[1]-b)/m
if point[0]<x and min(val_range)<point[1] and point[1]<max(val_range):
cnt+=1
if cnt==1:
boolean=True
elif source=='inner':
Xarray_inner=np.array([self.x_gap_min,self.x_gap_max+self.x_gap_space])
Yarray_inner=np.array([self.y_gap_min,self.y_gap_max+self.y_gap_space])
if Xarray_inner[0]<point[0] and Xarray_inner[1]>point[0] and Yarray_inner[0]<point[1] and Yarray_inner[1]>point[1]:
boolean=True
return boolean
def reg_pol_mesh(self):
"""Crea malla regular cuando existe un poligono de entrada
"""
#x_regular=np.arange(self.Xmin+self.x_from_boarder,self.Xmax+self.x_space-self.x_from_boarder,self.x_space)
#y_regular=np.arange(self.Ymin+self.y_from_boarder,self.Ymax+self.y_space-self.y_from_boarder,self.y_space)
nx=40 #number of elements in one direction
n_times=4
ny=int((self.Ymax+2*n_times*self.x_from_boarder-self.Ymin)*nx/(self.Xmax-self.Xmin+n_times*2*self.x_from_boarder))
x_regular=np.linspace(self.Xmin-n_times*self.x_from_boarder,self.Xmax+n_times*self.x_from_boarder,nx,endpoint=True) #generation of regular grid on X
y_regular=np.linspace(self.Ymin-n_times*self.y_from_boarder,self.Ymax+n_times*self.x_from_boarder,ny,endpoint=True) #generation of regular grid on Y
self.mesh_array=[]
for nx in x_regular:
for ny in y_regular:
self.mesh_array.append([nx,ny])
if self.rotate:
angle=self.angle
for pair in range(len(self.mesh_array)):
x1=self.mesh_array[pair][0]-(self.Xmin-n_times*self.x_from_boarder)
y1=self.mesh_array[pair][1]-(self.Ymin-n_times*self.x_from_boarder)
self.mesh_array[pair][0]=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+self.Xmin-n_times*self.x_from_boarder
self.mesh_array[pair][1]=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+self.Ymin-n_times*self.x_from_boarder
x_pol=[]
y_pol=[]
for n in range(len(self.polygon)):
x_pol.append(int(self.polygon[n][0]))
y_pol.append(int(self.polygon[n][1]))
x_pol.append(int(self.polygon[0][0]))
y_pol.append(int(self.polygon[0][1]))
x_gap_min=self.x_gap_min#min(x_pol)
x_gap_max=self.x_gap_max#max(x_pol)
y_gap_min=self.y_gap_min#min(y_pol)
y_gap_max=self.y_gap_max#max(y_pol)
small_mesh=[]
#x_regular_small=np.arange(x_gap_min,x_gap_max+self.x_gap_space,self.x_gap_space)
#y_regular_small=np.arange(y_gap_min,y_gap_max+self.y_gap_space,self.y_gap_space)
x_regular_small=np.arange(x_gap_min,x_gap_max,self.x_gap_space)
y_regular_small=np.arange(y_gap_min,y_gap_max,self.y_gap_space)
for nxx in x_regular_small:
cnt=0
for nyy in y_regular_small:
if [nxx,nyy] not in small_mesh:
if self.inner_mesh_type=='honeycomb':
if cnt%2==0:
small_mesh.append([nxx,nyy])
else:
small_mesh.append([nxx+self.x_gap_space/2,nyy])
elif self.inner_mesh_type=='regular':
small_mesh.append([nxx,nyy])
cnt+=1
if self.rotate:
angle=self.angle
for pair in range(len(small_mesh)):
x1=small_mesh[pair][0]-self.x_gap_min
y1=small_mesh[pair][1]-self.y_gap_min
small_mesh[pair][0]=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+self.x_gap_min
small_mesh[pair][1]=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+self.y_gap_min
to_delete=[]
for v in range(len(self.mesh_array)):
point=[self.mesh_array[v][0],self.mesh_array[v][1]]
check=self.check_in_out('internal',point,source='shapefile')
if check:
to_delete.append(v)
self.mesh_array=np.delete(self.mesh_array, to_delete, 0)
to_delete=[]
for v in range(len(small_mesh)):
point=[small_mesh[v][0],small_mesh[v][1]]
check=self.check_in_out('internal',point,source='shapefile')
if not check:
to_delete.append(v)
small_mesh=np.delete(small_mesh, to_delete, 0)
mesh_pol=[]
for vk in range(len(self.mesh_array)):
mesh_pol.append([self.mesh_array[vk][0],self.mesh_array[vk][1]])
for vk in range(len(small_mesh)):
mesh_pol.append([small_mesh[vk][0],small_mesh[vk][1]])
return np.array(mesh_pol)
def radius_select(self,x0,y0,xr,yr,type_i='mesh'):
"""Verifica si dos puntos estan mas cerca que el criterio seleccionado
"""
r=((x0-xr)**2+(y0-yr)**2)**0.5
if type_i=='mesh':
cx=self.radius_criteria
elif type_i=='well':
cx=10*0.4*2**0.5
elif type_i=='fault':
cx=15
if r<cx:
boolean=1
else:
boolean=0
return boolean
def from_leapfrog_mesh(self):
"""Extrae puntos mas extremos y la posicion de los elementos de un set de datos provinientes de leapfrog, sin embargo no considera los elementos asociados a la roca ATM 0
"""
geometry_file="../mesh/from_leapfrog/LF_geometry.dat"
leapfrog_t2_file="../mesh/from_leapfrog/LF_t2.dat"
#Creates a dictionary using the layers
printlayer=False
layer_min=[]
layers={}
with open(geometry_file,'r') as f:
for line in f.readlines():
if line.rstrip()=='LAYERS':
printlayer=True
continue
elif line.rstrip()=='SURFA' or line.rstrip()=='':
printlayer=False
if printlayer:
layer=line.rstrip()[0:2]
if layer==' 0':
layer_min.append(line.rstrip()[2:13])
layer_middle=line.rstrip()[13:23]
layer_max=line.rstrip()[13:23]
continue
else:
layer_max=layer_min[-1]
layer_min.append(line.rstrip()[2:13])
layer_middle=line.rstrip()[13:23]
layers[int(layer)]=[float(layer_max),float(layer_middle),float(layer_min[-1])]
max_layer=max(layers.keys())
xc=[]
yc=[]
self.LP_mesh=[]
printeleme=False
#Takes the elements at the selected layer
with open(leapfrog_t2_file,'r') as f:
for line in f.readlines():
if line.rstrip()=='ELEME':
printeleme=True
continue
elif line.rstrip()=='CONNE' or line.rstrip()=='':
printeleme=False
if printeleme and line.rstrip()[0:5]!="ATM 0" and int(line.rstrip()[3:5])==max_layer:
xc=float(line.rstrip()[51:60])
yc=float(line.rstrip()[60:70])
self.LP_mesh.append([xc,yc])
#Creates a dictionary of the VERTICES
printvertices=False
self.x_min=1E20
self.x_max=0
self.y_min=1E20
self.y_max=0
with open(geometry_file,'r') as f:
for line in f.readlines():
#It will read between the keywords VERTICES and GRID
if line.rstrip()=='VERTICES':
printvertices=True
continue
elif line.rstrip()=='GRID' or line.rstrip()=="":
printvertices=False
if printvertices:
vertice_x=float(line.rstrip()[4:13])
vertice_y=float(line.rstrip()[13:23])
if vertice_x>self.x_max:
self.x_max=vertice_x
if vertice_y>self.y_max:
self.y_max=vertice_y
if vertice_x<self.x_min:
self.x_min=vertice_x
if vertice_y<self.y_min:
self.y_min=vertice_y
return self.x_max,self.x_min, self.y_min,self.y_max, self.LP_mesh
def data(self):
"""Define los puntos que ingresaran al archivo de entrada para amesh. Adicionalmente, en caso de definir un linea en el archivo de entrada <i><lines_data/i> se procedera a ingresar estos puntos y crear puntos paralelos en ambos extremos de la linea
"""
self.raw_data=np.genfromtxt(self.filepath+self.filename,dtype={'names':('ID','MD','X','Y','Z','TYPE'),'formats':('<U7','f4','f4','f4','f4','<U10')},delimiter=',',skip_header=True)
self.IDXY={}
if not self.from_leapfrog:
if self.with_polygon:
regular_mesh=self.reg_pol_mesh()
else:
regular_mesh=self.regular_mesh()
else:
x,x1,y1,y2,regular_mesh=self.from_leapfrog_mesh()
for n in range(len(self.raw_data['ID'])):
#Store the data from wells
self.IDXY["%s"%(str(self.raw_data['ID'][n]))]=[self.raw_data['X'][n],self.raw_data['Y'][n],self.raw_data['TYPE'][n]]
#self.IDXY["%s"%(str(self.raw_data['ID'][n])).split("'")[1]]=[self.raw_data['X'][n],self.raw_data['Y'][n],self.raw_data['TYPE'][n]]
to_delete=[]
x0=self.raw_data['X'][n]
y0=self.raw_data['Y'][n]
#Delete the regular points close to the wells
for ngrid in range(len(regular_mesh)):
if abs(x0-regular_mesh[ngrid][0])<self.radius_criteria or abs(y0-regular_mesh[ngrid][1])<self.radius_criteria:
boolean=self.radius_select(x0,y0,regular_mesh[ngrid][0],regular_mesh[ngrid][1])
if boolean==1:
to_delete.append(ngrid)
regular_mesh=np.delete(regular_mesh, to_delete, 0)
#Mesh around the wells
well_mesh=[]
d=self.distance_points_around_well
dw=np.linspace(-d*0.4,d*0.4,num=self.points_around_well)
dw=dw[dw!=0]
for n in range(len(self.raw_data['ID'])):
x=self.raw_data['X'][n]
y=self.raw_data['Y'][n]
for xr in dw:
for yr in dw:
xi=x+xr
yi=y+yr
#Rotating mesh around the wells
if self.rotate:
angle=self.angle
x1=xi-(x)
y1=yi-(y)
xii=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+(x)
yii=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+(y)
well_mesh.append([xii,yii])
else:
well_mesh.append([xi,yi])
well_mesh=np.array(well_mesh)
#Deleting elements of the mesh around the well that are too close to other regular elements of the mesh
for point in regular_mesh:
to_delete=[]
for i,well_point in enumerate(well_mesh):
boolean=self.radius_select(well_point[0],well_point[1],point[0],point[1],type_i='well')
if boolean==1:
to_delete.append(i)
well_mesh=np.delete(well_mesh, to_delete, 0)
#Finally appending the mesh
regular_mesh=np.append(regular_mesh,well_mesh,axis=0)
cnt=0
if self.line_file:
self.lines_data=np.loadtxt(self.filepath+self.line_file,dtype={'names':('ID','X','Y'),'formats':('S10','f4','f4')},delimiter=',',skiprows=1)
lines_dict={}
for v in self.lines_data:
key=v[0].decode('UTF-8')
if key not in list(lines_dict.keys()):
lines_dict[key]=[[float(v[1]),float(v[2])]]
else:
lines_dict[key].append([float(v[1]),float(v[2])])
d=self.fault_distance #Refine this for a better definition along the faults
#ds=np.arange(d,d*2.5,step=d/2)
#df=self.x_gap_space-d/4 #step=d/2
ds=[]
init_d=d/2
for j in range(self.fault_rows):
d_val=init_d*2**(j)
ds.append(d_val)
ds=np.array(ds)
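#Illustrative example (hypothetical values): with fault_distance d=50 and fault_rows=3
#the offsets are ds=[25,50,100], i.e. every additional row of points is placed at twice
#the distance of the previous one on both sides of the structure.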
#Iter over every structure
for n in lines_dict:
x_lines=[]
y_lines=[]
for v in lines_dict[n]:
x_lines.append(v[0])
y_lines.append(v[1])
x_lines_np=np.array([]) #Initialize X position of structures
y_lines_np=np.array([]) #Initialize Y position of structures
for nv in range(len(x_lines)):
if nv+1<len(x_lines):
#Generates a line with the usual two points of the fault
line=[(x_lines[nv],y_lines[nv]),(x_lines[nv+1],y_lines[nv+1])]
#Calculates the distance between the points
distDiag = cdist(line,line,'euclidean').diagonal(1)
#Estimates the number of new points
d_btw_points=self.distance_points # 10
num_steps=int(distDiag//d_btw_points)
#Generates the new x positions to evaluate
x_lines_np_local=np.linspace(x_lines[nv],x_lines[nv+1],num=num_steps)
y_lines_np_local=np.linspace(y_lines[nv],y_lines[nv+1],num=num_steps)
#Generates a double quantity of new x positions
x_lines_np_local2=[]
y_lines_np_local2=[]
for h, val in enumerate(x_lines_np_local):
if h+1<len(x_lines_np_local):
x_val=(val+x_lines_np_local[h+1])/2
y_val=(y_lines_np_local[h]+y_lines_np_local[h+1])/2
x_lines_np_local2.append(x_val)
y_lines_np_local2.append(y_val)
x_lines_np_local2=np.array(x_lines_np_local2)
y_lines_np_local2=np.array(y_lines_np_local2)
x_lines_np=np.append(x_lines_np,x_lines_np_local)
y_lines_np=np.append(y_lines_np,y_lines_np_local)
#calculates line parameters
m,b=plb.polyfit(x_lines_np_local,y_lines_np_local,1)
#perpendicular slope
p_s=(-1/m)
for i, d in enumerate(ds):
c=d*(1+m**2)**0.5+b
if b<0:
c=b-d*(1+m**2)**0.5
if i%2!=0:
b2p=[]
for point in zip(x_lines_np_local,y_lines_np_local):
b2p.append(point[1]-point[0]*p_s)
if b>0:
x_u=[(c-b2)/(p_s-m) for x,b2 in zip(x_lines_np_local,b2p)]
x_d=[(2*b-c-b2)/(p_s-m) for x,b2 in zip(x_lines_np_local,b2p)]
else:
x_u=[(2*b-c-b2)/(p_s-m) for x,b2 in zip(x_lines_np_local,b2p)]
x_d=[(c-b2)/(p_s-m) for x,b2 in zip(x_lines_np_local,b2p)]
y_u=[p_s*xu+b2 for xu,b2 in zip(x_u,b2p)]
y_d=[p_s*xd+b2 for xd,b2 in zip(x_d,b2p)]
x_lines_np=np.append(x_lines_np,x_u)
x_lines_np=np.append(x_lines_np,x_d)
y_lines_np=np.append(y_lines_np,y_u)
y_lines_np=np.append(y_lines_np,y_d)
else:
x_u=[]
x_d=[]
y_u=[]
y_d=[]
for i2, x in enumerate(x_lines_np_local2):
b2p=y_lines_np_local2[i2]-x*p_s
if b>0:
x_uu=(c-b2p)/(p_s-m)
x_dd=(2*b-c-b2p)/(p_s-m)
else:
x_uu=(2*b-c-b2p)/(p_s-m)
x_dd=(c-b2p)/(p_s-m)
x_u.append(x_uu)
x_d.append(x_dd)
y_uu=p_s*x_uu+b2p
y_u.append(y_uu)
y_dd=p_s*x_dd+b2p
y_d.append(y_dd)
x_lines_np=np.append(x_lines_np,x_u)
x_lines_np=np.append(x_lines_np,x_d)
y_lines_np=np.append(y_lines_np,y_u)
y_lines_np=np.append(y_lines_np,y_d)
for n2 in range(len(x_lines_np)):
to_delete=[]
x0=x_lines_np[n2]
y0=y_lines_np[n2]
for ngrid in range(len(regular_mesh)):
boolean=self.radius_select(x0,y0,regular_mesh[ngrid][0],regular_mesh[ngrid][1],type_i='fault')
if boolean==1:
to_delete.append(ngrid)
regular_mesh=np.delete(regular_mesh, to_delete, 0)
for n3 in range(len(x_lines_np)):
self.IDXY["RG%s"%(n3+cnt)]=[x_lines_np[n3],y_lines_np[n3],'REGUL']
cnt+=n3
#Store the data from the regular mesh on IDXY
for n in range(len(regular_mesh)):
self.IDXY["RG%s"%(n+cnt)]=[regular_mesh[n][0],regular_mesh[n][1],'REGUL']
return {'raw_data':self.raw_data,'IDXY':self.IDXY}
def well_blk_assign(self):
"""Asigna el nombre a cada bloque, priorizando los pozos, es decir, estos llevan los correlativos mas bajos. Por ultimo almacena dos archivo json para registro de los bloques asignados a los pozos
"""
self.well_names=[]
self.blocks_PT={}
self.well_blocks_names={}
self.wells_correlative={}
data_dict=self.data()
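#Naming sketch (as implemented below): each block name is a two-letter layer/group code
#taken from color_dict plus a three-digit correlative that starts at 110; whenever the
#counter reaches a multiple of 100 it jumps by 10, so no name contains two consecutive
#zeros, and after 999 it rolls over to 110 while the group letter advances. Wells are
#processed first, so they receive the lowest correlatives.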
for n in range(self.number_of_layer):
cnt=110
layer_num=0
for x in data_dict['IDXY']:
if cnt%100==0:
blocknumber=cnt+10
cnt=blocknumber
else:
blocknumber=cnt
if cnt%1010==0:
cnt=110
blocknumber=cnt
layer_num+=1
cnt+=1
string_ele=self.color_dict[n+1][0][(layer_num)]
blockname=string_ele+str(blocknumber)
self.well_names.append(blockname)
self.well_blocks_names["%s"%(blockname)]=[x,\
data_dict['IDXY'][x][0],\
data_dict['IDXY'][x][1],\
data_dict['IDXY'][x][2],\
n+1,self.rock_dict[n+1][4],\
self.rock_dict[n+1][2],self.rock_dict[n+1][5]]
#print(data_dict['IDXY'][x][2]!='REGUL')
#if data_dict['IDXY'][x][2]=='prod' or data_dict['IDXY'][x][2]=='rein':
if data_dict['IDXY'][x][2]!='REGUL':
#str(data_dict['IDXY'][x][2]).split("'")[1],\
self.blocks_PT["%s"%(blockname)]=[x,\
str(data_dict['IDXY'][x][0]),\
str(data_dict['IDXY'][x][1]),\
str(data_dict['IDXY'][x][2]),\
str(n+1),str(self.rock_dict[n+1][4]),\
self.rock_dict[n+1][2],str(self.rock_dict[n+1][5])]
try:
self.wells_correlative[x]=["%s"%(blockname[1:])]
except KeyError:
pass
json.dump(self.blocks_PT, open("../mesh/well_dict.txt",'w'),sort_keys=True, indent=4)
json.dump(self.wells_correlative, open("../mesh/wells_correlative.txt",'w'),sort_keys=True, indent=4)
return {'well_names':self.well_names, 'well_blocks_names':self.well_blocks_names}
def to_translate(self,points):
#Converts data points to new coordinate system
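#Sketch of the transform (as read from the code): every point is expressed in polar
#coordinates (r,alpha) relative to the expanded lower-left corner
#(Xmin-n_times*x_from_boarder, Ymin-n_times*x_from_boarder) and then rotated by -angle
#(betha=alpha-angle in radians), so the relaxation can run in an axis-aligned frame;
#de_translate below applies the inverse rotation and shifts the origin back.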
n_times=4
if self.rotate:
angle=self.angle
for n, point in enumerate(points):
if point[1]!=(self.Ymin-n_times*self.x_from_boarder) and point[0]!=(self.Xmin-n_times*self.x_from_boarder):
line=[(self.Xmin-n_times*self.x_from_boarder,self.Ymin-n_times*self.x_from_boarder),(point[0],point[1])]
r = cdist(line,line,'euclidean').diagonal(1)
alpha = np.arctan((point[1]-self.Ymin+n_times*self.x_from_boarder)/(point[0]-self.Xmin+n_times*self.x_from_boarder))
if alpha<0:
alpha=np.pi+alpha
betha = alpha - np.deg2rad(angle)
points[n][0]=r*np.cos(betha)
points[n][1]=r*np.sin(betha)
else:
points[n][0]=0
points[n][1]=0
return points
def de_translate(self,points):
#Converts data points back to the original coordinate system
n_times=4
if self.rotate:
angle=self.angle
for n, point in enumerate(points):
if point[1]!=0 and point[0]!=0:
betha = np.arctan(point[1]/point[0])
alpha=betha+np.deg2rad(angle)
line=[(0,0),(point[0],point[1])]
r = cdist(line,line,'euclidean').diagonal(1)
points[n][0]=r*np.cos(alpha)+(self.Xmin-n_times*self.x_from_boarder)
points[n][1]=r*np.sin(alpha)+(self.Ymin-n_times*self.x_from_boarder)
else:
points[n][0]=self.Xmin
points[n][1]=self.Ymin
return points
def well_block_assign_from_relaxation(self,layer,i):
letters=string.ascii_uppercase
name=layer+letters[int(i/810)]+str(int(1+(i-810*int(i/810))/90))+str(10+i-90*int(i/90))
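#Illustrative example of the naming scheme above: for layer='A', i=0 gives
#'A'+'A'+'1'+'10'='AA110' and i=95 gives 'A'+'A'+'2'+'15'='AA215'; the letter advances
#every 810 blocks, the middle digit every 90 blocks and the last two digits run from 10 to 99.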
return name
def relaxation(self):
data={'blocks':{},'borders':[]}
#Extracts the points from the regular mesh; it already considers the rotation and the internal region of the shapefile
data_dict=self.data()['IDXY']
points=[]
for key in data_dict:
points.append([data_dict[key][0],data_dict[key][1]])
points=np.array(points)
fig, ax = plt.subplots(1, 1, figsize=(20,20))
ax.plot(points[:,0],points[:,1],'ok', linestyle='None',ms=1)
for k, dot in enumerate(self.polygon_external):
if k+1<len(self.polygon_external):
ax.plot([dot[0],self.polygon_external[k+1][0]],[dot[1],self.polygon_external[k+1][1]],'og')
ax.set_aspect('equal')
plt.savefig('points_original.png',dpi=600)
plt.show()
points=self.to_translate(points)
points=np.array(points)
fig, ax = plt.subplots(1, 1, figsize=(20,20))
ax.plot(points[:,0],points[:,1],'or', linestyle='None',ms=1)
ax.set_aspect('equal')
plt.savefig('points_traslated.png',dpi=600)
plt.show()
#Perform relaxation
for i in range(self.relaxation_times):
field = Field(points)
field.relax()
points= field.get_points()
points=np.array(points)
fig, ax = plt.subplots(1, 1, figsize=(20,20))
ax.plot(points[:,0],points[:,1],'og', linestyle='None',ms=1)
ax.set_aspect('equal')
plt.savefig('points_traslated_relaxed.png',dpi=600)
plt.show()
points=self.de_translate(points)
points=np.array(points)
fig, ax = plt.subplots(1, 1, figsize=(20,20))
ax.plot(points[:,0],points[:,1],'om', linestyle='None',ms=1)
for k, dot in enumerate(self.polygon_external):
if k+1<len(self.polygon_external):
ax.plot([dot[0],self.polygon_external[k+1][0]],[dot[1],self.polygon_external[k+1][1]],'og')
ax.set_aspect('equal')
plt.savefig('points_relaxed.png',dpi=600)
plt.show()
#Drops points out of shapefile
rlx_points=[]
for point in points:
if self.check_in_out('external',[point[0],point[1]],'shapefile'):
rlx_points.append([point[0],point[1]])
rlx_points=np.array(rlx_points)
#Assign names to new points
position={}
for n, layer in enumerate(self.rock_dict):
for i,point in enumerate(rlx_points):
name=self.well_block_assign_from_relaxation(self.rock_dict[layer][0][0],i)
data['blocks'][name]=[n+1,point[0],point[1],self.rock_dict[layer][4],self.rock_dict[layer][5]]
if n==0:
position[i]=name
#Read border used to clip and write into the 'in' file
borders=gpd.read_file('../../GIS/reservoir/reservoir_limits_1_pol.shp')
border_points=[]
for line in borders.iterrows():
pointList = line[1].geometry.exterior.coords.xy
for point in zip(pointList[0],pointList[1]):
border_points.append([point[0],point[1]])
data['borders']=self.polygon_external
#Rewrite the wells dictionaries by finding the new closest position to the wells
wells_dictionary={}
well_corr={}
wells=pd.read_csv('../input/well_feedzone_xyz.csv',sep=',')
tree = cKDTree(rlx_points)
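#cKDTree.query returns a (distance, index) tuple for the nearest relaxed point, so
#distance[1] below is the index used to look up the corresponding block name in 'position'.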
for index, well in wells.iterrows():
distance = tree.query([well['x'],well['y']])
block_assig=position[distance[1]]
well_corr[well['well']]=[block_assig[1:5]]
x= data['blocks'][block_assig][1]
y=data['blocks'][block_assig][2]
for n in range(self.number_of_layer):
block_assig_i=self.rock_dict[n+1][0]+block_assig[2:5]
wells_dictionary[block_assig_i]=[well['well'],\
x,\
y,\
well['type'],\
n+1,
data['blocks'][block_assig][3],\
self.rock_dict[n+1][2],\
data['blocks'][block_assig][4]]
json.dump(well_corr, open("../mesh/well_dict.txt",'w'),sort_keys=True, indent=4)
json.dump(wells_dictionary, open("../mesh/wells_correlative.txt",'w'),sort_keys=True, indent=4)
return data
def input_file_to_amesh_from_relaxation(self):
data=self.relaxation()
blocks_dict=data['blocks']
file=open(self.filename_out, "w")
file.write("locat\n")
#Writing ELEMENTS
for x in sorted(blocks_dict.keys()):
string="{:5s}{:5d}{:20.2f}{:20.2f}{:20.2f}{:20.2f}\n".format(x,blocks_dict[x][0],\
blocks_dict[x][1],\
blocks_dict[x][2],\
blocks_dict[x][3],\
blocks_dict[x][4])
file.write(string)
file.write(" \n")
file.write("bound\n")
#Writing borders
for point in data['borders']:
string_bound=" %9.3E %9.3E\n"%(point[0],point[1])
file.write(string_bound)
file.write(" \n")
file.write("toler")
file.write(" \n")
file.write("%10s\n"%self.toler)
file.write(" ")
file.close()
pass
def run_amesh_from_relaxation(self):
os.system("amesh\n")
files2move=['in','conne','eleme','segmt']
for fn in files2move:
shutil.move(fn,'../mesh/from_amesh/%s'%fn)
return None
def input_file_to_amesh(self):
"""Genera el archivo de entrada para amesh
"""
welldict=self.well_blk_assign()['well_blocks_names']
if not self.from_leapfrog:
Xarray=np.array([self.Xmin,self.Xmax])
Yarray=np.array([self.Ymin,self.Ymax])
else:
xmax,xmin,ymin,ymax,regular_mesh=self.from_leapfrog_mesh()
Xarray=np.array([xmin,xmax])
Yarray=np.array([ymin,ymax])
boundaries=[]
for i, j in itertools.product(Xarray, Yarray):
boundaries.append([i,j])
if self.rotate:
angle=self.angle
for point_n in range(len(boundaries)):
x1=boundaries[point_n][0]-self.Xmin
y1=boundaries[point_n][1]-self.Ymin
boundaries[point_n][0]=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+self.Xmin
boundaries[point_n][1]=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+self.Ymin
boundaries=np.array(boundaries)
hull = ConvexHull(boundaries)
file=open(self.filename_out, "w")
file.write("locat\n")
for x in sorted(welldict.keys()):
string="{:5s}{:5d}{:20.2f}{:20.2f}{:20.2f}{:20.2f}\n".format(x,welldict[x][4],\
welldict[x][1],\
welldict[x][2],\
welldict[x][5],\
welldict[x][7])
file.write(string)
file.write(" \n")
file.write("bound\n")
for n in range(len(boundaries[hull.vertices,0])):
string_bound=" %9.3E %9.3E\n"%(boundaries[hull.vertices,0][::-1][n],boundaries[hull.vertices,1][::-1][n])
file.write(string_bound)
file.write(" \n")
file.write("toler")
file.write(" \n")
file.write("%10s\n"%self.toler)
file.write(" ")
file.close()
fileread=open(self.filename,'r')
os.system("amesh\n")
string_out=fileread.read()
files2move=['in','conne','eleme','segmt']
for fn in files2move:
shutil.move(fn,'../mesh/from_amesh/%s'%fn)
return string_out
def plot_voronoi(self):
"""En caso de ser solicitado, grafica la malla a partir de los archivo en la carpeta ../mesh/from_amesh
"""
welldict=self.well_blk_assign()['well_blocks_names']
if os.path.isfile('../mesh/from_amesh/segmt'):
data=np.genfromtxt('../mesh/from_amesh/segmt', dtype="f8,f8,f8,f8,i8,S5,S5", names=['X1','Y1','X2','Y2','index','elem1','elem2'],delimiter=[15,15,15,15,5,5,5])
fig, ax0 = plt.subplots(figsize=(10,10))
#Setup of the principal plot
ax0.axis([self.Xmin,self.Xmax,self.Ymin,self.Ymax])
ax0.set_xlabel('East [m]')
ax0.set_ylabel('North [m]')
ax0.set_title("Mesh for layer %s"%self.layer_to_plot,y=1.04)
ax0.set_xticks(range(self.Xmin+self.x_from_boarder,self.Xmax+self.x_space-self.x_from_boarder,self.x_space))
ax0.set_yticks(range(self.Ymin+self.y_from_boarder,self.Ymax+self.y_space-self.y_from_boarder,self.y_space))
ax0.xaxis.set_minor_locator(AutoMinorLocator())
ax0.yaxis.set_minor_locator(AutoMinorLocator())
#Plot of the Y axis on the right
ax1 = ax0.twinx()
ax1.set_ylim(ax0.get_ylim())
ax1.set_yticks(ax0.get_yticks())
ax1.yaxis.set_minor_locator(AutoMinorLocator())
#Plot of the Xaxis in the top
ax2 = ax0.twiny()
ax2.set_xticks(ax0.get_xticks())
ax2.set_xlim(ax0.get_xlim())
ax2.xaxis.set_minor_locator(AutoMinorLocator())
for n in np.unique(data['elem1']):
Xfill=[]
Yfill=[]
if n[0:2] in self.color_dict[self.layer_to_plot][0]:
for j in range(len(data['X1'])):
cnt=0
if data['elem1'][j]==n:
Xfill.append(data['X1'][j])
Xfill.append(data['X2'][j])
Yfill.append(data['Y1'][j])
Yfill.append(data['Y2'][j])
plt.plot([data['X1'][j],data['X2'][j]],[data['Y1'][j],data['Y2'][j]],'-k')
if self.plot_names:
if cnt==0:
if welldict[n][3]=='WELL':
ax0.text(welldict[n][1],welldict[n][2],\
"%s, %s"%(welldict[n][0],n),fontsize=8)
color_dot='r'
else:
ax0.text(welldict[n][1],welldict[n][2],\
welldict[n][0],fontsize=8)
color_dot='b'
cnt+=1
if self.plot_centers:
if welldict[n][3]=='WELL':
color_dot='r'
else:
color_dot='b'
ax0.plot(welldict[n][1],welldict[n][2],'%so'%color_dot,ms=1)
ax0.fill(Xfill,Yfill,welldict[n][6],alpha=0.5)
ax0.grid()
ax0.grid(which='major', color='#CCCCCC', linestyle='--',alpha=0.5)
fig.savefig('../mesh/plot_voronoi_'+str(datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')),bbox_inches="tight", pad_inches = 0)
plot_s=plt.show()
else:
plot_s="Excecute input_file_to_amesh() function first"
return plot_s
def to_steinar(self):
"""Convierte los archivos de salida amesh en formato de entrada para Steinar
"""
if os.path.isfile('../mesh/from_amesh/segmt'):
shutil.copyfile('../mesh/from_amesh/segmt', '../mesh/to_steinar/segmt')
ele_file_st=open('../mesh/to_steinar/eleme','w')
ele_file=open('../mesh/from_amesh/eleme','r')
data_eleme=ele_file.readlines()
if self.set_inac_from_poly or self.set_inac_from_inner:
source_file="../mesh/from_amesh/eleme"
data_eleme=pd.read_csv(source_file,delim_whitespace=True,skiprows=1,header=None,names=['block','rocktype','vol','X','Y','Z'])
data_eleme.set_index('block')
ele_file_st.write("eleme\n")
if self.set_inac_from_inner:
source='inner'
if self.set_inac_from_poly:
source='shapefile'
for index, row in data_eleme.iterrows():
check=self.check_in_out('internal',[row['X'],row['Y']],source=source)
outside=1
if not check:
outside=-1
ele_file_st.write("%5s%10s%5s%10.3E\n"%(row['block']," ",row['rocktype'],row['vol']*outside))
else:
cnt=0
ele_file_st.write("eleme")
for a_line in data_eleme:
if cnt!=0:
name_rock_vol = a_line[0:30]
ele_file_st.write("\n")
ele_file_st.write("%s"%name_rock_vol)
cnt+=1
ele_file_st.close()
ele_file.close()
conne_file_st=open('../mesh/to_steinar/conne','w')
conne_file=open('../mesh/from_amesh/conne','r')
data_conne=conne_file.readlines()
cnt=0
for a_line in data_conne:
if cnt!=0:
name_conne = a_line[0:11]
if '*' in name_conne:
pass
elif cnt<=len(data_conne)-2:
try:
conne_file_st.write("%s"%name_conne)
conne_file_st.write(" 0 0 0 ")
nod4=a_line[60:64]
if len(nod4)==4:
per=3
else:
per=a_line[29:30]
conne_file_st.write("%s"%per)
nod1=a_line[30:40].replace('+', '')
conne_file_st.write(" %9.3E"%(float(nod1)))
nod2=a_line[40:50].replace('+', '')
conne_file_st.write(" %9.3E"%(float(nod2)))
nod3=a_line[50:60].replace('+', '')
conne_file_st.write(" %9.3E"%(float(nod3)))
if len(nod4)==4:
#nod4=-1
conne_file_st.write("{:9.1f}".format(float(nod4)))
elif len(nod4)==1:
conne_file_st.write(" 0.0")
conne_file_st.write(" 0.0000E+00\n")
except ValueError:
pass
else:
conne_file_st.write("conne\n")
cnt+=1
conne_file_st.close()
conne_file.close()
in_file_st=open('../mesh/to_steinar/in','w')
in_file=open('../mesh/from_amesh/in','r')
data_in=in_file.readlines()
in_file_st.write("bound\n")
if not self.from_leapfrog:
Xarray=np.array([self.Xmin,self.Xmax])
Yarray=np.array([self.Ymin,self.Ymax])
else:
xmax,xmin,ymin,ymax,regular_mesh=self.from_leapfrog_mesh()
Xarray=np.array([xmin,xmax])
Yarray=np.array([ymin,ymax])
if self.rotate:
borders=self.polygon_external
for point in borders:
string_bound=" %6.2d %6.2d\n"%(point[0],point[1])
in_file_st.write(string_bound)
else:
in_file_st.write(" %6.2d %6.2d\n"%(Xarray[0],Yarray[0]))
in_file_st.write(" %6.2d %6.2d\n"%(Xarray[0],Yarray[1]))
in_file_st.write(" %6.2d %6.2d\n"%(Xarray[1],Yarray[1]))
in_file_st.write(" %6.2d %6.2d\n\n"%(Xarray[1],Yarray[0]))
in_file_st.write("\nlocat\n")
cnt=0
for ny in data_in:
if cnt>0 and cnt<(len(data_in)-10):
read1=ny[0:6]
read2=ny[8:11]
read3=ny[11:31]
read4=ny[31:52]
read5=ny[51:72]
read6=ny[71:92]
in_file_st.write("%s"%read1)
in_file_st.write("%4s"%read2)
in_file_st.write("%23.8E"%float(read3))
in_file_st.write("%24.8E"%float(read4))
in_file_st.write("%24.8E"%float(read5))
in_file_st.write("%24.8E\n"%float(read6))
cnt+=1
in_file_st.close()
in_file.close()
#Dummy rocktype
rock_file_st=open('../mesh/to_steinar/rocks','w')
rock_file_st.write("rock1 02.6500E+031.0000E-011.0000E-151.0000E-151.0000E-152.1000E+008.5000E+02 160 243 150")
rock_file_st.close()
else:
pass
def to_GIS(self):
"""Toma como entrada el archivo segment de ../mesh/from_amesh y eleme de ../mesh/to_stainar y los convierte en shapefile con atributos de roca, nombre y volumen
"""
if os.path.isfile('../mesh/from_amesh/segmt') and os.path.isfile('../mesh/to_steinar/eleme'):
if self.plot_all_GIS:
max_layer=self.number_of_layer
min_layer=1
else:
max_layer=self.layer_to_plot
min_layer=self.layer_to_plot
for ln in range(min_layer,max_layer+1,1):
w=shapefile.Writer('../mesh/GIS/mesh_layer_%s'%ln)
w.field('BLOCK_NAME', 'C', size=5)
w.field('ROCKTYPE', 'C', size=5)
w.field('VOLUMEN', 'F', decimal=10)
alttype = np.dtype([('X1', '<f8'), ('Y1', '<f8'), ('X2', '<f8'), ('Y2', '<f8'), ('index', '<f8'), ('elem1', 'U5'), ('elem2', 'U5')])
data = np.genfromtxt('../mesh/to_steinar/segmt', dtype=alttype, delimiter=[15,15,15,15,5,5,5])
elem_file = open('../mesh/to_steinar/eleme', 'r')
plain_elemfile=elem_file.read()
for n in np.unique(data['elem1']):
if n[0:2] in self.color_dict[ln][0]:
points=[]
line = re.findall(r"%s.*$"%n, plain_elemfile,re.MULTILINE)
rocktype=str(line)[17:22]
volumen=float(str(line)[22:32])
for j in range(len(data['X1'])):
if data['elem1'][j]==n:
point1=[data['X1'][j],data['Y1'][j]]
point2=[data['X2'][j],data['Y2'][j]]
points.append(point1)
points.append(point2)
w.poly([points])
w.record(n,rocktype,volumen)
w.close()
elem_file.close()
else:
None
def mesh_creation_func(input_mesh_dictionary,input_dictionary):
"""Creates a grid
Most of the parameters are keywords of the input dictionary input_mesh_dictionary; otherwise, the source dictionary (input_dictionary) is indicated.
Parameters
----------
filename : str
File name with well feedzone location
filepath : str
Path of input files
Xmin : float
Minimum X coordinate for the grid
Xmax : float
Maximum X coordinate for the grid
Ymin : float
Minimum Y coordinate for the grid
Ymax : float
Maximum Y coordinate for the grid
toler : float
AMESH parameter
layers : dictionary
Name (correlative) and thickness of every layer on the model, keyword on input_dictionary
layer_to_plot : int
In case it is specified, a Voronoi plot will be performed
x_space : float
Horizontal distance between elements for the outerfield
y_space : float
Vertical distance between elements for the outerfield
radius_criteria: float
Minimum distance between well location and a regular element
x_from_boarder: float
Horizontal distance from the first element to the east border
y_from_boarder: float
Vertical distance from the first element to the south border
x_gap_min: float
Minimum X coordinate on the grid for the wellfield
x_gap_max: float
Maximum X coordinate on the grid for the wellfield
x_gap_space: float
Horizontal distance between elements for the wellfield (inner field)
y_gap_min: float
Minimum Y coordinate on the grid for the wellfield
y_gap_max: float
Maximum Y coordinate on the grid for the wellfield
y_gap_space: float
Vertical distance between elements for the wellfield (inner field)
plot_names: bool
If true it plots the name of the blocks from the selected layer to plot
plot_centers: bool
If true it plots the centers of the blocks from the selected layer to plot
z0_level: float
Reference level (elevation) for all the grid, keyword on input_dictionary
mesh_creation: bool
If true the mesh is created
plot_layer: bool
If true it plots the selected layer
to_steinar: bool
If true it creates the input files for steinar
to_GIS: bool
If true it generates a shapefile of the selected layer
plot_all_GIS: bool
If true it generates a shapefile of all layers
from_leapfrog: bool
If true, it reads the Leapfrog files ../mesh/from_leapfrog/LF_geometry.dat and ../mesh/from_leapfrog/LF_t2.dat; however, the symbology used in Leapfrog is lost and neither the regular mesh nor the wells are used. The mesh is created using AMESH only
line_file: str
It defines the path and name of a line file that can represent a fault or other structure on the mesh. The input file must be in csv format with the header: ID,X,Y. ID refers to the same
structure, thus more than one structure can be defined in a single file.
fault_distance: float
In case a line_file is defined, some parallel elements will be created at a defined distance
with_polygon: bool
If true a shapefile will be read to define the wellfield.
polygon_shape: str
The shapefile defines the wellfield boundaries. The shape must not contain any cavity
set_inac_from_poly: bool
If true all the elements on the outside of the shapefile are defined as inactive
set_inac_from_inner:bool
If true all the elements on the outerfield are defined as inactive
rotate: bool
If true it rotates the mesh a defined angle
angle: float
Angle in degrees
inner_mesh_type: string
Type of mesh on the inner part of the mesh, it could be 'honeycomb' or 'regular'
Returns
-------
file
eleme: list of blocks from the grid
file
conne : list of connections on the grid
shapefile
mesh_{field}_layer_{layer} : shapefile of a defined (or all) layer including rock distribution
plot
Voronoi plot (in case it is specified)
Attention
---------
A copy of AMESH must be on the path or directory
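Examples
--------
Assuming both dictionaries are already defined (e.g. loaded from the model
configuration), a typical call is simply:

>>> mesh_creation_func(input_mesh_dictionary, input_dictionary)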
"""
layers_thick=list(map(float,np.array(list(input_dictionary['LAYERS'].values()))[:][:,1]))
blocks=py2amesh(input_mesh_dictionary['filename'],input_mesh_dictionary['filepath'],input_mesh_dictionary['Xmin'],input_mesh_dictionary['Xmax'],input_mesh_dictionary['Ymin'],input_mesh_dictionary['Ymax'],\
input_mesh_dictionary['toler'],layers_thick,input_mesh_dictionary['layer_to_plot'],input_mesh_dictionary['x_space'],input_mesh_dictionary['y_space'],input_mesh_dictionary['radius_criteria'],input_mesh_dictionary['x_from_boarder'],input_mesh_dictionary['y_from_boarder'],\
input_mesh_dictionary['x_gap_min'],input_mesh_dictionary['x_gap_max'],input_mesh_dictionary['x_gap_space'],input_mesh_dictionary['y_gap_min'],input_mesh_dictionary['y_gap_max'],input_mesh_dictionary['y_gap_space'],input_mesh_dictionary['plot_names'],input_mesh_dictionary['plot_centers'],\
input_dictionary['z_ref'],input_mesh_dictionary['plot_all_GIS'],input_mesh_dictionary['from_leapfrog'],input_mesh_dictionary['line_file'],input_mesh_dictionary['fault_distance'],input_mesh_dictionary['with_polygon'],input_mesh_dictionary['polygon_shape'],\
input_mesh_dictionary['set_inac_from_poly'],input_mesh_dictionary['set_inac_from_inner'],input_mesh_dictionary['rotate'],input_mesh_dictionary['angle'],input_mesh_dictionary['inner_mesh_type'],\
input_mesh_dictionary['distance_points'],input_mesh_dictionary['fault_rows'],input_mesh_dictionary['relaxation_times'],input_mesh_dictionary['points_around_well'],input_mesh_dictionary['distance_points_around_well'])
#data=blocks.relaxation()
if input_mesh_dictionary['mesh_creation']:
if input_mesh_dictionary['relaxation_times']>0:
blocks.input_file_to_amesh_from_relaxation()
blocks.run_amesh_from_relaxation()
else:
blocks.input_file_to_amesh()
if input_mesh_dictionary['to_steinar']:
blocks.to_steinar()
if input_mesh_dictionary['plot_layer']:
blocks.plot_voronoi()
if input_mesh_dictionary['to_GIS']:
blocks.to_GIS()
return None #data #None
def change_ref_elevation(variation=0):
"""It modifies the in files from the folders to_steinar and from_amesh by increasing (or decreasing) a fixed value
Parameters
----------
variation: float
Value by which the elevation in the in file from amesh is changed
Returns
-------
file
in: modified on folders ../mesh/to_steinar and ../mesh/from_amesh
Attention
---------
The change is applied to all the elements. It is recommended to run ELEM_to_json() after this execution
Examples
--------
>>> change_ref_elevation(variation=-100)
"""
in_file_steinar='../mesh/to_steinar/in'
in_file_amesh='../mesh/from_amesh/in'
if os.path.isfile(in_file_steinar) and os.path.isfile(in_file_amesh):
steinar_colums=[(0,5),(5,10),(10,35),(35,60),(60,85),(85,110)]
steinar_data=pd.read_fwf(in_file_steinar,colspecs=steinar_colums,skiprows=7,header=None,names=['block','level','X','Y','Z','h'])
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
from .dataset import DataSet
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, ReindexMapError
class Brca(DataSet):
def __init__(self, version="latest"):
"""Load all of the brca dataframes as values in the self._data dict variable, with names as keys, and format them properly."""
# Set some needed variables, and pass them to the parent DataSet class __init__ function
valid_versions = ["3.1", "3.1.1"] # This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
data_files = {
"3.1": [
"prosp-brca-v3.1-acetylome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-gene-level-cnv-gistic2-all_data_by_genes.gct.gz",
"prosp-brca-v3.1-phosphoproteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-proteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-rnaseq-fpkm-log2-row-norm-2comp.gct.gz",
"prosp-brca-v3.1-sample-annotation.csv.gz"],
"3.1.1": [
"Breast_One_Year_Clinical_Data_20160927.xls",
"prosp-brca-v3.0-v1.4.somatic.variants.070918.maf.gz",
"prosp-brca-v3.1-acetylome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-gene-level-cnv-gistic2-all_data_by_genes.gct.gz",
"prosp-brca-v3.1-phosphoproteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-proteome-ratio-norm-NArm.gct.gz",
"prosp-brca-v3.1-rnaseq-fpkm-log2-row-norm-2comp.gct.gz",
"prosp-brca-v3.1-sample-annotation.csv.gz"],
}
super().__init__(cancer_type="brca", version=version, valid_versions=valid_versions, data_files=data_files)
# Load the data into dataframes in the self._data dict
loading_msg = "Loading dataframes"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file
if file_name == "prosp-brca-v3.1-acetylome-ratio-norm-NArm.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["GeneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for GeneSymbol, so we'll use that to filter them out.
# Prepare some columns we'll need later for the multiindex
df["variableSites"] = df["variableSites"].str.replace(r"[a-z\s]", "") # Get rid of all lowercase delimeters and whitespace in the sites
df = df.rename(columns={
"GeneSymbol": "Name",
"variableSites": "Site",
"sequence": "Peptide", # We take this instead of sequenceVML, to match the other datasets' format
"accession_numbers": "Database_ID" # We take all accession numbers they have, instead of the singular accession_number column
})
# Some rows have at least one localized acetylation site, but also have other acetylations that aren't localized. We'll drop those rows, if their localized sites are duplicated in another row, to avoid creating duplicates, because we only preserve information about the localized sites in a given row. However, if the localized sites aren't duplicated in another row, we'll keep the row.
split_ids = df["id"].str.split('_', expand=True)
unlocalized_to_drop = df.index[~split_ids[3].eq(split_ids[4]) & df.duplicated(["Name", "Site", "Peptide", "Database_ID"], keep=False)] # Column 3 of the split "id" column is number of phosphorylations detected, and column 4 is number of phosphorylations localized, so if the two values aren't equal, the row has at least one unlocalized site
df = df.drop(index=unlocalized_to_drop)
# Give it a multiindex
df = df.set_index(["Name", "Site", "Peptide", "Database_ID"])
df = df.drop(columns=["id", "id.description", "geneSymbol", "numColumnsVMsiteObserved", "bestScore", "bestDeltaForwardReverseScore",
"Best_scoreVML", "sequenceVML", "accessionNumber_VMsites_numVMsitesPresent_numVMsitesLocalizedBest_earliestVMsiteAA_latestVMsiteAA",
"protein_mw", "species", "speciesMulti", "orfCategory", "accession_number", "protein_group_num", "entry_name"]) # We don't need these. The dropped columns include a "geneSymbol" column that is a duplicate of the original GeneSymbol.
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["acetylproteomics"] = df
elif file_name == "prosp-brca-v3.1-gene-level-cnv-gistic2-all_data_by_genes.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, index_col=0, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["geneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for geneSymbol, so we'll use that to filter them out.
df = df.drop(columns="Cytoband")
df["geneSymbol"] = df["geneSymbol"].str.rsplit('|', n=1, expand=True)[0] # Some of the geneSymbols have the gene IDs appended to them, to get rid of duplicates. We're going to create a multiindex with all the gene names and gene IDs, so we can drop the appended IDs.
df = df.rename(columns={"geneSymbol": "Name", "Gene.ID": "Database_ID"})
df = df.set_index(["Name", "Database_ID"])
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["CNV"] = df
elif file_name == "prosp-brca-v3.1-phosphoproteome-ratio-norm-NArm.gct.gz":
df = pd.read_csv(file_path, sep='\t', skiprows=2, dtype=object) # First two rows of file aren't part of the dataframe. Also, due to extra metadata rows we're going to remove, all cols have mixed types, so we pass dtype=object for now.
df = df[df["GeneSymbol"] != "na"] # There are several metadata rows at the beginning of the dataframe, which duplicate the clinical and derived_molecular dataframes. They all don't have a value for GeneSymbol, so we'll use that to filter them out.
# Prepare some columns we'll need later for the multiindex
df["variableSites"] = df["variableSites"].str.replace(r"[a-z\s]", "") # Get rid of all lowercase delimeters and whitespace in the sites
df = df.rename(columns={
"GeneSymbol": "Name",
"variableSites": "Site",
"sequence": "Peptide", # We take this instead of sequenceVML, to match the other datasets' format
"accession_numbers": "Database_ID" # We take all accession numbers they have, instead of the singular accession_number column
})
# Some rows have at least one localized phosphorylation site, but also have other phosphorylations that aren't localized. We'll drop those rows, if their localized sites are duplicated in another row, to avoid creating duplicates, because we only preserve information about the localized sites in a given row. However, if the localized sites aren't duplicated in another row, we'll keep the row.
split_ids = df["id"].str.split('_', expand=True)
unlocalized_to_drop = df.index[~split_ids[3].eq(split_ids[4]) & df.duplicated(["Name", "Site", "Peptide", "Database_ID"], keep=False)] # Column 3 of the split "id" column is number of phosphorylations detected, and column 4 is number of phosphorylations localized, so if the two values aren't equal, the row has at least one unlocalized site
df = df.drop(index=unlocalized_to_drop)
# Give it a multiindex
df = df.set_index(["Name", "Site", "Peptide", "Database_ID"])
df = df.drop(columns=["id", "id.description", "geneSymbol", "numColumnsVMsiteObserved", "bestScore", "bestDeltaForwardReverseScore",
"Best_scoreVML", "Best_numActualVMSites_sty", "Best_numLocalizedVMsites_sty", "sequenceVML",
"accessionNumber_VMsites_numVMsitesPresent_numVMsitesLocalizedBest_earliestVMsiteAA_latestVMsiteAA", "protein_mw", "species",
"speciesMulti", "orfCategory", "accession_number", "protein_group_num", "entry_name"]) # We don't need these. The dropped columns include a "geneSymbol" column that is a duplicate of the original GeneSymbol.
df = df.apply(pd.to_numeric) # Now that we've dropped all the extra metadata columns, convert everything to floats.
df = df.sort_index()
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["phosphoproteomics"] = df
elif file_name == "prosp-brca-v3.1-proteome-ratio-norm-NArm.gct.gz":
                df = pd.read_csv(file_path, sep='\t', skiprows=2, dtype=object) # First two rows of file aren't part of the dataframe. As above, we pass dtype=object for now because of the extra metadata rows.
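# Illustrative sketch (not part of the original loader). The branches above all
# follow the same pattern: build a (Name, Database_ID) row multi-index, coerce
# the string values to numbers, then transpose so samples become the Patient_ID
# index. Toy data only; the sample column names below are assumptions.
def _example_gene_table_to_patient_matrix():
    import pandas as pd
    raw = pd.DataFrame({
        "Name": ["TP53", "BRCA1"],
        "Database_ID": ["NP_000537", "NP_009225"],
        "Sample_A": ["0.1", "-0.4"],
        "Sample_B": ["1.2", "0.3"],
    })
    df = raw.set_index(["Name", "Database_ID"])
    df = df.apply(pd.to_numeric)  # values arrive as strings because of dtype=object
    df = df.sort_index().transpose().sort_index()
    df.index.name = "Patient_ID"
    return df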
# -*- coding: utf-8 -*-
"""
Functions for cleaning mdredze Sandy Twitter dataset.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf
from twitterinfrastructure.tools import cross_corr, output, query
def create_timeseries_diff(df, col1, col2, zone_col, write_path=None):
"""Creates a dataframe where col1 and col2 columns are replaced by
first differenced time series.
Parameters
----------
df : Dataframe
        Dataframe containing time series data to difference (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
col1 : str
Name of column containing first time series.
col2 : str
Name of column containing second time series.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
Returns
-------
df_diff : dataframe
Notes
-----
"""
# create differenced time series dataframe
df_diff = pd.DataFrame(columns=[zone_col, 'timedelta',
col1, col2])
df_diff.set_index([zone_col, 'timedelta'], inplace=True)
zones = pd.unique(df.index.get_level_values(level=zone_col))
for zone in zones:
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = pd.to_timedelta(s_y1.index.values, unit='h')
s_y2.index = pd.to_timedelta(s_y2.index.values, unit='h')
# difference both timeseries
s_y1_diff = pd.Series(data=np.diff(s_y1), index=s_y1.index.values[0:-1],
name=col1)
s_y2_diff = pd.Series(data=np.diff(s_y2), index=s_y2.index.values[0:-1],
name=col2)
df_zone = pd.concat([s_y1_diff, s_y2_diff], axis=1)
df_zone.index.name = 'timedelta'
df_zone = df_zone.reset_index()
df_zone[zone_col] = zone
df_zone = df_zone.set_index([zone_col, 'timedelta'])
# add zone to differenced dataframe
        df_diff = df_diff.append(df_zone, ignore_index=False, sort=False)
# save to csv
if write_path:
df_csv = df_diff.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
return df_diff
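# Usage sketch (illustrative only; zone and column names are made up, and it
# assumes the older pandas API this module targets, where DataFrame.append is
# still available). The inner index holds integer hours, which the function
# converts to timedeltas before differencing each zone's series.
def _example_create_timeseries_diff():
    idx = pd.MultiIndex.from_product([['zone_a'], range(4)],
                                     names=['location_id', 'timedelta'])
    df = pd.DataFrame({'zpace-drop': [1.0, 2.0, 4.0, 7.0],
                       'percent-err0': [0.5, 0.4, 0.2, 0.1]}, index=idx)
    return create_timeseries_diff(df, 'zpace-drop', 'percent-err0',
                                  zone_col='location_id')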
def create_timeseries_shift(df, df_max_rho, col1, col2, zone_col,
write_path=None):
"""Creates a dataframe where the 2nd time series column is time-shifted.
Parameters
----------
df : Dataframe
Dataframe to containing time series data to shift (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
df_max_rho : Dataframe
Dataframe containing desired shifts for col2 in a 'max-lag' column,
indexed by zone_col.
col1 : str
Name of column containing first time series (copied).
col2 : str
Name of column containing second time series. This is the shifted
time series, where col2_shifted = col2 + shift.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
Returns
-------
df_shift : dataframe
Notes
-----
"""
# create shifted time series dataframe
df_shift = pd.DataFrame(columns=[zone_col, 'timedelta', col1, col2])
df_shift.set_index([zone_col, 'timedelta'], inplace=True)
for zone in df_max_rho.index.values:
if not np.isnan(df_max_rho.loc[zone, 'max-rho']):
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = pd.to_timedelta(s_y1.index.values, unit='h')
s_y2.index = pd.to_timedelta(s_y2.index.values, unit='h')
# shift 2nd time series
shift = df_max_rho.loc[zone, 'max-shift']
s_y2_shift = s_y2.shift(1, freq=pd.Timedelta(shift, unit='h'))
df_zone = pd.concat([s_y1, s_y2_shift], axis=1)
df_zone.index.name = 'timedelta'
df_zone = df_zone.reset_index()
df_zone[zone_col] = zone
df_zone = df_zone.set_index([zone_col, 'timedelta'])
# add zone to shifted dataframe
            df_shift = df_shift.append(df_zone, ignore_index=False, sort=False)
# save to csv
if write_path:
df_csv = df_shift.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
return df_shift
def create_timeseries(df, zone_col, min_count, write_path=None, verbose=0):
"""Creates a time series dataframe where each column of df is
independently linearly interpolated over the total range of timedeltas of
each zone. Only time series with at least min_count data points are
included. Assumes the dataframe is indexed by a zone column (zone_col)
and a timedelta column (e.g. using index_timedelta).
Parameters
----------
df : Dataframe
Dataframe to calculate time series from.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
min_count : int
Minimum number of data points needed to convert to a time series.
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_ts : dataframe
Notes
-----
"""
# loop through zones
df_ts = pd.DataFrame()
skipped = []
zones = pd.unique(df.index.get_level_values(zone_col))
for zone in zones:
df_zone = df.xs(zone, level=0)
# loop through columns (i.e. data to convert to time series)
y_interps = []
cols = df_zone.columns.values
for col in cols:
s = df_zone[col].dropna()
if s.count() < min_count:
skipped.append((zone, col))
else:
timedeltas = range(s.index.astype('timedelta64[h]').min(),
s.index.astype('timedelta64[h]').max() + 1)
y_interp = pd.Series(data=np.interp(
timedeltas, s.index.astype('timedelta64[h]'), s.values),
index=timedeltas, name=col)
y_interps.append(y_interp)
# add interpolated data to dataframe
if y_interps:
df_temp = pd.concat(objs=y_interps, axis=1, join='outer')
df_temp = df_temp.set_index(
pd.to_timedelta(df_temp.index.values, unit='h'))
df_temp[zone_col] = zone
df_temp.set_index(zone_col, append=True, inplace=True)
df_temp.index.names = ['timedelta', zone_col]
df_temp = df_temp.reorder_levels([1, 0])
df_ts = df_ts.append(df_temp, sort=False)
# save to csv
if write_path:
df_csv = df_ts.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
if verbose >= 1:
output('skipped zones for having less than {min_count} data points '
'in original column data: {skipped}'.format(skipped=skipped,
min_count=min_count))
return df_ts
def index_timedelta(df, datetime_ref, datetime_col):
"""Indexes a dataframe on a timedelta calculated from datetime_col
relative to datetime_ref.
Parameters
----------
df : Dataframe
Dataframe to reindex on timedelta.
datetime_ref : Timestamp
Reference datetime to calculate timedelta relative to, specified as a
timezone-aware Pandas Timestamp object. Calculates timedelta as
datetime_col - datetime_ref.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
datetime_col : str
Name of column (or index) containing the datetime data to calculate
timedelta from.
Returns
-------
df : dataframe
Notes
-----
"""
indexes = df.index.names
df = df.reset_index()
# calculate and add timedelta
    df['timedelta'] = df[datetime_col] - datetime_ref
# df['timedelta'] = [int(td.total_seconds() / 3600) for td
# in df['timedelta']]
# df['timedelta'] = pd.to_timedelta(df['timedelta'], unit='h')
# drop columns and reindex with datetime_col replaced by timedelta
df = df.drop([datetime_col], axis=1)
indexes = ['timedelta' if ind == datetime_col else ind for ind in indexes]
df = df.set_index(indexes)
df = df.sort_index(level=0)
return df
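# Usage sketch (illustrative only; the zone and column values are made up).
# Rows one and five hours after the reference time end up with timedeltas of
# 1 and 5 hours respectively.
def _example_index_timedelta():
    ref = pd.Timestamp('2012-10-28 00:00:00', tz='America/New_York')
    times = pd.to_datetime(['2012-10-28 01:00:00',
                            '2012-10-28 05:00:00']).tz_localize('America/New_York')
    df = pd.DataFrame({'location_id': [4, 4],
                       'datetimeNY': times,
                       'zpace-drop': [0.3, -0.1]})
    df = df.set_index(['location_id', 'datetimeNY'])
    return index_timedelta(df, datetime_ref=ref, datetime_col='datetimeNY')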
def load_nyctlc_zone(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone. Assumes the database
contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
Start date to include tweets from (inclusive), specified as a
timezone-aware Pandas Timestamp object.
E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
tz='America/New_York')
enddate : Timestamp
End date to include tweets from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
df_taxi = load_nyctlc_zone_hour(startdate, enddate, trip_type,
trip_count_filter, db_path, verbose=verbose)
# remove index, remove columns, and group by zone
df_taxi = df_taxi.reset_index()
df_taxi = df_taxi.drop(['datetimeNY'], axis=1)
df_taxi = df_taxi.groupby(['location_id']).mean()
if verbose >= 1:
if trip_type == 'dropoff':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-drop'])) + ', ' +
str(np.nanmax(df_taxi['zpace-drop'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-drop'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-drop'])) + '].')
elif trip_type == 'pickup':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-pick'])) + ', ' +
str(np.nanmax(df_taxi['zpace-pick'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-pick'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-pick'])) + '].')
return df_taxi
def load_nyctlc_zone_date(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone and date. Assumes the database
contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
Start date to include tweets from (inclusive), specified as a
timezone-aware Pandas Timestamp object.
E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
tz='America/New_York')
enddate : Timestamp
End date to include tweets from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
df_taxi = load_nyctlc_zone_hour(startdate, enddate, trip_type,
trip_count_filter, db_path, verbose=verbose)
# remove index, adjust datetime to date, and group by zone and date
df_taxi = df_taxi.reset_index()
df_taxi['datetimeNY'] = pd.to_datetime(df_taxi['datetimeNY']).dt.date
df_taxi = df_taxi.groupby(['location_id', 'datetimeNY']).mean()
if verbose >= 1:
if trip_type == 'dropoff':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-drop'])) + ', ' +
str(np.nanmax(df_taxi['zpace-drop'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-drop'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-drop'])) + '].')
elif trip_type == 'pickup':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-pick'])) + ', ' +
str(np.nanmax(df_taxi['zpace-pick'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-pick'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-pick'])) + '].')
return df_taxi
def load_nyctlc_zone_hour(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone and hour. Assumes the
database contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
Start date to include tweets from (inclusive), specified as a
timezone-aware Pandas Timestamp object.
E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
tz='America/New_York')
enddate : Timestamp
End date to include tweets from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
if verbose >= 1:
output('Started query.')
# define trip type
if trip_type not in ['dropoff', 'pickup']:
raise ValueError('Invalid trip_type argument: {arg}.'.format(
arg=trip_type))
# convert datetimes
enddate_exclusive = enddate - pd.Timedelta('1 second')
startdate_sql = startdate.strftime("%Y-%m-%d %H:%M:%S")
enddate_sql = enddate_exclusive.strftime("%Y-%m-%d %H:%M:%S")
# load dropoff/pickup data
sql = """
SELECT {trip_type}_datetime AS datetimeNY,
{trip_type}_location_id AS location_id,
z_mean_pace AS zpace, z_trip_count AS ztrips
FROM standard_zone{trip_type}_hour_sandy
WHERE
trip_count > {trip_count_filter} AND
{trip_type}_datetime BETWEEN
"{startdate_sql}" AND "{enddate_sql}"
;""".format(trip_count_filter=trip_count_filter,
startdate_sql=startdate_sql, enddate_sql=enddate_sql,
trip_type=trip_type)
df_taxi = query(db_path, sql)
# add columns
df_taxi['abs-zpace'] = abs(df_taxi['zpace'])
df_taxi['abs-ztrips'] = abs(df_taxi['ztrips'])
# convert datetimes
df_taxi['datetimeNY'] = pd.to_datetime(df_taxi['datetimeNY'])
df_taxi['datetimeNY'] = [dt.tz_localize(tz='America/New_York') for dt in
df_taxi['datetimeNY']]
# index and sort
df_taxi = df_taxi.set_index(['location_id', 'datetimeNY'])
df_taxi = df_taxi.sort_index(level=0)
if verbose >= 1:
output('[min, max] taxi datetimeNY (hourly): [' +
str(min(df_taxi.index.get_level_values(level=1))) + ', ' +
str(max(df_taxi.index.get_level_values(level=1))) + '].')
output('[min, max] taxi pace and trips mean z-score (hourly): [' +
str(np.nanmin(df_taxi['zpace'])) + ', ' +
str(np.nanmax(df_taxi['zpace'])) + '], [' +
str(np.nanmin(df_taxi['ztrips'])) + ', ' +
str(np.nanmax(df_taxi['ztrips'])) + '].')
# add drop or pick to column names
if trip_type == 'dropoff':
val = '-drop'
elif trip_type == 'pickup':
val = '-pick'
else:
pass
col_dict = {}
for col in df_taxi.columns.values:
col_dict[col] = col + val
df_taxi = df_taxi.rename(col_dict, axis='columns')
return df_taxi
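# Usage sketch (illustrative only): the database path below is hypothetical and
# the trip-count filter is arbitrary; both are supplied by the caller, not this
# module.
def _example_load_nyctlc_zone_hour():
    start = pd.Timestamp('2012-10-28 00:00:00', tz='America/New_York')
    end = pd.Timestamp('2012-11-03 00:00:00', tz='America/New_York')
    return load_nyctlc_zone_hour(start, end, trip_type='dropoff',
                                 trip_count_filter=10,
                                 db_path='data/nyctlc.db', verbose=1)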
def load_nyiso(startdate, enddate, db_path, verbose=0):
"""Query and clean nyiso load forecast error data for the specified date
range from a sqlite database. Assumes the database contains a
forecast_error table created using create_forecast_err.
Parameters
----------
startdate : Timestamp
Start date to include tweets from (inclusive), specified as a
timezone-aware Pandas Timestamp object.
E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
tz='America/New_York')
enddate : Timestamp
End date to include tweets from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, forecast_error
datetimes are UTC.
"""
if verbose >= 1:
output('Started query.')
# convert datetimes
startdateUTC = startdate.tz_convert('UTC')
enddateUTC = enddate.tz_convert('UTC') - pd.Timedelta('1 second')
startdate_sql = startdateUTC.strftime("%Y-%m-%d %H:%M:%S")
enddate_sql = enddateUTC.strftime("%Y-%m-%d %H:%M:%S")
# load nyiso load data
sql = """
SELECT datetimeUTC, zone_id AS nyiso_zone,
forecast_error_p0 AS err0
FROM forecast_error
WHERE
datetimeUTC BETWEEN "{startdate_sql}" AND "{enddate_sql}"
;""".format(startdate_sql=startdate_sql, enddate_sql=enddate_sql)
df = query(db_path, sql)
# convert datetimes
df['datetimeUTC'] = pd.to_datetime(df['datetimeUTC'])
df['datetimeUTC'] = [datetime.tz_localize(tz='UTC') for datetime in
df['datetimeUTC']]
df['datetimeNY'] = [datetime.tz_convert('America/New_York') for
datetime in df['datetimeUTC']]
# add and drop columns
df['percent-err0'] = df['err0'] * 100
df = df.drop(['datetimeUTC'], axis=1)
# index and sort
df = df.set_index(['nyiso_zone', 'datetimeNY'])
df = df.sort_index(level=0)
if verbose >= 1:
output('[min, max] forecast error datetimeNY: [' +
str(min(df.index.get_level_values(level=1))) + ', ' +
str(max(df.index.get_level_values(level=1))) + '].')
output('[min, max] forecast error: [' +
str(np.nanmin(df['err0'])) + ', ' +
str(np.nanmax(df['err0'])) + '].')
output('Finished query.')
return df
def max_cross_corr(df, col1, col2, zone_col, shifts, min_overlap, verbose=0):
"""Creates a dataframe containing the time shift that maximizes
cross-correlation between two time series, the max cross-correlation value,
and the number of overlapping data points in those series.
Parameters
----------
df : Dataframe
        Dataframe containing time series data (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
col1 : str
Name of column containing first time series.
col2 : str
Name of column containing second time series. This is the shifted
time series, where col2_shifted = col2 + shift.
zone_col : str
Name of spatial zone index.
shifts : list
List of time shifts to apply to 2nd time series (in hours).
min_overlap : int
Minimum number of overlapping data points (after the 2nd series is time
shifted) needed to calculate cross-correlation.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_max_rho : dataframe
Dataframe of max cross-correlations and associated shifts and counts.
df_rho : dataframe
Dataframe of cross-correlations and associated shifts and counts for
all shifts.
Notes
-----
"""
df_rho = pd.DataFrame(columns=['shift', zone_col, 'rho'])
df_count = pd.DataFrame(columns=['shift', zone_col, 'count'])
skipped = []
zones = pd.unique(df.index.get_level_values(zone_col))
for shift in shifts:
for zone in zones:
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = pd.to_timedelta(s_y1.index.values, unit='h')
s_y2.index = pd.to_timedelta(s_y2.index.values, unit='h')
# shift 2nd time series
s_y2_shift = s_y2.shift(1, freq=pd.Timedelta(shift, unit='h'))
# skip zone if not enough overlapping data points (after shift)
df_zone = pd.concat([s_y1, s_y2_shift], axis=1).dropna()
num_overlap = df_zone.shape[0]
if num_overlap < min_overlap:
df_rho = df_rho.append({'shift': shift, zone_col: zone,
'rho': np.nan}, ignore_index=True)
skipped.append((shift, zone))
continue
# normalized cross-correlation
rho = cross_corr(df_zone[col1].values, df_zone[col2].values, True)
df_rho = df_rho.append({'shift': shift, zone_col: zone, 'rho': rho},
ignore_index=True)
df_count = df_count.append({'shift': shift, zone_col: zone,
'count': num_overlap},
ignore_index=True)
# reshape and get max rhos and associated shifts and counts
df_rho = df_rho.set_index(['shift', zone_col])
df_rho_reshape = df_rho.reset_index()
df_rho_reshape = df_rho_reshape.pivot(index='shift', columns=zone_col,
values='rho')
s_max_shifts = df_rho_reshape.idxmax(axis=0)
s_max_shifts.name = 'max-shift'
s_max_rhos = df_rho_reshape.max(axis=0)
s_max_rhos.name = 'max-rho'
df_count = df_count.set_index(['shift', zone_col])
max_counts = []
for zone in zones:
max_shift = s_max_shifts.loc[zone]
if np.isnan(max_shift):
max_counts.append(np.nan)
else:
max_counts.append(df_count.loc[max_shift, zone].item())
s_max_counts = pd.Series(max_counts, index=zones)
s_max_counts.name = 'max-count'
df_max_rho = pd.concat([s_max_rhos, s_max_shifts, s_max_counts], axis=1)
if verbose >= 2:
output('Skipped {num_skipped} (shift, {zone}) combos: {skipped}'.format(
num_skipped=len(skipped), zone=zone_col, skipped=skipped))
return df_max_rho, df_rho
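# Usage sketch (illustrative only; synthetic data, and it assumes the older
# pandas API this module targets). The second signal is a lagged copy of the
# first, so the maximizing shift reported in df_max_rho should recover that lag
# (up to the sign convention used when shifting col2).
def _example_max_cross_corr():
    base = np.sin(np.linspace(0, 6 * np.pi, 48))
    idx = pd.MultiIndex.from_product([['zone_a'], range(48)],
                                     names=['location_id', 'timedelta'])
    df = pd.DataFrame({'signal_a': base,
                       'signal_b': np.roll(base, 3)}, index=idx)
    df_max_rho, df_rho = max_cross_corr(df, 'signal_a', 'signal_b',
                                        zone_col='location_id',
                                        shifts=range(-6, 7),
                                        min_overlap=12, verbose=1)
    return df_max_rho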
def plot_acf_series(s, figsize=(6, 4),
xlabel='Lag', ylabel=None,
                    save_path=None):
    """Plots the autocorrelation function (ACF) of the given time series.
Parameters
----------
s : list-like object
List-like object (e.g. list, pandas series) containing time series to
        plot acf for. If ylabel is None, then s must be a labeled series.
figsize : tuple (optional)
Two element tuple defining figure size in inches (width, height).
xlabel : str (optional)
Defines x-axis label.
    ylabel : str (optional)
Defines left y-axis label.
save_path : str or None
If str, defines path for saving figure. If None, does not save figure.
Returns
-------
Notes
-----
"""
# create figure and add acf plot
fig, ax = plt.subplots(figsize=figsize, tight_layout=False)
plot_acf(s, ax=ax)
# axes
ax.tick_params(axis='x', colors='k')
ax.tick_params(axis='y', colors='k')
ax.set_xlabel(xlabel, color='k')
if ylabel:
ax.set_ylabel(ylabel, color='k')
else:
ax.set_ylabel('ACF ({var})'.format(var=s.name), color='k')
plt.title('')
# save
if save_path:
fig.set_size_inches(figsize[0], figsize[1])
plt.savefig(save_path, dpi=300, bbox_inches='tight')
def plot_timeseries(s1, s2, figsize=(6, 4),
linestyles=('-', '--'),
colors=('xkcd:black', 'xkcd:red'),
xlabel='Timedelta, hours', y1label=None, y2label=None,
                    save_path=None):
    """Plots two timedelta-indexed time series on a shared x-axis, with
    separate left and right y-axes.
Parameters
----------
s1 : series
1st pandas series indexed by timedelta. Series must be named if y1label
is None.
s2 : series
2nd pandas series indexed by timedelta. Series must be named if y2label
is None.
figsize : tuple (optional)
Two element tuple defining figure size in inches (width, height).
linestyles : tuple (optional)
Tuple defining line styles to use for each series.
colors : tuple (optional)
Tuple defining colors to use for each series.
xlabel : str
Defines x-axis label.
y1label : str
Defines left y-axis label.
y2label : str
Defines right y-axis label.
save_path : str or None
If str, defines path for saving figure. If None, does not save figure.
Returns
-------
Notes
-----
"""
# get data
s1.index = pd.to_timedelta(s1.index.values, unit='h')
    s2.index = pd.to_timedelta(s2.index.values, unit='h')
from collections import namedtuple
import matplotlib.pyplot as plt
from numpy import *
from numpy.linalg import qr
from numpy.random import randint, randn, seed as randseed
from os import makedirs, remove
from os.path import isfile, basename, dirname
from pandas import Series
import pickle
from scipy.stats import norm, kstest
from typing import Any, Callable, Generator, List, Optional, Sequence, Union
__all__ = ['RBIG_file', 'RBIG', 'inv', 'empiric', 'gaussianization']
Empiric = namedtuple('Empiric', ['xk', 'pk'])
def inv(obj: Any) -> Callable:
    """Returns the inverse-transform callable of obj (its __inv__ method)."""
    return obj.__inv__
def normalize(x):
"""Domain: [0, 1]"""
x = array(x)
# assert minimal value >= 0
x[x<0] = 0
# assert sum = 1
if sum(x) == 0:
x = ones_like(x)
return x/sum(x)
def normalize_cdf(x, tol=1e-8):
"""This prevents norm.ppf({0, 1}) = +-inf by shifting the values of CDF into (0, 1)."""
x[x==0.] = tol
x[x==1.] = 1. - tol
return x
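# Small illustration (not part of the original module): without the clipping,
# norm.ppf maps the CDF endpoints 0 and 1 to -inf/+inf, and those infinities
# would turn into NaNs once multiplied by the rotation matrices in RBIG.
def _example_normalize_cdf():
    raw_cdf = array([0.0, 0.25, 1.0])
    print(norm.ppf(raw_cdf))                        # [-inf, -0.674..., +inf]
    print(norm.ppf(normalize_cdf(raw_cdf.copy())))  # all finite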
class empiric:
"""Empiric distribution with connected support.
domains:
xk: real numbers
pk: [0,1]
ck, sk: (0,1)
to prevent norm.ppf from returning {-inf, +inf},
which in turn would break the rotations of RBIG
- anynumber * inf = nan
- matrix multiplications propagate nans
ToDo: Warnings:
RuntimeWarning: invalid value encountered in greater_equal
cond2 = (x >= np.asarray(_b)) & cond0
RuntimeWarning: invalid value encountered in less_equal
cond2 = cond0 & (x <= _a)
RuntimeWarning: invalid value encountered in greater
cond1 = (0 < q) & (q < 1)
RuntimeWarning: invalid value encountered in less
cond1 = (0 < q) & (q < 1)
"""
def __init__(self, x, p=None):
if ndim(x) == 2:
x, p = x[0], x[1]
y, py = unique(x, return_counts=True)
if p is not None:
py = p
if len(x) != len(y):
py = zeros(y.shape)
for i in range(y.size):
py[i] = p[where(x == y[i])].sum()
# normalize probabilities
p = normalize(py)
self.xk = y
self.pk = p
self.ck = cumsum(p)
# correct domains of ck & sk
# many values may be 0 or 1!
self.ck = normalize_cdf(self.ck, 1e-8)
self.sk = 1. - self.ck
def pdf(self, x): return interp(x, self.xk, self.pk)
def cdf(self, x): return interp(x, self.xk, self.ck)
def ppf(self, x): return interp(x, self.ck, self.xk)
def sf(self, x): return interp(x, self.xk, self.sk)
# reversed ordering - sf is decreasing
def isf(self, x): return interp(x, self.sk[::-1], self.xk[::-1])
params = property(lambda self: Empiric(self.xk, self.pk))
class gaussianization(empiric):
def __init__(self, x, p=None): super().__init__(x, p)
def __call__(self, x): return norm.ppf(self.cdf(x))
def __inv__(self, x): return self.ppf(norm.cdf(x))
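# Usage sketch (illustrative only): marginally gaussianize a skewed sample and
# invert the transform. The inverse is interpolation-based, so the round trip
# is only approximate.
def _example_gaussianization():
    randseed(0)
    x = randn(1000) ** 2                 # skewed, clearly non-Gaussian
    g = gaussianization(x)
    z = g(x)                             # approximately standard normal
    x_back = inv(g)(z)                   # approximate reconstruction of x
    return kstest(z, 'norm')[0], abs(x - x_back).max()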
class Gaussianize_Matrix:
"""Marginally gaussianize each feature of design matrix."""
def __call__(self, x, dists):
for i in range(len(x)):
x[i] = gaussianization(dists[i])(x[i])
return x
def __inv__(self, x, dists):
for i in range(len(x)):
x[i] = inv(gaussianization(dists[i]))(x[i])
return x
gaussianize_matrix = Gaussianize_Matrix()
def dists_of_matrix(x: ndarray) -> List[ndarray]:
"""Feature wise empirical distributions."""
dists = []
for feature in x:
dists += [empiric(feature).params]
# dists = self.dim * [0]
# for i, feature in enumerate(self._x):
# dists[i] = empiric(feature).params
return dists
def file_exists(file: str = 'tmp.rbig') -> str:
path, base = dirname(file), basename(file)
root, ext = base.split('.', 1)
i, sep = 0, len(root) + len(path) + 1
while isfile(file):
file = '{}-{:d}.{}'.format(file[:sep], i, ext)
i += 1
return file
class RBIG_file:
"""Rotation based iterative gaussianization (arXiv:1602.00229).
Design matrix is transposed for efficiency reasons.
All transformations are saved to a file."""
def __init__(
self,
x: ndarray,
epochs: int,
seed: Optional[int] = None,
file: str = '../tmp/tmp.rbig'):
self._x = copy(x.T)
self.dim = x.shape[1]
self.epoch_nr = 0 # !
self.file = file_exists(file)
self.fit(epochs, seed)
def fit(self, epochs: int, seed: Optional[int] = None):
randseed(seed)
for epoch in range(epochs):
dists = dists_of_matrix(self._x)
rot = qr(randn(self.dim, self.dim))[0]
self._x = gaussianize_matrix(self._x, dists)
self._x = rot @ self._x
ks = mean([kstest(y, 'norm')[0] for y in self._x])
self._save([self.epoch_nr, ks, dists, rot])
self.epoch_nr += 1
def __call__(self, x): return self.encode(x)
def __inv__(self, x): return self.decode(x)
def encode(self, x: ndarray) -> ndarray:
self._x = copy(x.T)
for epoch, ks, dists, rot in self._load():
self._x = gaussianize_matrix(self._x, dists)
self._x = rot @ self._x
return self.x
def decode(self, x: ndarray) -> ndarray:
self._x = copy(x.T)
for epoch, ks, dists, rot in reversed(list(self._load())):
self._x = rot.T @ self._x
self._x = inv(gaussianize_matrix)(self._x, dists)
return self.x
def kstest(self,
show: bool = False,
save: bool = False,
path: str = 'KS.png') -> Series:
ks = []
for epoch, k, dists, rot in self._load():
ks += [k]
ks = Series(ks, name='KS-test')
        ks.index.name = 'Epochs'
ks.index += 1
if show or save:
            ks.plot(title='Kolmogorov-Smirnov test', loglog=True, legend=False, color='k', figsize=(3, 2))
#plt.ylabel('KS Test Statistic')
if save:
plt.savefig(path, bbox_inches='tight')
plt.show() if show else plt.close()
return ks
def _save(self, params: list):
makedirs(dirname(self.file), exist_ok=True)
with open(self.file, 'ab') as file:
pickle.dump(params, file, pickle.HIGHEST_PROTOCOL)
def _load(self) -> Generator[List, None, None]:
with open(self.file, "rb") as f:
while True:
try:
yield pickle.load(f)
except EOFError:
break
@property
def rem(self):
if isfile(self.file):
remove(self.file)
x = property(lambda self: self._x.T)
def inspect_seeds(
epochs: Optional[int] = None,
seeds: Optional[Sequence[int]] = None) -> (int, List[int]):
"""
Assert number of epochs matches number of seeds and that seeds is a list.
:param epochs: Number of epochs for training.
:param seeds: Seeds for random number generation.
:return: Inspected epochs and seeds.
"""
# Assert that seeds is of type list.
if isinstance(seeds, int) or seeds is None:
randseed(seeds)
seeds = []
else:
seeds = list(seeds)
# Optionally determine number of epochs from number of seeds.
if epochs is None:
epochs = len(seeds)
# Match number of epochs with number of seeds.
if epochs > len(seeds):
# Extend random values to seeds.
seeds += list(randint(
2 ** 32 - 1, # Max accepted range for numpy seed generator.
size=epochs - len(seeds),
dtype=int64))
elif epochs < len(seeds):
seeds = seeds[:epochs]
return epochs, seeds
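# Small illustration (not part of the original module) of how epochs and seeds
# are reconciled: missing seeds are drawn at random, surplus seeds are trimmed,
# and epochs defaults to the number of seeds given.
def _example_inspect_seeds():
    print(inspect_seeds(epochs=4, seeds=[1, 2]))     # (4, [1, 2, <rand>, <rand>])
    print(inspect_seeds(epochs=2, seeds=[1, 2, 3]))  # (2, [1, 2])
    print(inspect_seeds(epochs=None, seeds=[7, 8]))  # (2, [7, 8])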
class RBIG:
"""
Rotation based iterative gaussianization (arXiv:1602.00229).
:member _x: Transposed design matrix with shape (features, samples).
:member features: Number of features of _x.
:member samples: Number of samples of _x.
:member epoch_nr: Current epoch number of training.
:member seeds: Seeds for epoch wise generation of random rotation matrices.
:member dists: Collection of feature wise empirical distributions of _x with shape (epochs, features, samples).
:member ks: Epoch wise Kolmogorow-Smirnow-Test-statistics.
"""
def __init__(
self,
x: ndarray,
epochs: Optional[int] = None,
seeds: Optional[Sequence[int]] = None,
eps: float = 1e-7):
"""
Initializes RBIG.
:param x: Design matrix with shape (samples, features).
:param epochs: Number of epochs for initial training.
:param seeds: Seeds for epoch wise generation of random rotation matrices for initial training.
:param eps: Precision parameter for marginal gaussianization.
"""
self._x = copy(x.T)
self.features = self._x.shape[0]
self.samples = self._x.shape[1]
self.epoch_nr = 0
self.dists = None
self.seeds = []
self.ks = []
self.eps = eps
epochs, seeds = inspect_seeds(epochs, seeds)
# initial gaussianization
self.fit(epochs, seeds)
def _empirical_distributions(self):
"""
Determines feature wise cumulated empirical distributions at recent epoch.
Correct support at the edges by a small tolerance value.
"""
dists = zeros((2,) + self._x.shape)
# Feature wise sorting and counting.
for feature in range(self.features):
vals, cnts = unique(self._x[feature], return_counts=True)
dists[0, feature, :len(vals)] = vals
dists[1, feature, :len(vals)] = cnts
# correct length of distributions, otherwise interp breaks!
dists[0, feature, len(vals):] = nan
# Cumulate counts feature wise.
dists[1] = cumsum(dists[1], axis=1)
# Apply feature wise [0, 1]-normalization.
dists[1] /= expand_dims(dists[1, :, -1], axis=1)
# Clip empirical cdf into open interval (0, 1).
dists[1] = clip(dists[1], self.eps, 1. - self.eps)
self.dists[self.epoch_nr] = dists
def _marginalcdf(self, epoch: int):
xk, ck = self.dists[epoch]
for feature in range(self.features):
self._x[feature] = interp(self._x[feature], xk[feature], ck[feature])
def _marginalppf(self, epoch: int):
xk, ck = self.dists[epoch]
for feature in range(self.features):
self._x[feature] = interp(self._x[feature], ck[feature], xk[feature])
def _mgauss(self, epoch: int):
self._marginalcdf(epoch)
self._x = norm.ppf(self._x)
def _igauss(self, epoch: int):
self._x = norm.cdf(self._x)
self._marginalppf(epoch)
def fit(
self,
epochs: Optional[int] = None,
seeds: Optional[Sequence[int]] = None):
"""
Fit gaussianization transform.
:param epochs: number of epochs to train; inferred from seeds if None
:param seeds: sequence of seeds for the random rotation matrices; random if None
:return:
"""
epochs, seeds = inspect_seeds(epochs, seeds)
if epochs == 0:
return
# Expand empirical distributions by incoming epochs.
dists = zeros((epochs, 2, self.features, self.samples))
if self.dists is None:
self.dists = dists
else:
self.dists = concatenate((self.dists, dists))
for epoch, seed in enumerate(seeds):
self._empirical_distributions()
randseed(seed)
rot = qr(randn(self.features, self.features))[0]
# ToDo: rot is all NaN in first epoch.
"""
f.e. prepare_gauss, plot_anomalies, plot_comparison and plot_time_series do work in terminal
but break if called from main.
Warnings:
RuntimeWarning: invalid value encountered in greater_equal
cond2 = (x >= np.asarray(_b)) & cond0
RuntimeWarning: invalid value encountered in less_equal
cond2 = cond0 & (x <= _a)
RuntimeWarning: invalid value encountered in greater
cond1 = (0 < q) & (q < 1)
RuntimeWarning: invalid value encountered in less
cond1 = (0 < q) & (q < 1)
"""
if any(isnan(rot)):
raise ValueError('Rotation matrix contains NaN in epoch %d from seed %d.' % (epoch, seed))
self._mgauss(epoch)
self._x = rot @ self._x
# Update members.
self.ks += [mean([kstest(feature, 'norm')[0] for feature in self._x])]
self.epoch_nr += 1
self.seeds += [seed]
def __call__(self, x):
return self.encode(x)
def __inv__(self, x):
return self.decode(x)
def encode(self, x: ndarray) -> ndarray:
self._x = copy(x.T)
for epoch, seed in enumerate(self.seeds):
randseed(seed)
rot = qr(randn(self.features, self.features))[0]
self._mgauss(epoch)
self._x = rot @ self._x
return self.x
def decode(self, x: ndarray) -> ndarray:
self._x = copy(x.T)
for epoch in reversed(range(self.epoch_nr)):
randseed(self.seeds[epoch])
rot = qr(randn(self.features, self.features))[0]
self._x = rot.T @ self._x
self._igauss(epoch)
return self.x
def kstest(
self,
show: bool = False,
save: bool = False,
path: str = 'KS.png') -> Series:
"""
Feature wise KS-tests.
Optionally plotting and saving.
"""
        ks = Series(self.ks, name='KS-test')
        ks.index.name = 'Epochs'
        ks.index += 1
        if show or save:
            ks.plot(title='Kolmogorov-Smirnov test', loglog=True,
                    legend=False, color='k', figsize=(3, 2))
            if save:
                plt.savefig(path, bbox_inches='tight')
            plt.show() if show else plt.close()
        return ks
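# Usage sketch (illustrative only; assumes the rotation-NaN issue flagged in the
# ToDo above does not occur for these seeds). Fit RBIG on correlated 2-D data,
# then check that decode(encode(x)) approximately recovers the input.
def _example_rbig_roundtrip():
    randseed(0)
    x = randn(500, 2)
    x[:, 1] += 0.8 * x[:, 0]             # introduce correlation
    model = RBIG(x, epochs=10, seeds=list(range(10)))
    z = model.encode(x)                  # gaussianized representation
    x_back = model.decode(z)
    return model.ks[-1], abs(x - x_back).max()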
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Deafault font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
    # Implementation of the Decision Tree Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and all the element to create a dashboard with
# all the necessary elements to present the results from the algorithm
# The canvas is divided using a grid loyout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
        '''
        Decision Tree Classifier
        We populate the dashboard using the parameters chosen by the user.
        The parameters are processed to execute the scikit-learn Decision Tree algorithm,
        then the results are presented in graphics and reports in the canvas.
        :return: None
        '''
        # processing the parameters: gather the features selected via the checkboxes
        checked = [
            features_list[i]
            for i in range(25)
            if getattr(self, 'feature{}'.format(i)).isChecked()
        ]
        self.list_corr_features = df[checked]
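# Illustrative sketch (assumed continuation, not from the source). The update()
# method above is shown only up to the feature-selection step. A typical
# continuation, given the imports in this file, would split the data, fit the
# DecisionTreeClassifier with the chosen maximum depth, and fill the metric
# fields and plots. The standalone helper below sketches that flow; X, y,
# test_percent and max_depth stand in for the GUI inputs.
def _example_fit_decision_tree(X, y, test_percent=30, max_depth=3):
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_percent / 100.0, random_state=100)
    clf = DecisionTreeClassifier(criterion='gini', max_depth=max_depth,
                                 random_state=100)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    return (accuracy_score(y_test, y_pred) * 100,
            confusion_matrix(y_test, y_pred))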
import numpy as np
import pandas as pd
import pytest
from sklearn.exceptions import NotFittedError
from feature_engine.datetime import DatetimeFeatures
from feature_engine.datetime._datetime_constants import (
FEATURES_DEFAULT,
FEATURES_SUFFIXES,
FEATURES_SUPPORTED,
)
vars_dt = ["datetime_range", "date_obj1", "date_obj2", "time_obj"]
vars_non_dt = ["Name", "Age"]
feat_names_default = [FEATURES_SUFFIXES[feat] for feat in FEATURES_DEFAULT]
dates_nan = pd.DataFrame({"dates_na": ["Feb-2010", np.nan, "Jun-1922", np.nan]})
_false_input_params = [
(["not_supported"], 3.519, "wrong_option"),
(["year", 1874], [1, -1.09, "var3"], 1),
("year", [3.5], [True, False]),
(14198, [0.1, False], {True}),
]
@pytest.mark.parametrize(
"_features_to_extract, _variables, _other_params", _false_input_params
)
def test_raises_error_when_wrong_input_params(
_features_to_extract, _variables, _other_params
):
with pytest.raises(ValueError):
assert DatetimeFeatures(features_to_extract=_features_to_extract)
with pytest.raises(ValueError):
assert DatetimeFeatures(variables=_variables)
with pytest.raises(ValueError):
assert DatetimeFeatures(missing_values=_other_params)
with pytest.raises(ValueError):
assert DatetimeFeatures(drop_original=_other_params)
with pytest.raises(ValueError):
assert DatetimeFeatures(utc=_other_params)
def test_default_params():
transformer = DatetimeFeatures()
assert isinstance(transformer, DatetimeFeatures)
assert transformer.variables is None
assert transformer.features_to_extract is None
assert transformer.drop_original
assert transformer.utc is None
assert transformer.dayfirst is False
assert transformer.yearfirst is False
assert transformer.missing_values == "raise"
_variables = [0, [0, 1, 9, 23], "var_str", ["var_str1", "var_str2"], [0, 1, "var3", 3]]
@pytest.mark.parametrize("_variables", _variables)
def test_variables_params(_variables):
assert DatetimeFeatures(variables=_variables).variables == _variables
def test_features_to_extract_param():
assert DatetimeFeatures(features_to_extract=None).features_to_extract is None
assert DatetimeFeatures(features_to_extract=["year"]).features_to_extract == [
"year"
]
assert DatetimeFeatures(features_to_extract="all").features_to_extract == "all"
_not_a_df = [
"not_a_df",
[1, 2, 3, "some_data"],
pd.Series([-2, 1.5, 8.94], name="not_a_df"),
]
@pytest.mark.parametrize("_not_a_df", _not_a_df)
def test_raises_error_when_fitting_not_a_df(_not_a_df):
transformer = DatetimeFeatures()
# trying to fit not a df
with pytest.raises(TypeError):
transformer.fit(_not_a_df)
def test_raises_error_when_variables_not_datetime(df_datetime):
# asking for not datetime variable(s)
with pytest.raises(TypeError):
DatetimeFeatures(variables=["Age"]).fit(df_datetime)
with pytest.raises(TypeError):
DatetimeFeatures(variables=["Name", "Age", "date_obj1"]).fit(df_datetime)
# passing a df that contains no datetime variables
with pytest.raises(ValueError):
DatetimeFeatures().fit(df_datetime[["Name", "Age"]])
def test_raises_error_when_df_has_nan():
# dataset containing nans
with pytest.raises(ValueError):
DatetimeFeatures().fit(dates_nan)
def test_attributes_upon_fitting(df_datetime):
transformer = DatetimeFeatures()
transformer.fit(df_datetime)
assert transformer.variables_ == vars_dt
assert transformer.features_to_extract_ == FEATURES_DEFAULT
assert transformer.n_features_in_ == df_datetime.shape[1]
transformer = DatetimeFeatures(variables="date_obj1", features_to_extract="all")
transformer.fit(df_datetime)
assert transformer.variables_ == ["date_obj1"]
assert transformer.features_to_extract_ == FEATURES_SUPPORTED
transformer = DatetimeFeatures(
variables=["date_obj1", "time_obj"],
features_to_extract=["year", "quarter_end", "second"],
)
transformer.fit(df_datetime)
assert transformer.variables_ == ["date_obj1", "time_obj"]
assert transformer.features_to_extract_ == ["year", "quarter_end", "second"]
@pytest.mark.parametrize("_not_a_df", _not_a_df)
def test_raises_error_when_transforming_not_a_df(_not_a_df, df_datetime):
transformer = DatetimeFeatures()
transformer.fit(df_datetime)
# trying to transform not a df
with pytest.raises(TypeError):
transformer.transform(_not_a_df)
def test_raises_error_when_transform_df_with_different_n_variables(df_datetime):
transformer = DatetimeFeatures()
transformer.fit(df_datetime)
# different number of columns than the df used to fit
with pytest.raises(ValueError):
transformer.transform(df_datetime[vars_dt])
def test_raises_error_when_nan_in_transform_df(df_datetime):
transformer = DatetimeFeatures()
transformer.fit(df_datetime)
# dataset containing nans
with pytest.raises(ValueError):
DatetimeFeatures().transform(dates_nan)
def test_raises_non_fitted_error(df_datetime):
# trying to transform before fitting
with pytest.raises(NotFittedError):
DatetimeFeatures().transform(df_datetime)
def test_extract_datetime_features_with_default_options(
df_datetime, df_datetime_transformed
):
transformer = DatetimeFeatures()
X = transformer.fit_transform(df_datetime)
pd.testing.assert_frame_equal(
X,
df_datetime_transformed[
vars_non_dt + [var + feat for var in vars_dt for feat in feat_names_default]
],
)
def test_extract_datetime_features_from_specified_variables(
df_datetime, df_datetime_transformed
):
# single datetime variable
X = DatetimeFeatures(variables="date_obj1").fit_transform(df_datetime)
pd.testing.assert_frame_equal(
X,
df_datetime_transformed[
vars_non_dt
+ ["datetime_range", "date_obj2", "time_obj"]
+ ["date_obj1" + feat for feat in feat_names_default]
],
)
# multiple datetime variables
X = DatetimeFeatures(variables=["datetime_range", "date_obj2"]).fit_transform(
df_datetime
)
pd.testing.assert_frame_equal(
X,
df_datetime_transformed[
vars_non_dt
+ ["date_obj1", "time_obj"]
+ [
var + feat
for var in ["datetime_range", "date_obj2"]
for feat in feat_names_default
]
],
)
# multiple datetime variables in different order than they appear in the df
X = DatetimeFeatures(variables=["date_obj2", "date_obj1"]).fit_transform(
df_datetime
)
pd.testing.assert_frame_equal(
X,
df_datetime_transformed[
vars_non_dt
+ ["datetime_range", "time_obj"]
+ [
var + feat
for var in ["date_obj2", "date_obj1"]
for feat in feat_names_default
]
],
)
def test_extract_all_datetime_features(df_datetime, df_datetime_transformed):
X = DatetimeFeatures(features_to_extract="all").fit_transform(df_datetime)
pd.testing.assert_frame_equal(X, df_datetime_transformed.drop(vars_dt, axis=1))
def test_extract_specified_datetime_features(df_datetime, df_datetime_transformed):
X = DatetimeFeatures(features_to_extract=["semester", "week"]).fit_transform(
df_datetime
)
pd.testing.assert_frame_equal(
X,
df_datetime_transformed[
vars_non_dt
+ [var + "_" + feat for var in vars_dt for feat in ["semester", "week"]]
],
)
# different order than they appear in the glossary
X = DatetimeFeatures(features_to_extract=["hour", "day_of_week"]).fit_transform(
df_datetime
)
pd.testing.assert_frame_equal(
X,
df_datetime_transformed[
vars_non_dt
+ [var + "_" + feat for var in vars_dt for feat in ["hour", "day_of_week"]]
],
)
def test_extract_features_from_categorical_variable(
df_datetime, df_datetime_transformed
):
cat_date = pd.DataFrame({"date_obj1": df_datetime["date_obj1"].astype("category")})
X = DatetimeFeatures(variables="date_obj1").fit_transform(cat_date)
pd.testing.assert_frame_equal(
X, df_datetime_transformed[["date_obj1" + feat for feat in feat_names_default]]
)
def test_extract_features_from_different_timezones(
df_datetime, df_datetime_transformed
):
time_zones = [4, -1, 9, -7]
tz_df = pd.DataFrame(
{"time_obj": df_datetime["time_obj"].add(["+4", "-1", "+9", "-7"])}
)
transformer = DatetimeFeatures(
variables="time_obj", features_to_extract=["hour"], utc=True
)
X = transformer.fit_transform(tz_df)
pd.testing.assert_frame_equal(
X,
df_datetime_transformed[["time_obj_hour"]].apply(
lambda x: x.subtract(time_zones)
),
)
exp_err_msg = (
"ValueError: variable(s) time_obj "
"could not be converted to datetime. Try setting utc=True"
)
with pytest.raises(ValueError) as errinfo:
assert DatetimeFeatures(
variables="time_obj", features_to_extract=["hour"], utc=False
).fit_transform(tz_df)
assert str(errinfo.value) == exp_err_msg
def test_extract_features_from_localized_tz_variables():
tz_df = pd.DataFrame(
{
"date_var": [
"2018-10-28 01:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 03:00:00",
"2018-10-28 03:30:00",
]
}
)
tz_df["date_var"] = pd.to_datetime(tz_df["date_var"]).dt.tz_localize(
tz="US/Eastern"
)
# when utc is None
transformer = DatetimeFeatures(features_to_extract=["hour"]).fit(tz_df)
# init params
assert transformer.variables is None
assert transformer.utc is None
assert transformer.features_to_extract == ["hour"]
# fit attr
assert transformer.variables_ == ["date_var"]
assert transformer.features_to_extract_ == ["hour"]
assert transformer.n_features_in_ == 1
# transform
X = transformer.transform(tz_df)
df_expected = pd.DataFrame({"date_var_hour": [1, 2, 2, 2, 2, 3, 3]})
pd.testing.assert_frame_equal(X, df_expected)
# when utc is True
transformer = DatetimeFeatures(features_to_extract=["hour"], utc=True).fit(tz_df)
# init params
assert transformer.variables is None
assert transformer.utc is True
assert transformer.features_to_extract == ["hour"]
# fit attr
assert transformer.variables_ == ["date_var"]
assert transformer.features_to_extract_ == ["hour"]
assert transformer.n_features_in_ == 1
# transform
X = transformer.transform(tz_df)
df_expected = pd.DataFrame({"date_var_hour": [5, 6, 6, 6, 6, 7, 7]})
pd.testing.assert_frame_equal(X, df_expected)
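    # Added note: with a tz-aware column, utc=None extracts wall-clock hours in
    # the localized zone, whereas utc=True converts to UTC first; US/Eastern is
    # UTC-4 (EDT) on 2018-10-28, hence the +4 shift between the two expected frames.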
def test_extract_features_without_dropping_original_variables(
df_datetime, df_datetime_transformed
):
X = DatetimeFeatures(
variables=["datetime_range", "date_obj2"],
features_to_extract=["week", "quarter"],
drop_original=False,
).fit_transform(df_datetime)
pd.testing.assert_frame_equal(
X,
pd.concat(
[df_datetime_transformed[column] for column in vars_non_dt]
+ [df_datetime[var] for var in vars_dt]
+ [
df_datetime_transformed[feat]
for feat in [
var + "_" + feat
for var in ["datetime_range", "date_obj2"]
for feat in ["week", "quarter"]
]
],
axis=1,
),
)
def test_extract_features_from_variables_containing_nans():
X = DatetimeFeatures(
features_to_extract=["year"], missing_values="ignore"
).fit_transform(dates_nan)
pd.testing.assert_frame_equal(
X,
        pd.DataFrame({"dates_na_year": [2010, np.nan, 1922, np.nan]}),
    )
#! /usr/bin/python3
from abc import ABCMeta, abstractmethod
import time
from typing import Type
from design import Design
from estimator import Estimator
from evaluator import Evaluator
import numpy as np
import pandas as pd
class Plan(metaclass=ABCMeta):
def __init__(self):
self.evaluators = {}
self.designs = {}
def add_design(self, design_name, design_class: Type[Design], estimator_class: Type[Estimator], design_kwargs = None):
self.designs[design_name] = (design_class, estimator_class, design_kwargs)
def add_evaluator(self, evaluator_name: str, evaluator: Evaluator):
self.evaluators[evaluator_name] = evaluator()
def add_env(self, dgp_factory, seed, X_source_dist = None, X_target_dist = None):
np.random.seed(seed)
dgp_source = dgp_factory.create_dgp(X_dist = X_source_dist)
self.X_source = dgp_source.X
self.Y0_source = dgp_source.Y([0] * dgp_source.n)
self.Y1_source = dgp_source.Y([1] * dgp_source.n)
self.ITE_source = dgp_source.ITE()
self.ATE_source = dgp_source.ATE()
self.source = dgp_source.X_dist
dgp_target = dgp_factory.create_dgp(X_dist = X_target_dist)
self.ITE_target = dgp_target.ITE()
self.ATE_target = dgp_target.ATE()
self.target = dgp_target.X_dist
self.weight_target = self.target
def use_weighted_estimator(self, weighted_estimator = False):
if not weighted_estimator:
self.weight_target = self.source
def execute(self, design_name, weight_threshold, weighted_estimator = False):
results = []
design_class, estimator_class, design_kwargs = self.designs[design_name]
def make_row(name, value):
return pd.DataFrame({"design": [design_name + "_weighted-estimator" + str(weighted_estimator)], "metric": [name], "value": [value]})
time_start = time.time()
if design_kwargs is None:
design_kwargs = {}
design = design_class(**design_kwargs)
design.fit(self.X_source)
A = design.assign(self.X_source)
time_end = time.time()
time_elapsed = time_end - time_start
results.append(make_row("time_design", time_elapsed))
YA_source = np.where(A==1, self.Y1_source, self.Y0_source)
time_start = time.time()
estimator = estimator_class(design)
ITEhat = estimator.ITE(self.X_source, A, YA_source)
if weighted_estimator:
ATEhat = estimator.weighted_ATE(self.source, self.weight_target, self.X_source, A, YA_source, weight_threshold = weight_threshold)
else:
ATEhat = estimator.ATE(self.X_source, A, YA_source)
time_end = time.time()
time_elapsed = time_end - time_start
results.append(make_row("time_estimation", time_elapsed))
for name, evaluator in self.evaluators.items():
val = evaluator.evaluate(self.X_source, self.Y0_source, self.Y1_source, self.ATE_target, self.ITE_target, A, YA_source, ATEhat, ITEhat)
results.append(make_row(name, val))
        return pd.concat(results)
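# Illustrative usage sketch (added; not part of the original module). The
# concrete Design/Estimator/Evaluator subclasses and the DGP factory named
# below are hypothetical placeholders assumed to follow the interfaces imported
# above, so the outline is kept as comments rather than runnable code.
#
# plan = Plan()
# plan.add_design("greedy", GreedyDesign, DiffInMeansEstimator)
# plan.add_evaluator("ate_error", ATEErrorEvaluator)
# plan.add_env(my_dgp_factory, seed=123)
# plan.use_weighted_estimator(True)
# results = plan.execute("greedy", weight_threshold=0.1, weighted_estimator=True)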
#!/usr/bin/env python
# coding: utf-8
get_ipython().system(" python ../input/mlcomp/mlcomp/mlcomp/setup.py")
get_ipython().system(
"pip install ../input/../input/efficientnet-pytorch/efficientnet-pytorch/EfficientNet-PyTorch-master"
)
get_ipython().system(
"pip install ../input/pretrainedmodels/pretrainedmodels-0.7.4/pretrainedmodels-0.7.4/"
)
get_ipython().system(
"pip install --no-deps --no-dependencies ../input/segmentation-models-pytorch/ "
)
import warnings
warnings.filterwarnings("ignore")
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
import albumentations as A
from tqdm import tqdm_notebook
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.jit import load
from mlcomp.contrib.transform.albumentations import ChannelTranspose
from mlcomp.contrib.dataset.classify import ImageDataset
from mlcomp.contrib.transform.rle import rle2mask, mask2rle
from mlcomp.contrib.transform.tta import TtaWrap
unet_se_resnext50_32x4d = load(
"/kaggle/input/severstalmodels/unet_se_resnext50_32x4d.pth"
).cuda()
unet_mobilenet2 = load("/kaggle/input/severstalmodels/unet_mobilenet2.pth").cuda()
# unet_resnet34 = load('/kaggle/input/severstalmodels/unet_resnet34.pth').cuda()
import os
from segmentation_models_pytorch import Unet, FPN
ENCODER = "resnet34"
ENCODER_WEIGHTS = "imagenet"
DEVICE = "cuda"
CLASSES = ["0", "1", "2", "3", "4"]
ACTIVATION = "softmax"
unet_resnet34 = Unet(
encoder_name=ENCODER, encoder_weights=None, classes=4, activation="sigmoid"
)
state = torch.load("../input/bce-clf/unet_res34_525.pth")
unet_resnet34.load_state_dict(state["model_state_dict"])
unet_resnet34 = unet_resnet34.cuda()
unet_resnet34 = unet_resnet34.eval()
device = torch.device("cuda")
model_senet = Unet(
"se_resnext50_32x4d", encoder_weights=None, classes=4, activation=None
)
model_senet.to(device)
model_senet.eval()
state = torch.load(
"../input/senetmodels/senext50_30_epochs_high_threshold.pth",
map_location=lambda storage, loc: storage,
)
model_senet.load_state_dict(state["state_dict"])
model_fpn91lb = FPN(
encoder_name="se_resnext50_32x4d", classes=4, activation=None, encoder_weights=None
)
model_fpn91lb.to(device)
model_fpn91lb.eval()
# state = torch.load('../input/fpnseresnext/model_se_resnext50_32x4d_fold_0_epoch_7_dice_0.935771107673645.pth', map_location=lambda storage, loc: storage)
state = torch.load(
"../input/fpnse50dice944/model_se_resnext50_32x4d_fold_0_epoch_26_dice_0.94392.pth",
map_location=lambda storage, loc: storage,
)
model_fpn91lb.load_state_dict(state["state_dict"])
model_fpn91lb_pseudo = FPN(
encoder_name="se_resnext50_32x4d", classes=4, activation=None, encoder_weights=None
)
model_fpn91lb_pseudo.to(device)
model_fpn91lb_pseudo.eval()
# state = torch.load('../input/fpnseresnext/model_se_resnext50_32x4d_fold_0_epoch_7_dice_0.935771107673645.pth', map_location=lambda storage, loc: storage)
state = torch.load(
"../input/942-finetuned-on-pseudo-to9399/pseudo_fpn_se_resnext50_32x4d_fold_0_epoch_22_dice_0.944/pseudo_fpn_se_resnext50_32x4d_fold_0_epoch_22_dice_0.9446276426315308.pth",
map_location=lambda storage, loc: storage,
)
model_fpn91lb_pseudo.load_state_dict(state["state_dict"])
ENCODER = "se_resnext50_32x4d"
ENCODER_WEIGHTS = "imagenet"
CLASSES = ["0", "1", "2", "3", "4"]
ACTIVATION = "softmax"
fpn_se = FPN(
encoder_name=ENCODER,
encoder_weights=None,
# encoder_weights=ENCODER_WEIGHTS,
classes=len(CLASSES),
activation=ACTIVATION,
)
state = torch.load("../input/bce-clf/fpn_se13.pth")
fpn_se.to(device)
fpn_se.eval()
fpn_se.load_state_dict(state["model_state_dict"])
ENCODER = "se_resnext50_32x4d"
ENCODER_WEIGHTS = "imagenet"
CLASSES = ["0", "1", "2", "3", "4"]
ACTIVATION = "softmax"
fpn_se2 = FPN(
encoder_name=ENCODER,
encoder_weights=None,
# encoder_weights=ENCODER_WEIGHTS,
classes=len(CLASSES),
activation=ACTIVATION,
)
state = torch.load("../input/bce-clf/fpn_lovash_9519.pth")
fpn_se2.to(device)
fpn_se2.eval()
fpn_se2.load_state_dict(state["model_state_dict"])
# ### Models' mean aggregator
class Model:
def __init__(self, models):
self.models = models
def __call__(self, x):
res = []
x = x.cuda()
with torch.no_grad():
for m in self.models[:-2]:
res.append(torch.sigmoid(m(x)))
# last model with 5 classes (+background)
res.append(torch.sigmoid(self.models[-2](x))[:, 1:, :, :])
res.append(torch.sigmoid(self.models[-1](x))[:, 1:, :, :])
res = torch.stack(res)
res = torch.mean(res, dim=0)
# print(res.shape)
# print(pred_cls.shape)
return res
model = Model(
[
unet_se_resnext50_32x4d,
unet_mobilenet2,
unet_resnet34,
model_senet,
model_fpn91lb,
model_fpn91lb_pseudo,
fpn_se,
fpn_se2,
]
)
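# Minimal sketch (added) of the averaging performed in Model.__call__ above:
# per-model probability maps are stacked along a new leading dimension and the
# mean is taken over it. Dummy CPU tensors stand in for the real model outputs.
_demo_preds = [torch.rand(2, 4, 64, 400) for _ in range(3)]  # 3 models, batch=2, 4 defect classes
_demo_mean = torch.mean(torch.stack(_demo_preds), dim=0)
assert _demo_mean.shape == (2, 4, 64, 400)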
# ### Create TTA transforms, datasets, loaders
def create_transforms(additional):
res = list(additional)
# add necessary transformations
res.extend(
[
A.Normalize(
mean=(0.485, 0.456, 0.406),
std=(0.230, 0.225, 0.223)
# mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
),
ChannelTranspose(),
]
)
res = A.Compose(res)
return res
img_folder = "/kaggle/input/severstal-steel-defect-detection/test_images"
batch_size = 2
num_workers = 0
# Different transforms for TTA wrapper
transforms = [[], [A.HorizontalFlip(p=1)]]
transforms = [create_transforms(t) for t in transforms]
datasets = [
TtaWrap(ImageDataset(img_folder=img_folder, transforms=t), tfms=t)
for t in transforms
]
loaders = [
DataLoader(d, num_workers=num_workers, batch_size=batch_size, shuffle=False)
for d in datasets
]
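# Added note: each TtaWrap dataset keeps track of its own augmentation, which is
# what lets `datasets[i].inverse(p)` in the loop below undo the horizontal flip
# on the predicted masks before the TTA views are averaged.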
# ### Loaders' mean aggregator
thresholds = [0.5, 0.5, 0.5, 0.49]
min_area = [500, 500, 1000, 2000]
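# Added note: thresholds and min_area are per-defect-class post-processing
# parameters. A pixel is kept only if its averaged probability exceeds the class
# threshold, and the whole mask is zeroed out if fewer than min_area pixels
# survive that cut.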
res = []
# Iterate over all TTA loaders
total = len(datasets[0]) // batch_size
for loaders_batch in tqdm_notebook(zip(*loaders), total=total):
preds = []
image_file = []
for i, batch in enumerate(loaders_batch):
features = batch["features"].cuda()
# p = torch.sigmoid(model(features))
p = model(features)
# inverse operations for TTA
p = datasets[i].inverse(p)
preds.append(p)
image_file = batch["image_file"]
# TTA mean
preds = torch.stack(preds)
preds = torch.mean(preds, dim=0)
preds = preds.detach().cpu().numpy()
# Batch post processing
for p, file in zip(preds, image_file):
file = os.path.basename(file)
# Image postprocessing
for i in range(4):
p_channel = p[i]
imageid_classid = file + "_" + str(i + 1)
p_channel = (p_channel > thresholds[i]).astype(np.uint8)
if p_channel.sum() < min_area[i]:
p_channel = np.zeros(p_channel.shape, dtype=p_channel.dtype)
res.append(
{
"ImageId_ClassId": imageid_classid,
"EncodedPixels": mask2rle(p_channel),
}
)
df = pd.DataFrame(res)
df.to_csv("submission.csv", index=False)
df = pd.DataFrame(res)
df = df.fillna("")
df.to_csv("submission.csv", index=False)
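# Illustrative sketch (added): the run-length encoding that mask2rle above is
# assumed to produce, i.e. 1-indexed "start length" pairs over the mask
# flattened in column-major (Fortran) order, as required by the competition.
def _rle_encode_demo(mask):
    # Flatten column-major, pad with zeros, and locate the run boundaries.
    pixels = np.concatenate([[0], mask.flatten(order="F"), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]  # convert end positions into run lengths
    return " ".join(str(x) for x in runs)
assert _rle_encode_demo(np.array([[0, 1], [1, 1]], dtype=np.uint8)) == "2 3"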
import pdb
import os
import cv2
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, Dataset
from albumentations import Normalize, Compose
from albumentations.pytorch import ToTensor
import torch.utils.data as data
import torchvision.models as models
import torch.nn as nn
from torch.nn import functional as F
BatchNorm2d = nn.BatchNorm2d
IMAGE_RGB_MEAN = [0.485, 0.456, 0.406]
IMAGE_RGB_STD = [0.229, 0.224, 0.225]
###############################################################################
CONVERSION = [
"block0.0.weight",
(64, 3, 7, 7),
"conv1.weight",
(64, 3, 7, 7),
"block0.1.weight",
(64,),
"bn1.weight",
(64,),
"block0.1.bias",
(64,),
"bn1.bias",
(64,),
"block0.1.running_mean",
(64,),
"bn1.running_mean",
(64,),
"block0.1.running_var",
(64,),
"bn1.running_var",
(64,),
"block1.1.conv_bn1.conv.weight",
(64, 64, 3, 3),
"layer1.0.conv1.weight",
(64, 64, 3, 3),
"block1.1.conv_bn1.bn.weight",
(64,),
"layer1.0.bn1.weight",
(64,),
"block1.1.conv_bn1.bn.bias",
(64,),
"layer1.0.bn1.bias",
(64,),
"block1.1.conv_bn1.bn.running_mean",
(64,),
"layer1.0.bn1.running_mean",
(64,),
"block1.1.conv_bn1.bn.running_var",
(64,),
"layer1.0.bn1.running_var",
(64,),
"block1.1.conv_bn2.conv.weight",
(64, 64, 3, 3),
"layer1.0.conv2.weight",
(64, 64, 3, 3),
"block1.1.conv_bn2.bn.weight",
(64,),
"layer1.0.bn2.weight",
(64,),
"block1.1.conv_bn2.bn.bias",
(64,),
"layer1.0.bn2.bias",
(64,),
"block1.1.conv_bn2.bn.running_mean",
(64,),
"layer1.0.bn2.running_mean",
(64,),
"block1.1.conv_bn2.bn.running_var",
(64,),
"layer1.0.bn2.running_var",
(64,),
"block1.2.conv_bn1.conv.weight",
(64, 64, 3, 3),
"layer1.1.conv1.weight",
(64, 64, 3, 3),
"block1.2.conv_bn1.bn.weight",
(64,),
"layer1.1.bn1.weight",
(64,),
"block1.2.conv_bn1.bn.bias",
(64,),
"layer1.1.bn1.bias",
(64,),
"block1.2.conv_bn1.bn.running_mean",
(64,),
"layer1.1.bn1.running_mean",
(64,),
"block1.2.conv_bn1.bn.running_var",
(64,),
"layer1.1.bn1.running_var",
(64,),
"block1.2.conv_bn2.conv.weight",
(64, 64, 3, 3),
"layer1.1.conv2.weight",
(64, 64, 3, 3),
"block1.2.conv_bn2.bn.weight",
(64,),
"layer1.1.bn2.weight",
(64,),
"block1.2.conv_bn2.bn.bias",
(64,),
"layer1.1.bn2.bias",
(64,),
"block1.2.conv_bn2.bn.running_mean",
(64,),
"layer1.1.bn2.running_mean",
(64,),
"block1.2.conv_bn2.bn.running_var",
(64,),
"layer1.1.bn2.running_var",
(64,),
"block1.3.conv_bn1.conv.weight",
(64, 64, 3, 3),
"layer1.2.conv1.weight",
(64, 64, 3, 3),
"block1.3.conv_bn1.bn.weight",
(64,),
"layer1.2.bn1.weight",
(64,),
"block1.3.conv_bn1.bn.bias",
(64,),
"layer1.2.bn1.bias",
(64,),
"block1.3.conv_bn1.bn.running_mean",
(64,),
"layer1.2.bn1.running_mean",
(64,),
"block1.3.conv_bn1.bn.running_var",
(64,),
"layer1.2.bn1.running_var",
(64,),
"block1.3.conv_bn2.conv.weight",
(64, 64, 3, 3),
"layer1.2.conv2.weight",
(64, 64, 3, 3),
"block1.3.conv_bn2.bn.weight",
(64,),
"layer1.2.bn2.weight",
(64,),
"block1.3.conv_bn2.bn.bias",
(64,),
"layer1.2.bn2.bias",
(64,),
"block1.3.conv_bn2.bn.running_mean",
(64,),
"layer1.2.bn2.running_mean",
(64,),
"block1.3.conv_bn2.bn.running_var",
(64,),
"layer1.2.bn2.running_var",
(64,),
"block2.0.conv_bn1.conv.weight",
(128, 64, 3, 3),
"layer2.0.conv1.weight",
(128, 64, 3, 3),
"block2.0.conv_bn1.bn.weight",
(128,),
"layer2.0.bn1.weight",
(128,),
"block2.0.conv_bn1.bn.bias",
(128,),
"layer2.0.bn1.bias",
(128,),
"block2.0.conv_bn1.bn.running_mean",
(128,),
"layer2.0.bn1.running_mean",
(128,),
"block2.0.conv_bn1.bn.running_var",
(128,),
"layer2.0.bn1.running_var",
(128,),
"block2.0.conv_bn2.conv.weight",
(128, 128, 3, 3),
"layer2.0.conv2.weight",
(128, 128, 3, 3),
"block2.0.conv_bn2.bn.weight",
(128,),
"layer2.0.bn2.weight",
(128,),
"block2.0.conv_bn2.bn.bias",
(128,),
"layer2.0.bn2.bias",
(128,),
"block2.0.conv_bn2.bn.running_mean",
(128,),
"layer2.0.bn2.running_mean",
(128,),
"block2.0.conv_bn2.bn.running_var",
(128,),
"layer2.0.bn2.running_var",
(128,),
"block2.0.shortcut.conv.weight",
(128, 64, 1, 1),
"layer2.0.downsample.0.weight",
(128, 64, 1, 1),
"block2.0.shortcut.bn.weight",
(128,),
"layer2.0.downsample.1.weight",
(128,),
"block2.0.shortcut.bn.bias",
(128,),
"layer2.0.downsample.1.bias",
(128,),
"block2.0.shortcut.bn.running_mean",
(128,),
"layer2.0.downsample.1.running_mean",
(128,),
"block2.0.shortcut.bn.running_var",
(128,),
"layer2.0.downsample.1.running_var",
(128,),
"block2.1.conv_bn1.conv.weight",
(128, 128, 3, 3),
"layer2.1.conv1.weight",
(128, 128, 3, 3),
"block2.1.conv_bn1.bn.weight",
(128,),
"layer2.1.bn1.weight",
(128,),
"block2.1.conv_bn1.bn.bias",
(128,),
"layer2.1.bn1.bias",
(128,),
"block2.1.conv_bn1.bn.running_mean",
(128,),
"layer2.1.bn1.running_mean",
(128,),
"block2.1.conv_bn1.bn.running_var",
(128,),
"layer2.1.bn1.running_var",
(128,),
"block2.1.conv_bn2.conv.weight",
(128, 128, 3, 3),
"layer2.1.conv2.weight",
(128, 128, 3, 3),
"block2.1.conv_bn2.bn.weight",
(128,),
"layer2.1.bn2.weight",
(128,),
"block2.1.conv_bn2.bn.bias",
(128,),
"layer2.1.bn2.bias",
(128,),
"block2.1.conv_bn2.bn.running_mean",
(128,),
"layer2.1.bn2.running_mean",
(128,),
"block2.1.conv_bn2.bn.running_var",
(128,),
"layer2.1.bn2.running_var",
(128,),
"block2.2.conv_bn1.conv.weight",
(128, 128, 3, 3),
"layer2.2.conv1.weight",
(128, 128, 3, 3),
"block2.2.conv_bn1.bn.weight",
(128,),
"layer2.2.bn1.weight",
(128,),
"block2.2.conv_bn1.bn.bias",
(128,),
"layer2.2.bn1.bias",
(128,),
"block2.2.conv_bn1.bn.running_mean",
(128,),
"layer2.2.bn1.running_mean",
(128,),
"block2.2.conv_bn1.bn.running_var",
(128,),
"layer2.2.bn1.running_var",
(128,),
"block2.2.conv_bn2.conv.weight",
(128, 128, 3, 3),
"layer2.2.conv2.weight",
(128, 128, 3, 3),
"block2.2.conv_bn2.bn.weight",
(128,),
"layer2.2.bn2.weight",
(128,),
"block2.2.conv_bn2.bn.bias",
(128,),
"layer2.2.bn2.bias",
(128,),
"block2.2.conv_bn2.bn.running_mean",
(128,),
"layer2.2.bn2.running_mean",
(128,),
"block2.2.conv_bn2.bn.running_var",
(128,),
"layer2.2.bn2.running_var",
(128,),
"block2.3.conv_bn1.conv.weight",
(128, 128, 3, 3),
"layer2.3.conv1.weight",
(128, 128, 3, 3),
"block2.3.conv_bn1.bn.weight",
(128,),
"layer2.3.bn1.weight",
(128,),
"block2.3.conv_bn1.bn.bias",
(128,),
"layer2.3.bn1.bias",
(128,),
"block2.3.conv_bn1.bn.running_mean",
(128,),
"layer2.3.bn1.running_mean",
(128,),
"block2.3.conv_bn1.bn.running_var",
(128,),
"layer2.3.bn1.running_var",
(128,),
"block2.3.conv_bn2.conv.weight",
(128, 128, 3, 3),
"layer2.3.conv2.weight",
(128, 128, 3, 3),
"block2.3.conv_bn2.bn.weight",
(128,),
"layer2.3.bn2.weight",
(128,),
"block2.3.conv_bn2.bn.bias",
(128,),
"layer2.3.bn2.bias",
(128,),
"block2.3.conv_bn2.bn.running_mean",
(128,),
"layer2.3.bn2.running_mean",
(128,),
"block2.3.conv_bn2.bn.running_var",
(128,),
"layer2.3.bn2.running_var",
(128,),
"block3.0.conv_bn1.conv.weight",
(256, 128, 3, 3),
"layer3.0.conv1.weight",
(256, 128, 3, 3),
"block3.0.conv_bn1.bn.weight",
(256,),
"layer3.0.bn1.weight",
(256,),
"block3.0.conv_bn1.bn.bias",
(256,),
"layer3.0.bn1.bias",
(256,),
"block3.0.conv_bn1.bn.running_mean",
(256,),
"layer3.0.bn1.running_mean",
(256,),
"block3.0.conv_bn1.bn.running_var",
(256,),
"layer3.0.bn1.running_var",
(256,),
"block3.0.conv_bn2.conv.weight",
(256, 256, 3, 3),
"layer3.0.conv2.weight",
(256, 256, 3, 3),
"block3.0.conv_bn2.bn.weight",
(256,),
"layer3.0.bn2.weight",
(256,),
"block3.0.conv_bn2.bn.bias",
(256,),
"layer3.0.bn2.bias",
(256,),
"block3.0.conv_bn2.bn.running_mean",
(256,),
"layer3.0.bn2.running_mean",
(256,),
"block3.0.conv_bn2.bn.running_var",
(256,),
"layer3.0.bn2.running_var",
(256,),
"block3.0.shortcut.conv.weight",
(256, 128, 1, 1),
"layer3.0.downsample.0.weight",
(256, 128, 1, 1),
"block3.0.shortcut.bn.weight",
(256,),
"layer3.0.downsample.1.weight",
(256,),
"block3.0.shortcut.bn.bias",
(256,),
"layer3.0.downsample.1.bias",
(256,),
"block3.0.shortcut.bn.running_mean",
(256,),
"layer3.0.downsample.1.running_mean",
(256,),
"block3.0.shortcut.bn.running_var",
(256,),
"layer3.0.downsample.1.running_var",
(256,),
"block3.1.conv_bn1.conv.weight",
(256, 256, 3, 3),
"layer3.1.conv1.weight",
(256, 256, 3, 3),
"block3.1.conv_bn1.bn.weight",
(256,),
"layer3.1.bn1.weight",
(256,),
"block3.1.conv_bn1.bn.bias",
(256,),
"layer3.1.bn1.bias",
(256,),
"block3.1.conv_bn1.bn.running_mean",
(256,),
"layer3.1.bn1.running_mean",
(256,),
"block3.1.conv_bn1.bn.running_var",
(256,),
"layer3.1.bn1.running_var",
(256,),
"block3.1.conv_bn2.conv.weight",
(256, 256, 3, 3),
"layer3.1.conv2.weight",
(256, 256, 3, 3),
"block3.1.conv_bn2.bn.weight",
(256,),
"layer3.1.bn2.weight",
(256,),
"block3.1.conv_bn2.bn.bias",
(256,),
"layer3.1.bn2.bias",
(256,),
"block3.1.conv_bn2.bn.running_mean",
(256,),
"layer3.1.bn2.running_mean",
(256,),
"block3.1.conv_bn2.bn.running_var",
(256,),
"layer3.1.bn2.running_var",
(256,),
"block3.2.conv_bn1.conv.weight",
(256, 256, 3, 3),
"layer3.2.conv1.weight",
(256, 256, 3, 3),
"block3.2.conv_bn1.bn.weight",
(256,),
"layer3.2.bn1.weight",
(256,),
"block3.2.conv_bn1.bn.bias",
(256,),
"layer3.2.bn1.bias",
(256,),
"block3.2.conv_bn1.bn.running_mean",
(256,),
"layer3.2.bn1.running_mean",
(256,),
"block3.2.conv_bn1.bn.running_var",
(256,),
"layer3.2.bn1.running_var",
(256,),
"block3.2.conv_bn2.conv.weight",
(256, 256, 3, 3),
"layer3.2.conv2.weight",
(256, 256, 3, 3),
"block3.2.conv_bn2.bn.weight",
(256,),
"layer3.2.bn2.weight",
(256,),
"block3.2.conv_bn2.bn.bias",
(256,),
"layer3.2.bn2.bias",
(256,),
"block3.2.conv_bn2.bn.running_mean",
(256,),
"layer3.2.bn2.running_mean",
(256,),
"block3.2.conv_bn2.bn.running_var",
(256,),
"layer3.2.bn2.running_var",
(256,),
"block3.3.conv_bn1.conv.weight",
(256, 256, 3, 3),
"layer3.3.conv1.weight",
(256, 256, 3, 3),
"block3.3.conv_bn1.bn.weight",
(256,),
"layer3.3.bn1.weight",
(256,),
"block3.3.conv_bn1.bn.bias",
(256,),
"layer3.3.bn1.bias",
(256,),
"block3.3.conv_bn1.bn.running_mean",
(256,),
"layer3.3.bn1.running_mean",
(256,),
"block3.3.conv_bn1.bn.running_var",
(256,),
"layer3.3.bn1.running_var",
(256,),
"block3.3.conv_bn2.conv.weight",
(256, 256, 3, 3),
"layer3.3.conv2.weight",
(256, 256, 3, 3),
"block3.3.conv_bn2.bn.weight",
(256,),
"layer3.3.bn2.weight",
(256,),
"block3.3.conv_bn2.bn.bias",
(256,),
"layer3.3.bn2.bias",
(256,),
"block3.3.conv_bn2.bn.running_mean",
(256,),
"layer3.3.bn2.running_mean",
(256,),
"block3.3.conv_bn2.bn.running_var",
(256,),
"layer3.3.bn2.running_var",
(256,),
"block3.4.conv_bn1.conv.weight",
(256, 256, 3, 3),
"layer3.4.conv1.weight",
(256, 256, 3, 3),
"block3.4.conv_bn1.bn.weight",
(256,),
"layer3.4.bn1.weight",
(256,),
"block3.4.conv_bn1.bn.bias",
(256,),
"layer3.4.bn1.bias",
(256,),
"block3.4.conv_bn1.bn.running_mean",
(256,),
"layer3.4.bn1.running_mean",
(256,),
"block3.4.conv_bn1.bn.running_var",
(256,),
"layer3.4.bn1.running_var",
(256,),
"block3.4.conv_bn2.conv.weight",
(256, 256, 3, 3),
"layer3.4.conv2.weight",
(256, 256, 3, 3),
"block3.4.conv_bn2.bn.weight",
(256,),
"layer3.4.bn2.weight",
(256,),
"block3.4.conv_bn2.bn.bias",
(256,),
"layer3.4.bn2.bias",
(256,),
"block3.4.conv_bn2.bn.running_mean",
(256,),
"layer3.4.bn2.running_mean",
(256,),
"block3.4.conv_bn2.bn.running_var",
(256,),
"layer3.4.bn2.running_var",
(256,),
"block3.5.conv_bn1.conv.weight",
(256, 256, 3, 3),
"layer3.5.conv1.weight",
(256, 256, 3, 3),
"block3.5.conv_bn1.bn.weight",
(256,),
"layer3.5.bn1.weight",
(256,),
"block3.5.conv_bn1.bn.bias",
(256,),
"layer3.5.bn1.bias",
(256,),
"block3.5.conv_bn1.bn.running_mean",
(256,),
"layer3.5.bn1.running_mean",
(256,),
"block3.5.conv_bn1.bn.running_var",
(256,),
"layer3.5.bn1.running_var",
(256,),
"block3.5.conv_bn2.conv.weight",
(256, 256, 3, 3),
"layer3.5.conv2.weight",
(256, 256, 3, 3),
"block3.5.conv_bn2.bn.weight",
(256,),
"layer3.5.bn2.weight",
(256,),
"block3.5.conv_bn2.bn.bias",
(256,),
"layer3.5.bn2.bias",
(256,),
"block3.5.conv_bn2.bn.running_mean",
(256,),
"layer3.5.bn2.running_mean",
(256,),
"block3.5.conv_bn2.bn.running_var",
(256,),
"layer3.5.bn2.running_var",
(256,),
"block4.0.conv_bn1.conv.weight",
(512, 256, 3, 3),
"layer4.0.conv1.weight",
(512, 256, 3, 3),
"block4.0.conv_bn1.bn.weight",
(512,),
"layer4.0.bn1.weight",
(512,),
"block4.0.conv_bn1.bn.bias",
(512,),
"layer4.0.bn1.bias",
(512,),
"block4.0.conv_bn1.bn.running_mean",
(512,),
"layer4.0.bn1.running_mean",
(512,),
"block4.0.conv_bn1.bn.running_var",
(512,),
"layer4.0.bn1.running_var",
(512,),
"block4.0.conv_bn2.conv.weight",
(512, 512, 3, 3),
"layer4.0.conv2.weight",
(512, 512, 3, 3),
"block4.0.conv_bn2.bn.weight",
(512,),
"layer4.0.bn2.weight",
(512,),
"block4.0.conv_bn2.bn.bias",
(512,),
"layer4.0.bn2.bias",
(512,),
"block4.0.conv_bn2.bn.running_mean",
(512,),
"layer4.0.bn2.running_mean",
(512,),
"block4.0.conv_bn2.bn.running_var",
(512,),
"layer4.0.bn2.running_var",
(512,),
"block4.0.shortcut.conv.weight",
(512, 256, 1, 1),
"layer4.0.downsample.0.weight",
(512, 256, 1, 1),
"block4.0.shortcut.bn.weight",
(512,),
"layer4.0.downsample.1.weight",
(512,),
"block4.0.shortcut.bn.bias",
(512,),
"layer4.0.downsample.1.bias",
(512,),
"block4.0.shortcut.bn.running_mean",
(512,),
"layer4.0.downsample.1.running_mean",
(512,),
"block4.0.shortcut.bn.running_var",
(512,),
"layer4.0.downsample.1.running_var",
(512,),
"block4.1.conv_bn1.conv.weight",
(512, 512, 3, 3),
"layer4.1.conv1.weight",
(512, 512, 3, 3),
"block4.1.conv_bn1.bn.weight",
(512,),
"layer4.1.bn1.weight",
(512,),
"block4.1.conv_bn1.bn.bias",
(512,),
"layer4.1.bn1.bias",
(512,),
"block4.1.conv_bn1.bn.running_mean",
(512,),
"layer4.1.bn1.running_mean",
(512,),
"block4.1.conv_bn1.bn.running_var",
(512,),
"layer4.1.bn1.running_var",
(512,),
"block4.1.conv_bn2.conv.weight",
(512, 512, 3, 3),
"layer4.1.conv2.weight",
(512, 512, 3, 3),
"block4.1.conv_bn2.bn.weight",
(512,),
"layer4.1.bn2.weight",
(512,),
"block4.1.conv_bn2.bn.bias",
(512,),
"layer4.1.bn2.bias",
(512,),
"block4.1.conv_bn2.bn.running_mean",
(512,),
"layer4.1.bn2.running_mean",
(512,),
"block4.1.conv_bn2.bn.running_var",
(512,),
"layer4.1.bn2.running_var",
(512,),
"block4.2.conv_bn1.conv.weight",
(512, 512, 3, 3),
"layer4.2.conv1.weight",
(512, 512, 3, 3),
"block4.2.conv_bn1.bn.weight",
(512,),
"layer4.2.bn1.weight",
(512,),
"block4.2.conv_bn1.bn.bias",
(512,),
"layer4.2.bn1.bias",
(512,),
"block4.2.conv_bn1.bn.running_mean",
(512,),
"layer4.2.bn1.running_mean",
(512,),
"block4.2.conv_bn1.bn.running_var",
(512,),
"layer4.2.bn1.running_var",
(512,),
"block4.2.conv_bn2.conv.weight",
(512, 512, 3, 3),
"layer4.2.conv2.weight",
(512, 512, 3, 3),
"block4.2.conv_bn2.bn.weight",
(512,),
"layer4.2.bn2.weight",
(512,),
"block4.2.conv_bn2.bn.bias",
(512,),
"layer4.2.bn2.bias",
(512,),
"block4.2.conv_bn2.bn.running_mean",
(512,),
"layer4.2.bn2.running_mean",
(512,),
"block4.2.conv_bn2.bn.running_var",
(512,),
"layer4.2.bn2.running_var",
(512,),
"logit.weight",
(1000, 512),
"fc.weight",
(1000, 512),
"logit.bias",
(1000,),
"fc.bias",
(1000,),
]
###############################################################################
class ConvBn2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size=3, padding=1, stride=1):
super(ConvBn2d, self).__init__()
self.conv = nn.Conv2d(
in_channel,
out_channel,
kernel_size=kernel_size,
padding=padding,
stride=stride,
bias=False,
)
self.bn = nn.BatchNorm2d(out_channel, eps=1e-5)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
############# resnext50 pyramid feature net #######################################
# https://github.com/Hsuxu/ResNeXt/blob/master/models.py
# https://github.com/D-X-Y/ResNeXt-DenseNet/blob/master/models/resnext.py
# https://github.com/miraclewkf/ResNeXt-PyTorch/blob/master/resnext.py
# bottleneck type C
class BasicBlock(nn.Module):
def __init__(self, in_channel, channel, out_channel, stride=1, is_shortcut=False):
super(BasicBlock, self).__init__()
self.is_shortcut = is_shortcut
self.conv_bn1 = ConvBn2d(
in_channel, channel, kernel_size=3, padding=1, stride=stride
)
self.conv_bn2 = ConvBn2d(
channel, out_channel, kernel_size=3, padding=1, stride=1
)
if is_shortcut:
self.shortcut = ConvBn2d(
in_channel, out_channel, kernel_size=1, padding=0, stride=stride
)
def forward(self, x):
z = F.relu(self.conv_bn1(x), inplace=True)
z = self.conv_bn2(z)
if self.is_shortcut:
x = self.shortcut(x)
z += x
z = F.relu(z, inplace=True)
return z
class ResNet34(nn.Module):
def __init__(self, num_class=1000):
super(ResNet34, self).__init__()
self.block0 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, padding=3, stride=2, bias=False),
BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.block1 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, padding=1, stride=2),
BasicBlock(64, 64, 64, stride=1, is_shortcut=False),
*[BasicBlock(64, 64, 64, stride=1, is_shortcut=False) for i in range(1, 3)],
)
self.block2 = nn.Sequential(
BasicBlock(64, 128, 128, stride=2, is_shortcut=True),
*[
BasicBlock(128, 128, 128, stride=1, is_shortcut=False)
for i in range(1, 4)
],
)
self.block3 = nn.Sequential(
BasicBlock(128, 256, 256, stride=2, is_shortcut=True),
*[
BasicBlock(256, 256, 256, stride=1, is_shortcut=False)
for i in range(1, 6)
],
)
self.block4 = nn.Sequential(
BasicBlock(256, 512, 512, stride=2, is_shortcut=True),
*[
BasicBlock(512, 512, 512, stride=1, is_shortcut=False)
for i in range(1, 3)
],
)
self.logit = nn.Linear(512, num_class)
def forward(self, x):
batch_size = len(x)
x = self.block0(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = F.adaptive_avg_pool2d(x, 1).reshape(batch_size, -1)
logit = self.logit(x)
return logit
class Resnet34_classification(nn.Module):
def __init__(self, num_class=4):
super(Resnet34_classification, self).__init__()
e = ResNet34()
self.block = nn.ModuleList([e.block0, e.block1, e.block2, e.block3, e.block4])
e = None # dropped
self.feature = nn.Conv2d(512, 32, kernel_size=1) # dummy conv for dim reduction
self.logit = nn.Conv2d(32, num_class, kernel_size=1)
def forward(self, x):
batch_size, C, H, W = x.shape
for i in range(len(self.block)):
x = self.block[i](x)
# print(i, x.shape)
x = F.dropout(x, 0.5, training=self.training)
x = F.adaptive_avg_pool2d(x, 1)
x = self.feature(x)
logit = self.logit(x)
return logit
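# Lightweight sanity check (added): the classification head above pools the
# backbone features to 1x1 and applies 1x1 convolutions, so the logits keep a
# trailing 1x1 spatial shape. A small random CPU input confirms the output shape.
_demo_clf = Resnet34_classification(num_class=4)
_demo_logits = _demo_clf(torch.rand(1, 3, 64, 400))
assert _demo_logits.shape == (1, 4, 1, 1)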
model_classification = Resnet34_classification()
model_classification.load_state_dict(
torch.load(
"../input/clsification/00007500_model.pth",
map_location=lambda storage, loc: storage,
),
strict=True,
)
class TestDataset(Dataset):
"""Dataset for test prediction"""
def __init__(self, root, df, mean, std):
self.root = root
df["ImageId"] = df["ImageId_ClassId"].apply(lambda x: x.split("_")[0])
self.fnames = df["ImageId"].unique().tolist()
self.num_samples = len(self.fnames)
self.transform = Compose([Normalize(mean=mean, std=std, p=1), ToTensor()])
def __getitem__(self, idx):
fname = self.fnames[idx]
path = os.path.join(self.root, fname)
image = cv2.imread(path)
images = self.transform(image=image)["image"]
return fname, images
def __len__(self):
return self.num_samples
def sharpen(p, t=0.5):
if t != 0:
return p ** t
else:
return p
augment = ["null"]
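# Added note: sharpen() raises TTA probabilities to the power t before they are
# accumulated (t=0.5 pulls values toward 1, e.g. 0.25**0.5 == 0.5), and t=0 is
# used as a flag for "no sharpening" on the first, non-augmented pass. The
# `augment` list selects which TTA views get_classification_preds averages;
# only the identity ("null") view is enabled here, with "flip_lr"/"flip_ud"
# available as horizontally/vertically flipped passes.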
def get_classification_preds(net, test_loader):
test_probability_label = []
test_id = []
net = net.cuda()
for t, (fnames, images) in enumerate(tqdm(test_loader)):
batch_size, C, H, W = images.shape
images = images.cuda()
with torch.no_grad():
net.eval()
num_augment = 0
if 1: # null
logit = net(images)
probability = torch.sigmoid(logit)
probability_label = sharpen(probability, 0)
num_augment += 1
if "flip_lr" in augment:
logit = net(torch.flip(images, dims=[3]))
probability = torch.sigmoid(logit)
probability_label += sharpen(probability)
num_augment += 1
if "flip_ud" in augment:
logit = net(torch.flip(images, dims=[2]))
probability = torch.sigmoid(logit)
probability_label += sharpen(probability)
num_augment += 1
probability_label = probability_label / num_augment
probability_label = probability_label.data.cpu().numpy()
test_probability_label.append(probability_label)
test_id.extend([i for i in fnames])
test_probability_label = np.concatenate(test_probability_label)
return test_probability_label, test_id
sample_submission_path = (
"../input/severstal-steel-defect-detection/sample_submission.csv"
)
test_data_folder = "../input/severstal-steel-defect-detection/test_images"
batch_size = 1
# mean and std
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
df = pd.read_csv(sample_submission_path)
testset = DataLoader(
TestDataset(test_data_folder, df, mean, std),
batch_size=batch_size,
shuffle=False,
num_workers=0,
pin_memory=True,
)
threshold_label = [0.50, 0.50, 0.50, 0.50]
probability_label, image_id = get_classification_preds(model_classification, testset)
predict_label = probability_label > np.array(threshold_label).reshape(1, 4, 1, 1)
image_id_class_id = []
encoded_pixel = []
for b in range(len(image_id)):
for c in range(4):
image_id_class_id.append(image_id[b] + "_%d" % (c + 1))
if predict_label[b, c] == 0:
rle = ""
else:
rle = "1 1"
encoded_pixel.append(rle)
df_classification = pd.DataFrame(
zip(image_id_class_id, encoded_pixel), columns=["ImageId_ClassId", "EncodedPixels"]
)
df = pd.read_csv("submission.csv")
df = df.fillna("")
if 1:
df["Class"] = df["ImageId_ClassId"].str[-1].astype(np.int32)
df["Label"] = (df["EncodedPixels"] != "").astype(np.int32)
pos1 = ((df["Class"] == 1) & (df["Label"] == 1)).sum()
pos2 = ((df["Class"] == 2) & (df["Label"] == 1)).sum()
pos3 = ((df["Class"] == 3) & (df["Label"] == 1)).sum()
pos4 = ((df["Class"] == 4) & (df["Label"] == 1)).sum()
num_image = len(df) // 4
num = len(df)
pos = (df["Label"] == 1).sum()
neg = num - pos
print("")
print("\t\tnum_image = %5d(1801)" % num_image)
print("\t\tnum = %5d(7204)" % num)
print("\t\tneg = %5d(6172) %0.3f" % (neg, neg / num))
print("\t\tpos = %5d(1032) %0.3f" % (pos, pos / num))
print("\t\tpos1 = %5d( 128) %0.3f %0.3f" % (pos1, pos1 / num_image, pos1 / pos))
print("\t\tpos2 = %5d( 43) %0.3f %0.3f" % (pos2, pos2 / num_image, pos2 / pos))
print("\t\tpos3 = %5d( 741) %0.3f %0.3f" % (pos3, pos3 / num_image, pos3 / pos))
print("\t\tpos4 = %5d( 120) %0.3f %0.3f" % (pos4, pos4 / num_image, pos4 / pos))
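# Added note: below, the classifier output acts as a veto on the segmentation
# output. Wherever the classification submission predicts "no defect" (an empty
# RLE string), the corresponding segmentation mask is blanked out before the
# final submission.csv is written.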
df_mask = df.copy()
df_label = df_classification.copy()
assert np.all(df_mask["ImageId_ClassId"].values == df_label["ImageId_ClassId"].values)
print(
(df_mask.loc[df_label["EncodedPixels"] == "", "EncodedPixels"] != "").sum()
) # 202
df_mask.loc[df_label["EncodedPixels"] == "", "EncodedPixels"] = ""
# df_mask.to_csv("submission.csv", index=False)
df_mask.to_csv(
"submission.csv", columns=["ImageId_ClassId", "EncodedPixels"], index=False
)
if 1:
df_mask["Class"] = df_mask["ImageId_ClassId"].str[-1].astype(np.int32)
df_mask["Label"] = (df_mask["EncodedPixels"] != "").astype(np.int32)
pos1 = ((df_mask["Class"] == 1) & (df_mask["Label"] == 1)).sum()
pos2 = ((df_mask["Class"] == 2) & (df_mask["Label"] == 1)).sum()
pos3 = ((df_mask["Class"] == 3) & (df_mask["Label"] == 1)).sum()
pos4 = ((df_mask["Class"] == 4) & (df_mask["Label"] == 1)).sum()
num_image = len(df_mask) // 4
num = len(df_mask)
pos = (df_mask["Label"] == 1).sum()
neg = num - pos
print("")
print("\t\tnum_image = %5d(1801)" % num_image)
print("\t\tnum = %5d(7204)" % num)
print("\t\tneg = %5d(6172) %0.3f" % (neg, neg / num))
print("\t\tpos = %5d(1032) %0.3f" % (pos, pos / num))
print("\t\tpos1 = %5d( 128) %0.3f %0.3f" % (pos1, pos1 / num_image, pos1 / pos))
print("\t\tpos2 = %5d( 43) %0.3f %0.3f" % (pos2, pos2 / num_image, pos2 / pos))
print("\t\tpos3 = %5d( 741) %0.3f %0.3f" % (pos3, pos3 / num_image, pos3 / pos))
print("\t\tpos4 = %5d( 120) %0.3f %0.3f" % (pos4, pos4 / num_image, pos4 / pos))
# ### Visualization
get_ipython().run_line_magic("matplotlib", "inline")
df = pd.read_csv("submission.csv")
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import pandas
import numpy as np
import pyarrow
import pytest
import re
from modin.config import IsExperimental, Engine, StorageFormat
from modin.pandas.test.utils import io_ops_bad_exc
from .utils import eval_io, ForceOmnisciImport, set_execution_mode, run_and_compare
from pandas.core.dtypes.common import is_list_like
IsExperimental.put(True)
Engine.put("native")
StorageFormat.put("omnisci")
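# Note (added): these experimental-engine settings are applied before
# `import modin.pandas as pd` below so that the test session runs on the
# OmniSci-on-native execution rather than the default engine.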
import modin.pandas as pd
from modin.pandas.test.utils import (
df_equals,
bool_arg_values,
to_pandas,
test_data_values,
test_data_keys,
generate_multiindex,
eval_general,
df_equals_with_non_stable_indices,
)
from modin.utils import try_cast_to_pandas
from modin.experimental.core.execution.native.implementations.omnisci_on_native.partitioning.partition_manager import (
OmnisciOnNativeDataframePartitionManager,
)
from modin.experimental.core.execution.native.implementations.omnisci_on_native.df_algebra import (
FrameNode,
)
@pytest.mark.usefixtures("TestReadCSVFixture")
class TestCSV:
from modin import __file__ as modin_root
root = os.path.dirname(
os.path.dirname(os.path.abspath(modin_root)) + ".."
) # root of modin repo
boston_housing_names = [
"index",
"CRIM",
"ZN",
"INDUS",
"CHAS",
"NOX",
"RM",
"AGE",
"DIS",
"RAD",
"TAX",
"PTRATIO",
"B",
"LSTAT",
"PRICE",
]
boston_housing_dtypes = {
"index": "int64",
"CRIM": "float64",
"ZN": "float64",
"INDUS": "float64",
"CHAS": "float64",
"NOX": "float64",
"RM": "float64",
"AGE": "float64",
"DIS": "float64",
"RAD": "float64",
"TAX": "float64",
"PTRATIO": "float64",
"B": "float64",
"LSTAT": "float64",
"PRICE": "float64",
}
def test_usecols_csv(self):
"""check with the following arguments: names, dtype, skiprows, delimiter"""
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
for kwargs in (
{"delimiter": ","},
{"sep": None},
{"skiprows": 1, "names": ["A", "B", "C", "D", "E"]},
{"dtype": {"a": "int32", "e": "string"}},
{"dtype": {"a": np.dtype("int32"), "b": np.dtype("int64"), "e": "string"}},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_housing_csv(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
def test_time_parsing(self):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_time_parsing.csv"
)
for kwargs in (
{
"skiprows": 1,
"names": [
"timestamp",
"symbol",
"high",
"low",
"open",
"close",
"spread",
"volume",
],
"parse_dates": ["timestamp"],
"dtype": {"symbol": "string"},
},
):
rp = pandas.read_csv(csv_file, **kwargs)
rm = pd.read_csv(csv_file, engine="arrow", **kwargs)
with ForceOmnisciImport(rm):
rm = to_pandas(rm)
df_equals(rm["timestamp"].dt.year, rp["timestamp"].dt.year)
df_equals(rm["timestamp"].dt.month, rp["timestamp"].dt.month)
df_equals(rm["timestamp"].dt.day, rp["timestamp"].dt.day)
def test_csv_fillna(self):
csv_file = os.path.join(self.root, "examples/data/boston_housing.csv")
for kwargs in (
{
"skiprows": 1,
"names": self.boston_housing_names,
"dtype": self.boston_housing_dtypes,
},
):
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": "arrow"},
comparator=lambda df1, df2: df_equals(
df1["CRIM"].fillna(1000), df2["CRIM"].fillna(1000)
),
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
@pytest.mark.parametrize("null_dtype", ["category", "float64"])
def test_null_col(self, null_dtype):
csv_file = os.path.join(
self.root, "modin/pandas/test/data", "test_null_col.csv"
)
ref = pandas.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
ref["a"] = ref["a"] + ref["b"]
exp = pd.read_csv(
csv_file,
names=["a", "b", "c"],
dtype={"a": "int64", "b": "int64", "c": null_dtype},
skiprows=1,
)
exp["a"] = exp["a"] + exp["b"]
# df_equals cannot compare empty categories
if null_dtype == "category":
ref["c"] = ref["c"].astype("string")
with ForceOmnisciImport(exp):
exp = to_pandas(exp)
exp["c"] = exp["c"].astype("string")
df_equals(ref, exp)
def test_read_and_concat(self):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
ref1 = pandas.read_csv(csv_file)
ref2 = pandas.read_csv(csv_file)
ref = pandas.concat([ref1, ref2])
        exp1 = pd.read_csv(csv_file)
        exp2 = pd.read_csv(csv_file)
exp = pd.concat([exp1, exp2])
with ForceOmnisciImport(exp):
df_equals(ref, exp)
@pytest.mark.parametrize("names", [None, ["a", "b", "c", "d", "e"]])
@pytest.mark.parametrize("header", [None, 0])
def test_from_csv(self, header, names):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=csv_file,
header=header,
names=names,
)
@pytest.mark.parametrize("kwargs", [{"sep": "|"}, {"delimiter": "|"}])
def test_sep_delimiter(self, kwargs):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_delim.csv")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=csv_file,
**kwargs,
)
@pytest.mark.skip(reason="https://github.com/modin-project/modin/issues/2174")
def test_float32(self):
csv_file = os.path.join(self.root, "modin/pandas/test/data", "test_usecols.csv")
kwargs = {
"dtype": {"a": "float32", "b": "float32"},
}
pandas_df = pandas.read_csv(csv_file, **kwargs)
pandas_df["a"] = pandas_df["a"] + pandas_df["b"]
modin_df = pd.read_csv(csv_file, **kwargs, engine="arrow")
modin_df["a"] = modin_df["a"] + modin_df["b"]
with ForceOmnisciImport(modin_df):
df_equals(modin_df, pandas_df)
# Datetime Handling tests
@pytest.mark.parametrize("engine", [None, "arrow"])
@pytest.mark.parametrize(
"parse_dates",
[
True,
False,
["col2"],
["c2"],
[["col2", "col3"]],
{"col23": ["col2", "col3"]},
],
)
@pytest.mark.parametrize("names", [None, [f"c{x}" for x in range(1, 7)]])
def test_read_csv_datetime(
self,
engine,
parse_dates,
names,
):
parse_dates_unsupported = isinstance(parse_dates, dict) or (
isinstance(parse_dates, list) and isinstance(parse_dates[0], list)
)
if parse_dates_unsupported and engine == "arrow" and not names:
pytest.skip(
"In these cases Modin raises `ArrowEngineException` while pandas "
"doesn't raise any exceptions that causes tests fails"
)
# In these cases Modin raises `ArrowEngineException` while pandas
# raises `ValueError`, so skipping exception type checking
skip_exc_type_check = parse_dates_unsupported and engine == "arrow"
eval_io(
fn_name="read_csv",
md_extra_kwargs={"engine": engine},
check_exception_type=not skip_exc_type_check,
raising_exceptions=None if skip_exc_type_check else io_ops_bad_exc,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
parse_dates=parse_dates,
names=names,
)
@pytest.mark.parametrize("engine", [None, "arrow"])
@pytest.mark.parametrize(
"usecols",
[
None,
["col1"],
["col1", "col1"],
["col1", "col2", "col6"],
["col6", "col2", "col1"],
[0],
[0, 0],
[0, 1, 5],
[5, 1, 0],
lambda x: x in ["col1", "col2"],
],
)
def test_read_csv_col_handling(
self,
engine,
usecols,
):
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(usecols),
md_extra_kwargs={"engine": engine},
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
usecols=usecols,
)
class TestMasks:
data = {
"a": [1, 1, 2, 2, 3],
"b": [None, None, 2, 1, 3],
"c": [3, None, None, 2, 1],
}
cols_values = ["a", ["a", "b"], ["a", "b", "c"]]
@pytest.mark.parametrize("cols", cols_values)
def test_projection(self, cols):
def projection(df, cols, **kwargs):
return df[cols]
run_and_compare(projection, data=self.data, cols=cols)
def test_drop(self):
def drop(df, **kwargs):
return df.drop(columns="a")
run_and_compare(drop, data=self.data)
def test_iloc(self):
def mask(df, **kwargs):
return df.iloc[[0, 1]]
run_and_compare(mask, data=self.data, allow_subqueries=True)
def test_empty(self):
def empty(df, **kwargs):
return df
run_and_compare(empty, data=None)
def test_filter(self):
def filter(df, **kwargs):
return df[df["a"] == 1]
run_and_compare(filter, data=self.data)
def test_filter_with_index(self):
def filter(df, **kwargs):
df = df.groupby("a").sum()
return df[df["b"] > 1]
run_and_compare(filter, data=self.data)
def test_filter_proj(self):
def filter(df, **kwargs):
df1 = df + 2
return df1[(df["a"] + df1["b"]) > 1]
run_and_compare(filter, data=self.data)
def test_filter_drop(self):
def filter(df, **kwargs):
df = df[["a", "b"]]
df = df[df["a"] != 1]
df["a"] = df["a"] * df["b"]
return df
run_and_compare(filter, data=self.data)
class TestMultiIndex:
data = {"a": np.arange(24), "b": np.arange(24)}
@pytest.mark.parametrize("names", [None, ["", ""], ["name", "name"]])
def test_dup_names(self, names):
index = pandas.MultiIndex.from_tuples(
[(i, j) for i in range(3) for j in range(8)], names=names
)
pandas_df = pandas.DataFrame(self.data, index=index) + 1
modin_df = pd.DataFrame(self.data, index=index) + 1
df_equals(pandas_df, modin_df)
@pytest.mark.parametrize(
"names",
[
None,
[None, "s", None],
["i1", "i2", "i3"],
["i1", "i1", "i3"],
["i1", "i2", "a"],
],
)
def test_reset_index(self, names):
index = pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
names=names,
)
def applier(lib):
df = lib.DataFrame(self.data, index=index) + 1
return df.reset_index()
eval_general(pd, pandas, applier)
@pytest.mark.parametrize("is_multiindex", [True, False])
@pytest.mark.parametrize(
"column_names", [None, ["level1", None], ["level1", "level2"]]
)
def test_reset_index_multicolumns(self, is_multiindex, column_names):
index = (
pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)],
names=["l1", "l2", "l3"],
)
if is_multiindex
else pandas.Index(np.arange(len(self.data["a"])), name="index")
)
columns = pandas.MultiIndex.from_tuples(
[("a", "b"), ("b", "c")], names=column_names
)
data = np.array(list(self.data.values())).T
def applier(df, **kwargs):
df = df + 1
return df.reset_index(drop=False)
run_and_compare(
fn=applier,
data=data,
constructor_kwargs={"index": index, "columns": columns},
)
def test_set_index_name(self):
index = pandas.Index.__new__(pandas.Index, data=[i for i in range(24)])
pandas_df = pandas.DataFrame(self.data, index=index)
pandas_df.index.name = "new_name"
modin_df = pd.DataFrame(self.data, index=index)
modin_df._query_compiler.set_index_name("new_name")
df_equals(pandas_df, modin_df)
def test_set_index_names(self):
index = pandas.MultiIndex.from_tuples(
[(i, j, k) for i in range(2) for j in range(3) for k in range(4)]
)
pandas_df = pandas.DataFrame(self.data, index=index)
pandas_df.index.names = ["new_name1", "new_name2", "new_name3"]
modin_df = pd.DataFrame(self.data, index=index)
modin_df._query_compiler.set_index_names(
["new_name1", "new_name2", "new_name3"]
)
df_equals(pandas_df, modin_df)
class TestFillna:
data = {"a": [1, 1, None], "b": [None, None, 2], "c": [3, None, None]}
values = [1, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}]
@pytest.mark.parametrize("value", values)
def test_fillna_all(self, value):
def fillna(df, value, **kwargs):
return df.fillna(value)
run_and_compare(fillna, data=self.data, value=value)
def test_fillna_bool(self):
def fillna(df, **kwargs):
df["a"] = df["a"] == 1
df["a"] = df["a"].fillna(False)
return df
run_and_compare(fillna, data=self.data)
class TestConcat:
data = {
"a": [1, 2, 3],
"b": [10, 20, 30],
"d": [1000, 2000, 3000],
"e": [11, 22, 33],
}
data2 = {
"a": [4, 5, 6],
"c": [400, 500, 600],
"b": [40, 50, 60],
"f": [444, 555, 666],
}
data3 = {
"f": [2, 3, 4],
"g": [400, 500, 600],
"h": [20, 30, 40],
}
@pytest.mark.parametrize("join", ["inner", "outer"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat(self, join, sort, ignore_index):
def concat(lib, df1, df2, join, sort, ignore_index):
return lib.concat(
[df1, df2], join=join, sort=sort, ignore_index=ignore_index
)
run_and_compare(
concat,
data=self.data,
data2=self.data2,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_concat_with_same_df(self):
def concat(df, **kwargs):
df["f"] = df["a"]
return df
run_and_compare(concat, data=self.data)
def test_setitem_lazy(self):
def applier(df, **kwargs):
df = df + 1
df["a"] = df["a"] + 1
df["e"] = df["a"] + 1
df["new_int8"] = np.int8(10)
df["new_int16"] = np.int16(10)
df["new_int32"] = np.int32(10)
df["new_int64"] = np.int64(10)
df["new_int"] = 10
df["new_float"] = 5.5
df["new_float64"] = np.float64(10.1)
return df
run_and_compare(applier, data=self.data)
def test_setitem_default(self):
def applier(df, lib, **kwargs):
df = df + 1
df["a"] = np.arange(3)
df["b"] = lib.Series(np.arange(3))
return df
run_and_compare(applier, data=self.data, force_lazy=False)
def test_insert_lazy(self):
def applier(df, **kwargs):
df = df + 1
df.insert(2, "new_int", 10)
df.insert(1, "new_float", 5.5)
df.insert(0, "new_a", df["a"] + 1)
return df
run_and_compare(applier, data=self.data)
def test_insert_default(self):
def applier(df, lib, **kwargs):
df = df + 1
df.insert(1, "new_range", np.arange(3))
df.insert(1, "new_series", lib.Series(np.arange(3)))
return df
run_and_compare(applier, data=self.data, force_lazy=False)
def test_concat_many(self):
def concat(df1, df2, lib, **kwargs):
df3 = df1.copy()
df4 = df2.copy()
return lib.concat([df1, df2, df3, df4])
def sort_comparator(df1, df2):
"""Sort and verify equality of the passed frames."""
# We sort values because order of rows in the 'union all' result is inconsistent in OmniSci
df1, df2 = (
try_cast_to_pandas(df).sort_values(df.columns[0]) for df in (df1, df2)
)
return df_equals(df1, df2)
run_and_compare(
concat, data=self.data, data2=self.data2, comparator=sort_comparator
)
def test_concat_agg(self):
def concat(lib, df1, df2):
df1 = df1.groupby("a", as_index=False).agg(
{"b": "sum", "d": "sum", "e": "sum"}
)
df2 = df2.groupby("a", as_index=False).agg(
{"c": "sum", "b": "sum", "f": "sum"}
)
return lib.concat([df1, df2])
run_and_compare(concat, data=self.data, data2=self.data2, allow_subqueries=True)
@pytest.mark.parametrize("join", ["inner", "outer"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_single(self, join, sort, ignore_index):
def concat(lib, df, join, sort, ignore_index):
return lib.concat([df], join=join, sort=sort, ignore_index=ignore_index)
run_and_compare(
concat,
data=self.data,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_groupby_concat_single(self):
def concat(lib, df):
df = lib.concat([df])
return df.groupby("a").agg({"b": "min"})
run_and_compare(
concat,
data=self.data,
)
@pytest.mark.parametrize("join", ["inner"])
@pytest.mark.parametrize("sort", bool_arg_values)
@pytest.mark.parametrize("ignore_index", bool_arg_values)
def test_concat_join(self, join, sort, ignore_index):
def concat(lib, df1, df2, join, sort, ignore_index, **kwargs):
return lib.concat(
[df1, df2], axis=1, join=join, sort=sort, ignore_index=ignore_index
)
run_and_compare(
concat,
data=self.data,
data2=self.data3,
join=join,
sort=sort,
ignore_index=ignore_index,
)
def test_concat_index_name(self):
df1 = pandas.DataFrame(self.data)
df1 = df1.set_index("a")
df2 = pandas.DataFrame(self.data3)
df2 = df2.set_index("f")
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
df2.index.name = "a"
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
def test_concat_index_names(self):
df1 = pandas.DataFrame(self.data)
df1 = df1.set_index(["a", "b"])
df2 = pandas.DataFrame(self.data3)
df2 = df2.set_index(["f", "h"])
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
df2.index.names = ["a", "b"]
ref = pandas.concat([df1, df2], axis=1, join="inner")
exp = pd.concat([df1, df2], axis=1, join="inner")
df_equals(ref, exp)
class TestGroupby:
data = {
"a": [1, 1, 2, 2, 2, 1],
"b": [11, 21, 12, 22, 32, 11],
"c": [101, 201, 202, 202, 302, 302],
}
cols_value = ["a", ["a", "b"]]
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_sum(self, cols, as_index):
def groupby_sum(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).sum()
run_and_compare(groupby_sum, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_count(self, cols, as_index):
def groupby_count(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).count()
run_and_compare(groupby_count, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.xfail(
reason="Currently mean() passes a lambda into query compiler which cannot be executed on OmniSci engine"
)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_mean(self, cols, as_index):
def groupby_mean(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).mean()
run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_proj_sum(self, cols, as_index):
def groupby_sum(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).c.sum()
run_and_compare(
groupby_sum, data=self.data, cols=cols, as_index=as_index, force_lazy=False
)
@pytest.mark.parametrize("agg", ["count", "size", "nunique"])
def test_groupby_agg(self, agg):
def groupby(df, agg, **kwargs):
return df.groupby("a").agg({"b": agg})
run_and_compare(groupby, data=self.data, agg=agg)
def test_groupby_agg_default_to_pandas(self):
def lambda_func(df, **kwargs):
return df.groupby("a").agg(lambda df: (df.mean() - df.sum()) // 2)
run_and_compare(lambda_func, data=self.data, force_lazy=False)
def not_implemented_func(df, **kwargs):
return df.groupby("a").agg("cumprod")
        run_and_compare(not_implemented_func, data=self.data, force_lazy=False)
@pytest.mark.xfail(
reason="Function specified as a string should be passed into query compiler API, but currently it is transformed into a lambda"
)
@pytest.mark.parametrize("cols", cols_value)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_groupby_agg_mean(self, cols, as_index):
def groupby_mean(df, cols, as_index, **kwargs):
return df.groupby(cols, as_index=as_index).agg("mean")
run_and_compare(groupby_mean, data=self.data, cols=cols, as_index=as_index)
def test_groupby_lazy_multiindex(self):
index = generate_multiindex(len(self.data["a"]))
def groupby(df, *args, **kwargs):
df = df + 1
return df.groupby("a").agg({"b": "size"})
run_and_compare(groupby, data=self.data, constructor_kwargs={"index": index})
def test_groupby_lazy_squeeze(self):
def applier(df, **kwargs):
return df.groupby("a").sum().squeeze(axis=1)
run_and_compare(
applier,
data=self.data,
constructor_kwargs={"columns": ["a", "b"]},
force_lazy=True,
)
@pytest.mark.parametrize("method", ["sum", "size"])
def test_groupby_series(self, method):
def groupby(df, **kwargs):
ser = df[df.columns[0]]
return getattr(ser.groupby(ser), method)()
run_and_compare(groupby, data=self.data)
def test_groupby_size(self):
def groupby(df, **kwargs):
return df.groupby("a").size()
run_and_compare(groupby, data=self.data)
@pytest.mark.parametrize("by", [["a"], ["a", "b", "c"]])
@pytest.mark.parametrize("agg", ["sum", "size"])
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_agg_by_col(self, by, agg, as_index):
def simple_agg(df, **kwargs):
return df.groupby(by, as_index=as_index).agg(agg)
run_and_compare(simple_agg, data=self.data)
def dict_agg(df, **kwargs):
return df.groupby(by, as_index=as_index).agg({by[0]: agg})
run_and_compare(dict_agg, data=self.data)
def dict_agg_all_cols(df, **kwargs):
return df.groupby(by, as_index=as_index).agg({col: agg for col in by})
run_and_compare(dict_agg_all_cols, data=self.data)
# modin-issue#3461
def test_groupby_pure_by(self):
data = [1, 1, 2, 2]
# Test when 'by' is a 'TransformNode'
run_and_compare(lambda df: df.groupby(df).sum(), data=data, force_lazy=True)
# Test when 'by' is a 'FrameNode'
md_ser, pd_ser = pd.Series(data), pandas.Series(data)
md_ser._query_compiler._modin_frame._execute()
assert isinstance(
md_ser._query_compiler._modin_frame._op, FrameNode
), "Triggering execution of the Modin frame supposed to set 'FrameNode' as a frame's op"
set_execution_mode(md_ser, "lazy")
md_res = md_ser.groupby(md_ser).sum()
set_execution_mode(md_res, None)
pd_res = pd_ser.groupby(pd_ser).sum()
df_equals(md_res, pd_res)
taxi_data = {
"a": [1, 1, 2, 2],
"b": [11, 21, 12, 11],
"c": pandas.to_datetime(
["20190902", "20180913", "20190921", "20180903"], format="%Y%m%d"
),
"d": [11.5, 21.2, 12.8, 13.4],
}
# TODO: emulate taxi queries with group by category types when we have loading
# using arrow
# Another way of doing taxi q1 is
# res = df.groupby("cab_type").size() - this should be tested later as well
def test_taxi_q1(self):
def taxi_q1(df, **kwargs):
return df.groupby("a").size()
run_and_compare(taxi_q1, data=self.taxi_data)
def test_taxi_q2(self):
def taxi_q2(df, **kwargs):
return df.groupby("a").agg({"b": "mean"})
run_and_compare(taxi_q2, data=self.taxi_data)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_taxi_q3(self, as_index):
def taxi_q3(df, as_index, **kwargs):
return df.groupby(["b", df["c"].dt.year], as_index=as_index).size()
run_and_compare(taxi_q3, data=self.taxi_data, as_index=as_index)
def test_groupby_expr_col(self):
def groupby(df, **kwargs):
df = df.loc[:, ["b", "c"]]
df["year"] = df["c"].dt.year
df["month"] = df["c"].dt.month
df["id1"] = df["year"] * 12 + df["month"]
df["id2"] = (df["id1"] - 24000) // 12
df = df.groupby(["id1", "id2"], as_index=False).agg({"b": "max"})
return df
run_and_compare(groupby, data=self.taxi_data)
def test_series_astype(self):
def series_astype(df, **kwargs):
return df["d"].astype("int")
run_and_compare(series_astype, data=self.taxi_data)
def test_df_astype(self):
def df_astype(df, **kwargs):
return df.astype({"b": "float", "d": "int"})
run_and_compare(df_astype, data=self.taxi_data)
def test_df_indexed_astype(self):
def df_astype(df, **kwargs):
df = df.groupby("a").agg({"b": "sum"})
return df.astype({"b": "float"})
run_and_compare(df_astype, data=self.taxi_data)
@pytest.mark.parametrize("as_index", bool_arg_values)
def test_taxi_q4(self, as_index):
def taxi_q4(df, **kwargs):
df["c"] = df["c"].dt.year
df["d"] = df["d"].astype("int64")
df = df.groupby(["b", "c", "d"], sort=True, as_index=as_index).size()
if as_index:
df = df.reset_index()
return df.sort_values(
by=["c", 0 if as_index else "size"],
ignore_index=True,
ascending=[True, False],
)
run_and_compare(taxi_q4, data=self.taxi_data)
h2o_data = {
"id1": ["id1", "id2", "id3", "id1", "id2", "id3", "id1", "id2", "id3", "id1"],
"id2": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
"id3": ["id4", "id5", "id6", "id4", "id5", "id6", "id4", "id5", "id6", "id4"],
"id4": [4, 5, 4, 5, 4, 5, 4, 5, 4, 5],
"id5": [7, 8, 9, 7, 8, 9, 7, 8, 9, 7],
"id6": [7, 8, 7, 8, 7, 8, 7, 8, 7, 8],
"v1": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"v2": [1, 3, 5, 7, 9, 10, 8, 6, 4, 2],
"v3": [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0],
}
def _get_h2o_df(self):
df = pandas.DataFrame(self.h2o_data)
df["id1"] = df["id1"].astype("category")
df["id2"] = df["id2"].astype("category")
df["id3"] = df["id3"].astype("category")
return df
def test_h2o_q1(self):
df = self._get_h2o_df()
ref = df.groupby(["id1"], observed=True).agg({"v1": "sum"})
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id1"], observed=True, as_index=False).agg(
{"v1": "sum"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id1"] = exp["id1"].astype("category")
df_equals(ref, exp)
def test_h2o_q2(self):
df = self._get_h2o_df()
ref = df.groupby(["id1", "id2"], observed=True).agg({"v1": "sum"})
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id1", "id2"], observed=True, as_index=False).agg(
{"v1": "sum"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id1"] = exp["id1"].astype("category")
exp["id2"] = exp["id2"].astype("category")
df_equals(ref, exp)
def test_h2o_q3(self):
df = self._get_h2o_df()
ref = df.groupby(["id3"], observed=True).agg({"v1": "sum", "v3": "mean"})
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id3"], observed=True, as_index=False).agg(
{"v1": "sum", "v3": "mean"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id3"] = exp["id3"].astype("category")
df_equals(ref, exp)
def test_h2o_q4(self):
df = self._get_h2o_df()
ref = df.groupby(["id4"], observed=True).agg(
{"v1": "mean", "v2": "mean", "v3": "mean"}
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id4"], observed=True, as_index=False).agg(
{"v1": "mean", "v2": "mean", "v3": "mean"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
df_equals(ref, exp)
def test_h2o_q5(self):
df = self._get_h2o_df()
ref = df.groupby(["id6"], observed=True).agg(
{"v1": "sum", "v2": "sum", "v3": "sum"}
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id6"], observed=True, as_index=False).agg(
{"v1": "sum", "v2": "sum", "v3": "sum"}
)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
df_equals(ref, exp)
def test_h2o_q7(self):
df = self._get_h2o_df()
ref = (
df.groupby(["id3"], observed=True)
.agg({"v1": "max", "v2": "min"})
.assign(range_v1_v2=lambda x: x["v1"] - x["v2"])[["range_v1_v2"]]
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
set_execution_mode(modin_df, "lazy")
modin_df = modin_df.groupby(["id3"], observed=True).agg(
{"v1": "max", "v2": "min"}
)
modin_df["range_v1_v2"] = modin_df["v1"] - modin_df["v2"]
modin_df = modin_df[["range_v1_v2"]]
modin_df.reset_index(inplace=True)
set_execution_mode(modin_df, None)
exp = to_pandas(modin_df)
exp["id3"] = exp["id3"].astype("category")
df_equals(ref, exp)
def test_h2o_q10(self):
df = self._get_h2o_df()
ref = df.groupby(["id1", "id2", "id3", "id4", "id5", "id6"], observed=True).agg(
{"v3": "sum", "v1": "count"}
)
ref.reset_index(inplace=True)
modin_df = pd.DataFrame(df)
modin_df = modin_df.groupby(
["id1", "id2", "id3", "id4", "id5", "id6"], observed=True
).agg({"v3": "sum", "v1": "count"})
modin_df.reset_index(inplace=True)
exp = to_pandas(modin_df)
exp["id1"] = exp["id1"].astype("category")
exp["id2"] = exp["id2"].astype("category")
exp["id3"] = exp["id3"].astype("category")
df_equals(ref, exp)
std_data = {
"a": [1, 2, 1, 1, 1, 2, 2, 2, 1, 2],
"b": [4, 3, 1, 6, 9, 8, 0, 9, 5, 13],
"c": [12.8, 45.6, 23.5, 12.4, 11.2, None, 56.4, 12.5, 1, 55],
}
def test_agg_std(self):
def std(df, **kwargs):
df = df.groupby("a").agg({"b": "std", "c": "std"})
if not isinstance(df, pandas.DataFrame):
df = to_pandas(df)
df["b"] = df["b"].apply(lambda x: round(x, 10))
df["c"] = df["c"].apply(lambda x: round(x, 10))
return df
run_and_compare(std, data=self.std_data, force_lazy=False)
skew_data = {
"a": [1, 2, 1, 1, 1, 2, 2, 2, 1, 2, 3, 4, 4],
"b": [4, 3, 1, 6, 9, 8, 0, 9, 5, 13, 12, 44, 6],
"c": [12.8, 45.6, 23.5, 12.4, 11.2, None, 56.4, 12.5, 1, 55, 4.5, 7.8, 9.4],
}
def test_agg_skew(self):
def std(df, **kwargs):
df = df.groupby("a").agg({"b": "skew", "c": "skew"})
if not isinstance(df, pandas.DataFrame):
df = to_pandas(df)
df["b"] = df["b"].apply(lambda x: round(x, 10))
df["c"] = df["c"].apply(lambda x: round(x, 10))
return df
run_and_compare(std, data=self.skew_data, force_lazy=False)
def test_multilevel(self):
def groupby(df, **kwargs):
return df.groupby("a").agg({"b": "min", "c": ["min", "max", "sum", "skew"]})
run_and_compare(groupby, data=self.data)
class TestAgg:
data = {
"a": [1, 2, None, None, 1, None],
"b": [10, 20, None, 20, 10, None],
"c": [None, 200, None, 400, 500, 600],
"d": [11, 22, 33, 22, 33, 22],
}
int_data = pandas.DataFrame(data).fillna(0).astype("int").to_dict()
@pytest.mark.parametrize("agg", ["max", "min", "sum", "mean"])
@pytest.mark.parametrize("skipna", bool_arg_values)
def test_simple_agg(self, agg, skipna):
def apply(df, agg, skipna, **kwargs):
return getattr(df, agg)(skipna=skipna)
run_and_compare(apply, data=self.data, agg=agg, skipna=skipna, force_lazy=False)
def test_count_agg(self):
def apply(df, **kwargs):
return df.count()
run_and_compare(apply, data=self.data, force_lazy=False)
@pytest.mark.parametrize("data", [data, int_data], ids=["nan_data", "int_data"])
@pytest.mark.parametrize("cols", ["a", "d", ["a", "d"]])
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("sort", [True])
@pytest.mark.parametrize("ascending", [True, False])
def test_value_counts(self, data, cols, dropna, sort, ascending):
def value_counts(df, cols, dropna, sort, ascending, **kwargs):
return df[cols].value_counts(dropna=dropna, sort=sort, ascending=ascending)
if dropna and pandas.DataFrame(
data, columns=cols if is_list_like(cols) else [cols]
).isna().any(axis=None):
pytest.xfail(
reason="'dropna' parameter is forcibly disabled in OmniSci's GroupBy "
"due to performance issues, you can track this problem at: "
"https://github.com/modin-project/modin/issues/2896"
)
# Custom comparator is required because pandas is inconsistent about
# the order of equal values; we can't match this behaviour. For more details:
# https://github.com/modin-project/modin/issues/1650
run_and_compare(
value_counts,
data=data,
cols=cols,
dropna=dropna,
sort=sort,
ascending=ascending,
comparator=df_equals_with_non_stable_indices,
)
@pytest.mark.parametrize(
"method", ["sum", "mean", "max", "min", "count", "nunique"]
)
def test_simple_agg_no_default(self, method):
def applier(df, **kwargs):
if isinstance(df, pd.DataFrame):
# At the end of the reduction function it does an inevitable `transpose`, which
# defaults to pandas. The following logic checks that `transpose` is the only
# function falling back to pandas in the reduction operation flow.
with pytest.warns(UserWarning) as warns:
res = getattr(df, method)()
assert (
len(warns) == 1
), f"More than one warning were arisen: len(warns) != 1 ({len(warns)} != 1)"
message = warns[0].message.args[0]
assert (
re.match(r".*transpose.*defaulting to pandas", message) is not None
), f"Expected DataFrame.transpose defaulting to pandas warning, got: {message}"
else:
res = getattr(df, method)()
return res
run_and_compare(applier, data=self.data, force_lazy=False)
@pytest.mark.parametrize("data", [data, int_data])
@pytest.mark.parametrize("dropna", bool_arg_values)
def test_nunique(self, data, dropna):
def applier(df, **kwargs):
return df.nunique(dropna=dropna)
run_and_compare(applier, data=data, force_lazy=False)
class TestMerge:
data = {
"a": [1, 2, 3, 6, 5, 4],
"b": [10, 20, 30, 60, 50, 40],
"e": [11, 22, 33, 66, 55, 44],
}
data2 = {
"a": [4, 2, 3, 7, 1, 5],
"b": [40, 20, 30, 70, 10, 50],
"d": [4000, 2000, 3000, 7000, 1000, 5000],
}
on_values = ["a", ["a"], ["a", "b"], ["b", "a"], None]
how_values = ["inner", "left"]
@pytest.mark.parametrize("on", on_values)
@pytest.mark.parametrize("how", how_values)
@pytest.mark.parametrize("sort", [True, False])
def test_merge(self, on, how, sort):
def merge(lib, df1, df2, on, how, sort, **kwargs):
return df1.merge(df2, on=on, how=how, sort=sort)
run_and_compare(
merge, data=self.data, data2=self.data2, on=on, how=how, sort=sort
)
def test_merge_non_str_column_name(self):
def merge(lib, df1, df2, on, **kwargs):
return df1.merge(df2, on=on, how="inner")
run_and_compare(merge, data=[[1, 2], [3, 4]], data2=[[1, 2], [3, 4]], on=1)
h2o_data = {
"id1": ["id1", "id10", "id100", "id1000"],
"id2": ["id2", "id20", "id200", "id2000"],
"id3": ["id3", "id30", "id300", "id3000"],
"id4": [4, 40, 400, 4000],
"id5": [5, 50, 500, 5000],
"id6": [6, 60, 600, 6000],
"v1": [3.3, 4.4, 7.7, 8.8],
}
h2o_data_small = {
"id1": ["id10", "id100", "id1000", "id10000"],
"id4": [40, 400, 4000, 40000],
"v2": [30.3, 40.4, 70.7, 80.8],
}
h2o_data_medium = {
"id1": ["id10", "id100", "id1000", "id10000"],
"id2": ["id20", "id200", "id2000", "id20000"],
"id4": [40, 400, 4000, 40000],
"id5": [50, 500, 5000, 50000],
"v2": [30.3, 40.4, 70.7, 80.8],
}
h2o_data_big = {
"id1": ["id10", "id100", "id1000", "id10000"],
"id2": ["id20", "id200", "id2000", "id20000"],
"id3": ["id30", "id300", "id3000", "id30000"],
"id4": [40, 400, 4000, 40000],
"id5": [50, 500, 5000, 50000],
"id6": [60, 600, 6000, 60000],
"v2": [30.3, 40.4, 70.7, 80.8],
}
def _get_h2o_df(self, data):
df = pandas.DataFrame(data)
if "id1" in data:
df["id1"] = df["id1"].astype("category")
if "id2" in data:
df["id2"] = df["id2"].astype("category")
if "id3" in data:
df["id3"] = df["id3"].astype("category")
return df
# Currently OmniSci returns category columns as strings, so when they are
# cast back to category they only contain values present in the actual
# data, whereas in pandas the category would keep the old values as well.
# Simply casting category to string for comparison doesn't work because
# None cast to category and back to string becomes "nan". So we cast
# everything to category and then to string.
def _fix_category_cols(self, df):
if "id1" in df.columns:
df["id1"] = df["id1"].astype("category")
df["id1"] = df["id1"].astype(str)
if "id1_x" in df.columns:
df["id1_x"] = df["id1_x"].astype("category")
df["id1_x"] = df["id1_x"].astype(str)
if "id1_y" in df.columns:
df["id1_y"] = df["id1_y"].astype("category")
df["id1_y"] = df["id1_y"].astype(str)
if "id2" in df.columns:
df["id2"] = df["id2"].astype("category")
df["id2"] = df["id2"].astype(str)
if "id2_x" in df.columns:
df["id2_x"] = df["id2_x"].astype("category")
df["id2_x"] = df["id2_x"].astype(str)
if "id2_y" in df.columns:
df["id2_y"] = df["id2_y"].astype("category")
df["id2_y"] = df["id2_y"].astype(str)
if "id3" in df.columns:
df["id3"] = df["id3"].astype("category")
df["id3"] = df["id3"].astype(str)
def test_h2o_q1(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_small)
ref = lhs.merge(rhs, on="id1")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id1")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q2(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_medium)
ref = lhs.merge(rhs, on="id2")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id2")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q3(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_medium)
ref = lhs.merge(rhs, how="left", on="id2")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, how="left", on="id2")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q4(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_medium)
ref = lhs.merge(rhs, on="id5")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id5")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
def test_h2o_q5(self):
lhs = self._get_h2o_df(self.h2o_data)
rhs = self._get_h2o_df(self.h2o_data_big)
ref = lhs.merge(rhs, on="id3")
self._fix_category_cols(ref)
modin_lhs = pd.DataFrame(lhs)
modin_rhs = pd.DataFrame(rhs)
modin_res = modin_lhs.merge(modin_rhs, on="id3")
exp = to_pandas(modin_res)
self._fix_category_cols(exp)
df_equals(ref, exp)
dt_data1 = {
"id": [1, 2],
"timestamp": | pandas.to_datetime(["20000101", "20000201"], format="%Y%m%d") | pandas.to_datetime |
'''
Created on 18.03.2015
@author: <NAME>
'''
import pandas as pd
from pandas import DataFrame, MultiIndex
import datetime as dt
from dateutil.relativedelta import relativedelta
from datetime import date, datetime, timedelta
import h5py
import numpy as np
import datetime
class meda(object):
'''
class containing the data object (a pandas DataFrame) with multi-level columns
'''
def __init__(self,flatname=None):
self._dfglob=DataFrame()
def genLevels(self): ###### used (but always commented)
'''
conversion of the column names (in tuple format)
of the main dataframe to a MultiIndex
'''
cols=MultiIndex.from_tuples(self._dfglob.columns.values)
self._dfglob.columns=cols
def viewer_time_to_datetime(self,viewer_timevector, roundTo=None):
''' Will create a pandas-accessible datetime vector instead of the time format
used in the hdf5 file and in the hdf5 EBC viewer.
'''
def roundTime(dt=None, roundTo=60):
"""Round a datetime object to any time lapse in seconds
dt : datetime.datetime object, default now.
roundTo : Closest number of seconds to round to, default 1 minute.
"""
if dt == None : dt = datetime.datetime.now()
seconds = (dt - dt.min).seconds
# // is a floor division, not a comment on following line:
rounding = (seconds+roundTo/2) // roundTo * roundTo
# assign the results: the original expressions discarded the rounded value
dt = dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)
dt = dt.replace(second=0, microsecond=0)
return dt
def to_datetime(time_value, roundTo=60):
# NOTE: a much nicer method, but it leaves milliseconds and I prefer a less exact rounding: http://stackoverflow.com/questions/26187139/create-a-datetime-time-object-just-from-seconds/26187347#26187347
''' Convert one time-value into datetime-object
'''
dt = datetime.datetime.min + datetime.timedelta(days=time_value - 1)
if roundTo!=None:
dt=roundTime(dt,roundTo)
return dt
time_vec = map(to_datetime,viewer_timevector)
return time_vec
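# --- hedged sketch (not part of the original module) -------------------------
# Assumption: viewer time values count fractional days with day 1 == datetime.min,
# exactly as to_datetime() above implements. A small list-returning helper with
# the same formula, usable outside the class, might look like this:
def viewer_days_to_datetimes(viewer_timevector, round_to=60):
    out = []
    for value in viewer_timevector:
        # same conversion formula as to_datetime() above
        dtv = datetime.datetime.min + datetime.timedelta(days=value - 1)
        seconds = (dtv - dtv.min).seconds
        rounding = (seconds + round_to / 2) // round_to * round_to
        out.append(dtv + datetime.timedelta(0, rounding - seconds, -dtv.microsecond))
    return out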
def _unifyWP(self,level=3):
'''
to be called before using the data of the main DataFrame;
changes the 10-0 signal (window open/closed) into 1-0.
'''
self._dfglob.iloc[:, self._dfglob.columns.get_level_values(level)=='WP1']=self._dfglob.iloc[:, self._dfglob.columns.get_level_values(level)=='WP1']/10
self._dfglob.iloc[:, self._dfglob.columns.get_level_values(level)=='WP2']=self._dfglob.iloc[:, self._dfglob.columns.get_level_values(level)=='WP2']/10
def _loadTimeGroup(self,h5file,group2go=None,columnNames=[],limit2=[],exclude=[]):
'''
method to open leaves from a selected group.
It allows for leaf inclusion or exclusion.
Intermediate groups should be provided as a string,
separated by slashes, in the "group2go" argument.
columnNames, useful for multi-indexing, is a list;
it is used as the column name up to the level before the leaf name.
'''
exclude=exclude+["Time"]
keys=h5file[group2go].keys()
try: time = np.ndarray.flatten(
h5file[group2go+"/Time"][:])
except KeyError:
print ("Oops, ", group2go, "doesn't exist!")
adr=DataFrame()
else:
timeArr = self.viewer_time_to_datetime(time, 60)
data_dict = {}
divideWP=10
for key in keys:
if limit2!=[]:
if key in limit2:
data_dict[tuple(columnNames+[key])] = np.ndarray.flatten(h5file[group2go+"/"+key][:])
else:
if key not in exclude:
data_dict[tuple(columnNames+[key])] = np.ndarray.flatten(h5file[group2go+"/"+key][:])
adr=pd.DataFrame(data = data_dict,index = timeArr)
adr["index"] = adr.index
adr.drop_duplicates(subset='index', keep='last', inplace=True)
del adr["index"]
return adr
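# --- hedged usage sketch (group path and leaf names mirror those used further below) ---
# m = meda()
# h5 = m.openFile(2013, 1)
# weather = m._loadTimeGroup(h5, group2go='Field_Test/Weather_Station',
#                            columnNames=['Weather', '-', '-'], limit2=['AT'])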
def loadAT(self,Year=2013, Month=1):
Year=str(Year)
if Month<10: Month="0"+str(Month)
else: Month=str(Month)
FileName='D:/HDF-Archive/iiiHD_'+str(Year)+'_'+Month+'.hdf5'
h5file = h5py.File(FileName,'r')
time = np.ndarray.flatten(
h5file['Field_Test']['Weather_Station']['Time'][:])
data_dict = {}
data_dict[("Weather","-","-",'AT')] = np.ndarray.flatten(
h5file['Field_Test']['Weather_Station']['AT'][:])
time = self.viewer_time_to_datetime(time, 60)
df=pd.DataFrame(data = data_dict,index = time)
def openFile(self, Year=2013, Month=1):
'''
Method to open an HDF5 file based on the requested month and year
'''
Year=str(Year)
if Month<10: Month="0"+str(Month)
else: Month=str(Month)
FileName='D:/HDF-Archive/iiiHD_'+str(Year)+'_'+Month+'.hdf5'
print (FileName)
h5file = h5py.File(FileName,'r')
return h5file
def prepare4Room(self,Building=2,Entrance=1,Apartment=1,Room="Children"):
'''
Method to generate the column names including building (e.g. B2E1), apartment and room
'''
Building=str(Building)
Entrance=str(Entrance)
if Apartment<10: Apartment="0"+str(Apartment)
else: Apartment=str(Apartment)
Room="Room"+"_"+Room
print ("Loading ", Room)
bdCode="B"+Building+"E"+Entrance
ApCode="A"+Apartment
group2go='Field_Test/Building_'+Building+'/Entrance_B'+Building+'E'+Entrance+'/Apartment_'+Apartment+'/'+Room+'/RMU/'
columnName=[bdCode, ApCode, Room]
return Building, Entrance, Apartment, Room, group2go, columnName
def findWiloGroup(self,Building=2,Entrance=1,Apartment=1,Room="Children"):
group2go='Field_Test/Building_'+Building+'/Entrance_B'+Building+'E'+Entrance+'/Apartment_'+Apartment+'/'+Room+'/Wilo_Pump/'
return group2go
def loadRoomRMU(self, Year=2013, Month=1,Building=2,Entrance=1,Apartment=1,Room="Children",loadAmbTemp=True,limit2=[],esclude=[],df2glob=True,h5file=None, WeatherLimit2=["AT","Wind_Speed"],Wilo=False, WiloLimit2=['WRT', 'Set_Temp', 'Set_Temp_2']):
'''
limit2 and Exclude only refer to the RMU group
'''
if h5file==None:
h5file=self.openFile(Year,Month)
Building, Entrance, Apartment, Room, group2go, columnName=self.prepare4Room(Building, Entrance, Apartment, Room)
group2goWilo=self.findWiloGroup(Building, Entrance, Apartment, Room)
temp=self._loadTimeGroup(h5file,group2go, columnName,limit2,esclude)
if df2glob==True:
self._dfglob=temp
if loadAmbTemp==True:
group2go='Field_Test/Weather_Station'
columnName=["Weather","-","-"]
Wlimit2=WeatherLimit2
temp=self._loadTimeGroup(h5file,group2go, columnName,Wlimit2)
self._dfglob=self._dfglob.join(temp)
if Wilo==True:
print ('load Wilo')
temp=self._loadTimeGroup(h5file,group2goWilo, columnName,WiloLimit2)
self._dfglob=self._dfglob.join(temp)
self._unifyWP()
else:
if loadAmbTemp==True:
print ("Warning, not including AT at this stage")
return temp
def loadAparRMU(self,Year=2013, Month=1,Building=2,Entrance=1,Apartment=1,loadAmbTemp=True,limit2=[],esclude=[],df2glob=True,h5file=None, WeatherLimit2=["AT","Wind_Speed"],Wilo=False, WiloLimit2=['WRT', 'Set_Temp', 'Set_Temp_2']):
if h5file==None: h5file=self.openFile(Year,Month)
Rooms=["Living","Kitchen","Sleeping","Children","Bath"]
print ("Loading apartment ", Apartment)
RL1=self.loadRoomRMU(Year, Month, Building, Entrance, Apartment, Rooms[0], False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
RK1=self.loadRoomRMU(Year, Month, Building, Entrance, Apartment, Rooms[1], False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
RS1=self.loadRoomRMU(Year, Month, Building, Entrance, Apartment, Rooms[2], False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
RC1=self.loadRoomRMU(Year, Month, Building, Entrance, Apartment, Rooms[3], False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
RB1=self.loadRoomRMU(Year, Month, Building, Entrance, Apartment, Rooms[4], False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
lenAr=(RL1.shape[0],RK1.shape[0],RS1.shape[0],RC1.shape[0],RB1.shape[0])
joinAr=[lenAr[0]]
for indexlA, size in enumerate(lenAr):
if indexlA>0:
if joinAr[0]!=size:
joinAr.append(size)
print (joinAr)
if loadAmbTemp==True:
print ("I join all...")
group2go='Field_Test/Weather_Station'
columnName=["Weather","-","-"]
Wlimit2=WeatherLimit2
df=self._loadTimeGroup(h5file,group2go, columnName,Wlimit2)
joinIt=[RK1,RS1,RC1,RB1,df]
RL1=RL1.join(joinIt, how="outer")
temp=RL1
else:
if len(joinAr)==1:
print ("I concat")
temp=pd.concat([RL1,RK1,RS1,RC1,RB1],axis=1, join="outer")
else:
print ("I join")
temp=RL1.join([RK1,RS1,RC1,RB1], how="outer")
if df2glob==True:
self._dfglob=temp
self._unifyWP()
else:
return temp
def loadEntrRMU(self,Year=2013, Month=1,Building=2,Entrance=1,loadAmbTemp=True,limit2=[],esclude=[],df2glob=True,h5file=None, WeatherLimit2=["AT","Wind_Speed"],Wilo=False, WiloLimit2=['WRT', 'Set_Temp', 'Set_Temp_2']):
if h5file==None: h5file=self.openFile(Year,Month)
A1=self.loadAparRMU(Year, Month, Building, Entrance, 1, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A2=self.loadAparRMU(Year, Month, Building, Entrance, 2, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A3=self.loadAparRMU(Year, Month, Building, Entrance, 3, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A4=self.loadAparRMU(Year, Month, Building, Entrance, 4, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A5=self.loadAparRMU(Year, Month, Building, Entrance, 5, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A6=self.loadAparRMU(Year, Month, Building, Entrance, 6, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A7=self.loadAparRMU(Year, Month, Building, Entrance, 7, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A8=self.loadAparRMU(Year, Month, Building, Entrance, 8, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A9=self.loadAparRMU(Year, Month, Building, Entrance, 9, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
A10=self.loadAparRMU(Year, Month, Building, Entrance, 10, False, limit2, esclude, False, h5file,Wilo, WiloLimit2)
lenAr=(A1.shape[0],A2.shape[0],A3.shape[0],A4.shape[0],A5.shape[0],A6.shape[0],A7.shape[0],A8.shape[0],A9.shape[0],A10.shape[0])
joinAr=[lenAr[0]]
for indexlA, size in enumerate(lenAr):
if indexlA>0:
if joinAr[0]!=size:
joinAr.append(size)
print (joinAr)
if loadAmbTemp==True:
group2go='Field_Test/Weather_Station'
columnName=["Weather","-","-"]
Wlimit2=WeatherLimit2
df=self._loadTimeGroup(h5file,group2go, columnName,Wlimit2)
temp=A1.join([A2,A3,A4,A5,A6,A7,A8,A9,A10,df], how="outer")
else:
if len(joinAr)!=1:
print ("I concat")
temp= | pd.concat([A1,A2,A3,A4,A5,A6,A7,A8,A9,A10],axis=1, join="outer") | pandas.concat |
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy as np
from dstk.imputation.encoders import MasterExploder, StringFeatureEncoder
def test_master_exploder_encodes_ints_bools_floats_strings():
T = True
F = False
N = None
NaN = np.NaN
S = 's'
data = pd.DataFrame(dict(
a=[T, N, T, T, F, F, F, N],
b=[N, F, T, F, N, F, T, F],
c=[T, T, F, N, N, T, F, F],
d=np.array([NaN, NaN, 1.0, NaN, NaN, 2.5, 0.0, NaN]),
e=[S, N, N, S, N, S, S, N]
))
actual = MasterExploder(encode_categorical=True).fit(data).transform(data)
expected = pd.DataFrame(dict(
a=[2, 0, 2, 2, 1, 1, 1, 0],
b=[0, 1, 2, 1, 0, 1, 2, 1],
c=[2, 2, 1, 0, 0, 2, 1, 1],
d_filled_in=[1.0, 1.0, 1.0, 1.0, 1.0, 2.5, 0.0, 1.0],
d_missing=[True, True, False, True, True, False, False, True],
e=[1, 0, 0, 1, 0, 1, 1, 0]
))
assert_frame_equal(actual, expected)
def test_master_exploder_ignores_categorical_columns_when_told():
NaN = np.NaN
data = pd.DataFrame(dict(
a=[1, -1, 1, 1, 0, 0, 0, 1],
b=[-1, 0, 1, 0, -1, 0, 1, 0],
c=[1, 1, 0, -1, -1, 1, 0, 0],
d=np.array([NaN, NaN, 1.0, NaN, NaN, 2.5, 0.0, NaN]),
e=[0, -1, -1, 0, -1, 0, -1, 0],
f=[1, 2, 3, 0, 0, 2, 2, 1]
))
actual = MasterExploder(encode_categorical=False).fit(data).transform(data)
expected = pd.DataFrame(dict(
a=[1, -1, 1, 1, 0, 0, 0, 1],
b=[-1, 0, 1, 0, -1, 0, 1, 0],
c=[1, 1, 0, -1, -1, 1, 0, 0],
d_filled_in=[1.0, 1.0, 1.0, 1.0, 1.0, 2.5, 0.0, 1.0],
d_missing=[True, True, False, True, True, False, False, True],
e=[0, -1, -1, 0, -1, 0, -1, 0],
f=[1, 2, 3, 0, 0, 2, 2, 1]
))
| assert_frame_equal(actual, expected) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 10:27:25 2021
@author: laura.gf
"""
import requests
from requests.exceptions import HTTPError
import time
import pandas as pd
import argparse # Input parameters from command line
import sys
def query_entry_pt(url):
"""This function takes as input a URL entry point and returns the complete JSON response in a REST API
Input:
- url(string): complete url (or entry point) pointing at server
Output:
- jsonResponse(json object): JSON response associated with query
- total_time(float): time elapsed for the query, in seconds
"""
try:
# Time query
start_time = time.time()
# Using GET command
response = requests.get(url)
total_time = time.time() - start_time
# Raise issues if response is different from 200
response.raise_for_status()
# access JSON content
jsonResponse = response.json()
return [jsonResponse,total_time]
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
def format_json_resp(json_resp,query_time,record_path_field,date,time,output_dir,base_url,entry_pt):
"""
This function takes a query result in JSON format and saves a CSV with the results
Parameters
----------
json_resp : JSON object
content of query.
query_time : string
time it took for the query to complete.
record_path_field : string
level necessary to flatten JSON.
date : datetime object
date of query.
time : datetime object
time of query.
output_dir : string
path to directory where CSV query results will be stored.
base_url : string
URL pointing at REST API.
entry_pt : string
complete query type.
Returns
-------
None.
"""
df = | pd.json_normalize(json_resp,record_path=record_path_field) | pandas.json_normalize |
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from rcbm import fab
def test_calculate_fabric_heat_loss_coefficient():
"""Output is equivalent to DEAP 4.2.0 example A"""
floor_area = pd.Series([63])
roof_area = pd.Series([63])
wall_area = pd.Series([85.7])
window_area = pd.Series([29.6])
door_area = pd.Series([1.85])
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = pd.Series([1.5])
thermal_bridging_factor = | pd.Series([0.05]) | pandas.Series |
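# --- hedged sketch (assumption, not taken from the rcbm source) ---------------
# fab.calculate_fabric_heat_loss_coefficient presumably follows the DEAP fabric
# heat-loss form: sum of area x U-value over the plane elements plus a thermal
# bridging term proportional to total exposed area. A minimal reference version
# consistent with the Series inputs above could be:
def fabric_heat_loss_coefficient(areas, uvalues, thermal_bridging_factor):
    # plane-element losses: sum of A x U
    plane_losses = sum(area * uvalue for area, uvalue in zip(areas, uvalues))
    # thermal bridging: y-factor times total exposed area
    bridging = thermal_bridging_factor * sum(areas)
    return plane_losses + bridging  # W/K
# hypothetical usage with the test data above:
# expected = fabric_heat_loss_coefficient(
#     [floor_area, roof_area, wall_area, window_area, door_area],
#     [floor_uvalue, roof_uvalue, wall_uvalue, window_uvalue, door_uvalue],
#     thermal_bridging_factor,
# )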
# output.py module dealing with outputs of DEMENTpy.
# <NAME>, January, 2020
import numpy as np
import pandas as pd
class Output():
"""
This class deals with outputs.
Accepts data derived from the initialization.py and grid.py modules, and has three methods:
output(): stores all time series
microbes_abundance(): a special method
microbes_tradeoff(): another special method tracking intracellular carbon allocation
"""
def __init__(self,runtime,data_init):
"""
The constructor of Output class.
Parameters:
runtime: user-specified parameters when running the model
data_init: data dictionary; all initialized data from the 'initialization.py' module
Returns:
Initialization: all data initialized preceding the execution of grid.py: dictionary
Microbial_traits: microbial traits only pulled out from Initialization: dataframe
SubstratesSeries: substrate-specific total mass over the grid: Substrate * C
Substrates_Sum: total substrates over the grid: day * C
MonomersSeries: monomer-specific total mass over the grid: Monomer * C
Monomers_Sum: total monomers over the grid: day * C
MicrobesSeries: taxon-specific total biomass over the grid: Taxon * (C,N,P)
Microbes_Sum: total biomass over the grid: day * (C,N,P)
TransporterSeries: taxon-specific total transporter production over the grid
EnzymesSeries: enzyme-specific total mass over the grid: Enzyme * C
Enzymes_Sum: total enzyme summed up over the grid: list
OsmolyteSeries: taxon-specific total osmolyte production over the grid
RespSeries: total respiration over the grid
CUE:
Notes:
Variables that are not tracked here but are tracked by DEMENT (R version):
NH4Series:
PO4Series:
"""
# A couple of vars used in processing outputs
n_taxa = int(runtime.loc['n_taxa',1]) # number of taxa
Mic_index = ["Tax" + str(i) for i in range(1,n_taxa + 1)] # microbial taxa index
# number of time steps in each pulse
self.cycle = int(runtime.loc['end_time',1])
# Pass all runtime parameters to Runtime
self.Runtime = runtime
# Pass all initialized data (a dictionary) to 'Initialization'
self.Initialization = data_init
# Pull out microbial traits data and put them in a dataframe:Microbial_traits
data = np.concatenate(
(data_init['fb'][0:n_taxa],
data_init['Microbes_pp']['C'][0:n_taxa],
data_init['Microbes_pp']['N'][0:n_taxa],
data_init['Microbes_pp']['P'][0:n_taxa],
data_init['UptakeGenes'].sum(axis=1)[0:n_taxa],
data_init['EnzGenes'].sum(axis=1)[0:n_taxa],
data_init['OsmoGenes'].sum(axis=1)[0:n_taxa],
data_init['UptakeGenes_trait'][0:n_taxa],
data_init['EnzProdConsti_trait'][0:n_taxa],
data_init['EnzProdInduci_trait'][0:n_taxa],
data_init['OsmoProdConsti_trait'][0:n_taxa],
data_init['OsmoProdInduci_trait'][0:n_taxa],
data_init['TaxDroughtTol'][0:n_taxa]),
axis=0
)
columns = [
'F/B','C','N','P','Uptake_Gene','Enz_Gene','Osmo_Gene','Uptake_Cost','Enz_Consti_Cost',
'Enz_Induci_Cost','Osmo_Consti_Cost','Osmo_Induci_Cost','Drought_tolerance'
]
self.Microbial_traits = pd.DataFrame(data=data.reshape(13,n_taxa).T, index=Mic_index, columns=columns, dtype='float32')
# Account for inputs in mass balance
# self.Cum_Substrate = (data_init['Substrates'].groupby(level=0,sort=False).sum()).sum(axis=0)
# self.Cum_Monomer = (data_init['Monomers'].groupby(level=0,sort=False).sum()).sum(axis=0)
# self.Cum_Monomer_ratios = ecosystem.Cum_Monomer_ratios
# Degradation rates
#DecayRates_list = [0] * n_substrates * gridsize
#DecayRates_array = np.array(DecayRates_list).reshape(n_substrates * gridsize,1)
#DecayRates_df = pd.DataFrame(data = DecayRates_array, index= data_init['Substrates'].index, columns= ['C'])
#self.DecayRatesSeries = DecayRates_df.groupby(level=0,sort=False).sum()
# Leaching
#self.Cum_Leaching_N = pd.Series([0],index=[0])
#self.Cum_Leaching_P = pd.Series([0],index=[0])
# Substrates
Substrates_grid = data_init['Substrates'].groupby(level=0,sort=False).sum()
Substrates_grid['C'].name = 0 # set the series name to 0
self.SubstratesSeries = Substrates_grid['C']
#self.Substrates_Sum = pd.Series([Substrates_grid['C'].sum()],index=[0])
# Monomers
Monomers_grid = data_init['Monomers'].groupby(level=0,sort=False).sum()
Monomers_grid['C'].name = 0
self.MonomersSeries = Monomers_grid['C']
#self.Monomers_Sum = pd.Series([sum(Monomers_grid["C"])],index=[0])
#self.NH4Series = pd.Series([Monomers_grid.loc["NH4","N"]],index=[0])
#self.PO4Series = pd.Series([Monomers_grid.loc["PO4","P"]],index=[0])
# Microbes
Microbes_grid = data_init['Microbes'].groupby(level=0,sort=False).sum()
Microbes_grid['C'].name = 0 # rename the series name to 0
self.MicrobesSeries = Microbes_grid['C'] # taxon-specific total biomass summed over the grid
self.MicrobesSeries_repop = Microbes_grid['C'] # created for reinitialization
# define a new var placing biomass (in C) by taxon grid-specifically
self.microbes_grid_taxa = data_init['Microbes']['C']
# Number of individuals of Taxon count
Taxon_index = data_init['Microbes']['C'] > 0
Taxon_index.name = 0
self.Taxon_count = Taxon_index.groupby(level=0,sort=False).sum().astype('uint32')
#self.Taxon_count_repop = Taxon_index.groupby(level=0,sort=False).sum().astype('uint32')
# Taxon-specific CUE
#self.CUE_TaxonSeries = pd.Series(data=[0]*n_taxa,index=Mic_index)
# Enzymes
Enzymes_grid = data_init['Enzymes'].groupby(level=0,sort=False).sum() # total of each enzyme summed over the spatial grid
Enzymes_grid.name = 0
self.EnzymesSeries = Enzymes_grid
#self.Enzymes_Sum = pd.Series([Enzymes_grid.sum()],index=[0])
# Emergent properties over the grid
self.RespSeries = pd.Series([0],index=[0], dtype='float32') # respiration
self.CUE_system = pd.Series([0],index=[0], dtype='float32') # emergent CUE
self.Kill = pd.Series([0],index=[0], dtype='uint32') # stochastic death toll
# Transporters: taxon-specific production summed over the grid by taxon
#self.TransporterSeries = pd.Series(data=[0]*n_taxa,index=Mic_index)
# Enyzmes: taxon-specific production summed over the grid by taxon
#self.EnzymeConSeries = pd.Series(data=[0]*n_taxa,index=Mic_index)
#self.EnzymeIndSeries = pd.Series(data=[0]*n_taxa,index=Mic_index)
self.Enzyme_TaxonSeries = pd.Series(data=[0]*n_taxa, index=Mic_index, dtype='float32')
# Osmolytes: taxon-specific production summed over the grid by taxon
#self.OsmolyteConSeries = pd.Series(data=[0]*n_taxa,index=Mic_index)
#self.OsmolyteIndSeries = pd.Series(data=[0]*n_taxa,index=Mic_index)
self.Osmolyte_TaxonSeries = pd.Series(data=[0]*n_taxa, index=Mic_index, dtype='float32')
# Growth yield
self.Growth_yield = pd.Series(data=[0]*n_taxa, index=Mic_index, dtype='float32')
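# --- hedged usage sketch (hypothetical runtime/data_init/ecosystem objects) ---
# out = Output(runtime, data_init)                 # records the day-0 state
# for day in range(int(runtime.loc['end_time', 1])):
#     ...advance the grid (grid.py)...
#     out.output(ecosystem, year=0, day=day)       # append one column per day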
def output(self,ecosystem,year,day):
"""
Records outputs in various variables of each iteration.
Parameters:
ecosystem: object from the grid.py module
day: the day to record to outputs
Returns:
"""
# Account for inputs in mass balance
# self.Cum_Substrate = self.Cum_Substrate + ecosystem.SubInput.sum(axis = 0)
# self.Cum_Monomer = self.Cum_Monomer + (ecosystem.MonInput.mul(ecosystem.Cum_Monomer_ratios,axis=0)).sum(axis=0)
# DecayRates
# DecayRates_grid = ecosystem.DecayRates.groupby(level=0,sort=False).sum()
# self.DecayRatesSeries = pd.concat([self.DecayRatesSeries,DecayRates_grid],axis=1,sort=False)
# Substrates
Substrates_grid = ecosystem.Substrates.groupby(level=0,sort=False).sum()
Substrates_grid['C'].name = self.cycle*year + (day+1) # index the output continuously by day
self.SubstratesSeries = pd.concat([self.SubstratesSeries,Substrates_grid['C']], axis=1, sort=False)
#self.Substrates_Sum = pd.concat([self.Substrates_Sum,pd.Series([Substrates_grid['C'].sum()],index=[day+1])],axis=0,sort=False)
# Monomers
Monomers_grid = ecosystem.Monomers.groupby(level=0,sort=False).sum()
Monomers_grid['C'].name = self.cycle*year + (day+1)
self.MonomersSeries = pd.concat([self.MonomersSeries,Monomers_grid['C']], axis=1, sort=False)
#self.Monomers_Sum = pd.concat([self.Monomers_Sum, pd.Series([sum(Monomers_grid["C"])], index=[day+1])], axis=0, sort=False)
#self.NH4Series = pd.concat([self.NH4Series, pd.Series([Monomers_grid.loc["NH4","N"]],index=[day+1])], axis=0, sort=False)
#self.PO4Series = pd.concat([self.PO4Series, pd.Series([Monomers_grid.loc["PO4","P"]],index=[day+1])], axis=0, sort=False)
# Interim Microbes
#Microbes_interim_grid = ecosystem.Microbes_interim.groupby(level=0,sort=False).sum()
#self.Microbes_Interim = pd.concat([self.MicrobesSeries,Microbes_interim_grid['C']],axis=1,sort=False)
# Count taxon for averaging taxon CUE
#Taxon_index = (ecosystem.Microbes_interim)['C'] > 0
#Taxon_index.name = day + 1
#taxon_count = Taxon_index.groupby(level=0,sort=False).sum()
#self.Taxon_count = pd.concat([self.Taxon_count,taxon_count],axis=1,sort=False)
# Microbe
## Taxon abundance
Taxon_index = ecosystem.Microbes['C'] > 0
Taxon_index.name = self.cycle*year + (day+1)
self.Taxon_count = pd.concat([self.Taxon_count, Taxon_index.groupby(level=0,sort=False).sum().astype('uint32')], axis=1, sort=False)
## Taxon biomass
Microbes_grid = ecosystem.Microbes.groupby(level=0,sort=False).sum()
Microbes_grid['C'].name = self.cycle*year + (day+1)
self.MicrobesSeries = pd.concat([self.MicrobesSeries, Microbes_grid['C']], axis=1, sort=False)
#self.Microbes_Sum = pd.concat([self.Microbes_Sum,pd.Series([Microbes_grid['C'].sum()],index=[day+1])],axis=0,sort=False)
self.microbes_grid_taxa = | pd.concat([self.microbes_grid_taxa,ecosystem.Microbes['C']],axis=1,sort=False) | pandas.concat |
import numpy as np
import pandas as pd
from utils import topickle, load_feats
from constants import FEATS_DIR, MAX_DAYS, NUM_COMMON_CONS
from processing.const import START, END, HOLIDAYS
from featnames import HOLIDAY, DOW_PREFIX, CON, INDEX
def save_date_feats():
N = pd.to_datetime(END) - | pd.to_datetime(START) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 11:51:13 2018
@author: Diogo
"""
import pandas as pd
import numpy as np
from files_treatment_new.generic_xls_file import _generic_xls_file
from natsort import natsorted, ns
import numpy as np
from objects_new.Proteins_new import *
class XlsGenBankPatric(_generic_xls_file):
"""
Class specialized in the treatment of the GenBank-format xls file. Note that it inherits from the class _generic_xls_file, which is used to read the data from a file
"""
def __init__(self, path_file, sheet_name = "Sheet1"):
"""
Constructor of the excel GenBank class; it contains all methods for the treatment of GenBank excel files from the RAST platform. After the parameter initialisation, the file is loaded in the init method.
:param path_file: Complete path with file name
:param sheet_name: name of the sheet where the information is
:type path_file: string - required
:type sheet_name: string - required
"""
_generic_xls_file.__init__(self, path_file, sheet_name)
self.read_xls_file()
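# --- hedged usage sketch (hypothetical file path) -----------------------------
# gb = XlsGenBankPatric('/path/to/patric_features.xls', sheet_name='Sheet1')
# contigs = gb.get_contigs_id_sorted()
# proteins = gb.get_proteins_ids_by_contig_id(contigs[0])
# details = gb.get_information_line_by_protein_id(proteins[0])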
def get_contigs_id_sorted(self):
"""
Get all the contigs id sorted naturally.
:return: array of string of id_contigs
:rtype: array(string)
"""
ids_contigs = self.panda_data_file["contig_id"].unique()
if isinstance(ids_contigs[0], str) == True:
ids_contigs = np.asarray(natsorted(ids_contigs, key=lambda y: y.lower()))
return ids_contigs
def get_proteins_ids_by_contig_id(self, contig_id):
"""
Given a contig id, its return all proteins associated. in the file, these one are called "feature_id"
:param contig_id: contig id
:type contig_id: string - required
:return: array of string of id_proteins
:rtype: array(string)
"""
self.panda_data_file['contig_id'] = self.panda_data_file['contig_id'].astype(str)
ids_proteins = self.panda_data_file.loc[(self.panda_data_file["contig_id"] == contig_id, ["feature_id"])]
ids_proteins = ids_proteins["feature_id"].values
return ids_proteins
def get_information_line_by_protein_id(self, protein_id):
"""
Given a protein id return a dictionary with all details of the protein, where:
- Key: key value in the excel document
- value: value of the detail in string format
:param protein_id: contig id
:type protein_id: string - required
:return: dictionary with the details of the protein
:rtype: dictionary
"""
prot = self.panda_data_file["feature_id"] == protein_id
protein_line = self.panda_data_file[self.panda_data_file["feature_id"] == protein_id].iloc[0].to_dict()
if isinstance(protein_line['aa_sequence'], float) == True:
protein_line['aa_sequence'] = ' '
return protein_line
def get_number_of_proteins(self):
"""
Return the number of lines in the dataframe
"""
qty_proteins = self.panda_data_file.shape[0]
return qty_proteins
def get_number_different_contigs(self):
"""
Return the quantity of contigs in excel files
"""
qty_contigs = np.size( | pd.unique(self.panda_data_file['contig_id']) | pandas.unique |
from __future__ import division
import logging
from os import path
import time
from ast import literal_eval
import traceback
from flask import request
from sqlalchemy.sql import select
from sqlalchemy.sql import text
import settings
import skyline_version
from skyline_functions import (
mkdir_p,
get_redis_conn_decoded,
get_redis_conn,
)
from database import (
get_engine, engine_disposal, ionosphere_table_meta, metrics_table_meta,
ionosphere_matched_table_meta,
ionosphere_layers_matched_table_meta,
anomalies_table_meta,
)
skyline_version = skyline_version.__absolute_version__
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
try:
ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
except EnvironmentError as e:
logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings - %s' % e)
ENABLE_WEBAPP_DEBUG = False
# @added 20210107 - Feature #3934: ionosphere_performance
def get_ionosphere_performance(
metric, metric_like, from_timestamp, until_timestamp, format,
# @added 20210128 - Feature #3934: ionosphere_performance
# Improve performance and pass arguments to get_ionosphere_performance
# for cache key
anomalies, new_fps, fps_matched_count, layers_matched_count,
sum_matches, title, period, height, width, fp_type, timezone_str):
"""
Analyse the performance of Ionosphere on a metric or metric namespace and
create the graph resources or json data as required.
:rtype: dict
"""
import datetime
import pytz
import pandas as pd
dev_null = None
ionosphere_performance_debug = False
determine_start_timestamp = False
redis_conn = None
redis_conn_decoded = None
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
tz_from_timestamp_datetime_obj = None
tz_until_timestamp_datetime_obj = None
utc_epoch_timestamp = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone(datetime.timedelta(seconds=0)))
determine_timezone_start_date = False
determine_timezone_end_date = False
user_timezone = pytz.timezone(timezone_str)
utc_timezone = pytz.timezone('UTC')
# @added 20210203 - Feature #3934: ionosphere_performance
# Add default timestamp
start_timestamp = 0
end_timestamp = 0
if from_timestamp == 0:
start_timestamp = 0
determine_start_timestamp = True
if from_timestamp != 0:
if ":" in from_timestamp:
# @modified 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str == 'UTC':
new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
logger.info('get_ionosphere_performance - new_from_timestamp - %s' % str(new_from_timestamp))
else:
utc_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
logger.info('get_ionosphere_performance - utc_from_timestamp - %s' % str(utc_from_timestamp))
from_timestamp_datetime_obj = datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M')
logger.info('get_ionosphere_performance - from_timestamp_datetime_obj - %s' % str(from_timestamp_datetime_obj))
tz_offset = pytz.timezone(timezone_str).localize(from_timestamp_datetime_obj).strftime('%z')
tz_from_date = '%s:00 %s' % (from_timestamp, tz_offset)
logger.info('get_ionosphere_performance - tz_from_date - %s' % str(tz_from_date))
tz_from_timestamp_datetime_obj = datetime.datetime.strptime(tz_from_date, '%Y%m%d %H:%M:%S %z')
tz_epoch_timestamp = int((tz_from_timestamp_datetime_obj - utc_epoch_timestamp).total_seconds())
new_from_timestamp = tz_epoch_timestamp
# new_from_timestamp = time.mktime(datetime.datetime.strptime(tz_from_timestamp, '%Y%m%d %H:%M:%S %z').timetuple())
logger.info('get_ionosphere_performance - new_from_timestamp - %s' % str(new_from_timestamp))
determine_timezone_start_date = True
start_timestamp = int(new_from_timestamp)
# @added 20210203 - Feature #3934: ionosphere_performance
# Add default timestamp
else:
if from_timestamp == 'all':
start_timestamp = 0
determine_start_timestamp = True
else:
start_timestamp = int(from_timestamp)
if from_timestamp == 'all':
start_timestamp = 0
determine_start_timestamp = True
if from_timestamp == 'all':
start_timestamp = 0
determine_start_timestamp = True
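# --- hedged sketch (illustrative only, not part of the Skyline code) ----------
# The branch above turns a '%Y%m%d %H:%M' string in timezone_str into a UTC
# epoch via a pytz offset; a compact equivalent helper (hypothetical name) could be:
def local_string_to_epoch(date_str, tz_name):
    # parse the naive local time, attach the timezone, then take the POSIX epoch
    naive = datetime.datetime.strptime(date_str, '%Y%m%d %H:%M')
    aware = pytz.timezone(tz_name).localize(naive)
    return int(aware.timestamp())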
if until_timestamp and until_timestamp != 'all':
if ":" in until_timestamp:
if timezone_str == 'UTC':
new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
else:
until_timestamp_datetime_obj = datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M')
tz_offset = pytz.timezone(timezone_str).localize(until_timestamp_datetime_obj).strftime('%z')
tz_until_date = '%s:00 %s' % (until_timestamp, tz_offset)
logger.info('get_ionosphere_performance - tz_until_date - %s' % str(tz_until_date))
tz_until_timestamp_datetime_obj = datetime.datetime.strptime(tz_until_date, '%Y%m%d %H:%M:%S %z')
tz_epoch_timestamp = int((tz_until_timestamp_datetime_obj - utc_epoch_timestamp).total_seconds())
new_from_timestamp = tz_epoch_timestamp
# new_from_timestamp = time.mktime(datetime.datetime.strptime(tz_until_timestamp, '%Y%m%d %H:%M:%S %z').timetuple())
end_timestamp = int(new_until_timestamp)
# @added 20210203 - Feature #3934: ionosphere_performance
# Add default timestamp
else:
if until_timestamp == 'all':
end_timestamp = int(time.time())
else:
end_timestamp = int(until_timestamp)
determine_timezone_end_date = False
if until_timestamp == 'all':
end_timestamp = int(time.time())
determine_timezone_end_date = True
if until_timestamp == 0:
end_timestamp = int(time.time())
determine_timezone_end_date = True
start_timestamp_str = str(start_timestamp)
# end_timestamp_str = str(end_timestamp)
if timezone_str == 'UTC':
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
end_date = datetime.datetime.utcfromtimestamp(end_timestamp).strftime('%Y-%m-%d')
else:
if determine_timezone_start_date:
logger.info('get_ionosphere_performance - determine_timezone_start_date - True')
# non_tz_start_datetime_object = datetime.datetime.utcfromtimestamp(start_timestamp)
# logger.info('get_ionosphere_performance - non_tz_start_datetime_object - %s' % str(non_tz_start_datetime_object))
# tz_start_datetime_object = utc_timezone.localize(non_tz_start_datetime_object).astimezone(user_timezone)
# logger.info('get_ionosphere_performance - tz_end_datetime_object - %s' % str(tz_start_datetime_object))
begin_date = tz_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - begin_date with %s timezone applied - %s' % (timezone_str, str(begin_date)))
else:
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
if determine_timezone_end_date:
logger.info('get_ionosphere_performance - determine_timezone_end_date - True')
non_tz_end_datetime_object = datetime.datetime.utcfromtimestamp(end_timestamp)
logger.info('get_ionosphere_performance - non_tz_end_datetime_object - %s' % str(non_tz_end_datetime_object))
tz_end_datetime_object = utc_timezone.localize(non_tz_end_datetime_object).astimezone(user_timezone)
logger.info('get_ionosphere_performance - tz_end_datetime_object - %s' % str(tz_end_datetime_object))
end_date = tz_end_datetime_object.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - end_date with %s timezone applied - %s' % (timezone_str, str(end_date)))
else:
logger.info('get_ionosphere_performance - determine_timezone_end_date - False')
end_date = datetime.datetime.utcfromtimestamp(end_timestamp).strftime('%Y-%m-%d')
original_begin_date = begin_date
# Determine period
frequency = 'D'
if 'period' in request.args:
period = request.args.get('period', 'daily')
if period == 'daily':
frequency = 'D'
extended_end_timestamp = end_timestamp + 86400
if period == 'weekly':
frequency = 'W'
extended_end_timestamp = end_timestamp + (86400 * 7)
if period == 'monthly':
frequency = 'M'
extended_end_timestamp = end_timestamp + (86400 * 30)
extended_end_date = datetime.datetime.utcfromtimestamp(extended_end_timestamp).strftime('%Y-%m-%d')
remove_prefix = False
try:
remove_prefix_str = request.args.get('remove_prefix', 'false')
if remove_prefix_str != 'false':
remove_prefix = True
except Exception as e:
dev_null = e
# Allow for the removal of a prefix from the metric name
use_metric_name = metric
if remove_prefix:
try:
if remove_prefix_str.endswith('.'):
remove_prefix = '%s' % remove_prefix_str
else:
remove_prefix = '%s.' % remove_prefix_str
use_metric_name = metric.replace(remove_prefix, '')
except Exception as e:
logger.error('error :: failed to remove prefix %s from %s - %s' % (str(remove_prefix_str), metric, e))
# @added 20210129 - Feature #3934: ionosphere_performance
# Improve performance and pass arguments to get_ionosphere_performance
# for cache key
yesterday_timestamp = end_timestamp - 86400
yesterday_end_date = datetime.datetime.utcfromtimestamp(yesterday_timestamp).strftime('%Y-%m-%d')
metric_like_str = str(metric_like)
metric_like_wildcard = metric_like_str.replace('.%', '')
# @modified 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
yesterday_data_cache_key = 'performance.%s.metric.%s.metric_like.%s.begin_date.%s.tz.%s.anomalies.%s.new_fps.%s.fps_matched_count.%s.layers_matched_count.%s.sum_matches.%s.period.%s.fp_type.%s' % (
str(yesterday_end_date), str(metric), metric_like_wildcard, str(begin_date),
str(timezone_str), str(anomalies), str(new_fps), str(fps_matched_count),
str(layers_matched_count), str(sum_matches), str(period), str(fp_type))
logger.info('get_ionosphere_performance - yesterday_data_cache_key - %s' % yesterday_data_cache_key)
try:
redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance :: get_redis_conn_decoded failed')
dev_null = e
yesterday_data_raw = None
try:
yesterday_data_raw = redis_conn_decoded.get(yesterday_data_cache_key)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get Redis data for - %s' % yesterday_data_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
yesterday_data = None
if yesterday_data_raw:
try:
yesterday_data = literal_eval(yesterday_data_raw)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get literal_eval Redis data from key - %s' % yesterday_data_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if yesterday_data:
logger.info('get_ionosphere_performance - using cache data from yesterday with %s items' % str(len(yesterday_data)))
new_from = '%s 23:59:59' % yesterday_end_date
# @modified 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str == 'UTC':
new_from_timestamp = time.mktime(datetime.datetime.strptime(new_from, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp) + 1
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
else:
tz_new_from_timestamp_datetime_obj = datetime.datetime.strptime(new_from, '%Y-%m-%d %H:%M:%S')
tz_offset = pytz.timezone(timezone_str).localize(tz_new_from_timestamp_datetime_obj).strftime('%z')
tz_from_timestamp = '%s %s' % (new_from, tz_offset)
new_from_timestamp = time.mktime(datetime.datetime.strptime(tz_from_timestamp, '%Y-%m-%d %H:%M:%S %z').timetuple())
start_timestamp = int(new_from_timestamp) + 1
begin_date = tz_new_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - using cache data from yesterday, set new start_timestamp: %s, begin_date: %s' % (
str(start_timestamp), str(begin_date)))
determine_start_timestamp = False
else:
logger.info('get_ionosphere_performance - no cache data for yesterday_data')
try:
engine, fail_msg, trace = get_engine(skyline_app)
logger.info(fail_msg)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
logger.error('%s' % fail_msg)
logger.error('error :: get_ionosphere_performance - could not get a MySQL engine')
dev_null = e
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: get_ionosphere_performance - engine not obtained'
logger.error(fail_msg)
raise
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - failed to get metrics_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
metric_id = None
metric_ids = []
if metric_like != 'all':
metric_like_str = str(metric_like)
logger.info('get_ionosphere_performance - metric_like - %s' % metric_like_str)
metrics_like_query = text("""SELECT id FROM metrics WHERE metric LIKE :like_string""")
metric_like_wildcard = metric_like_str.replace('.%', '')
request_key = '%s.%s.%s.%s' % (metric_like_wildcard, begin_date, end_date, frequency)
plot_title = '%s - %s' % (metric_like_wildcard, period)
logger.info('get_ionosphere_performance - metric like query, cache key being generated from request key - %s' % request_key)
try:
connection = engine.connect()
result = connection.execute(metrics_like_query, like_string=metric_like_str)
connection.close()
for row in result:
m_id = row['id']
metric_ids.append(int(m_id))
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: get_ionosphere_performance - could not determine ids from metrics table LIKE query - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
start_timestamp_date = None
# If the from_timestamp is 0 or all
if determine_start_timestamp:
created_dates = []
try:
connection = engine.connect()
# stmt = select([metrics_table.c.created_timestamp], metrics_table.c.id.in_(metric_ids)).limit(1)
stmt = select([metrics_table.c.created_timestamp], metrics_table.c.id.in_(metric_ids))
result = connection.execute(stmt)
for row in result:
# start_timestamp_date = row['created_timestamp']
created_dates.append(row['created_timestamp'])
# break
connection.close()
start_timestamp_date = sorted(created_dates)[0]
if not start_timestamp_date:
logger.error('error :: get_ionosphere_performance - could not determine created_timestamp - returning empty')
if engine:
engine_disposal(skyline_app, engine)
return {}
start_timestamp_str = str(start_timestamp_date)
logger.info('get_ionosphere_performance - determined start_timestamp_str - %s' % start_timestamp_str)
new_from_timestamp = time.mktime(datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp)
logger.info('get_ionosphere_performance - determined start_timestamp - %s' % str(start_timestamp))
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - determined begin_date - %s' % str(begin_date))
# @added 20210203 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
logger.info('get_ionosphere_performance - determining %s datetime from UTC start_timestamp_str - %s' % (timezone_str, str(start_timestamp_str)))
from_timestamp_datetime_obj = datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S')
logger.info('get_ionosphere_performance - from_timestamp_datetime_obj - %s' % str(from_timestamp_datetime_obj))
tz_offset = pytz.timezone(timezone_str).localize(from_timestamp_datetime_obj).strftime('%z')
tz_from_date = '%s %s' % (start_timestamp_str, tz_offset)
logger.info('get_ionosphere_performance - tz_from_date - %s' % str(tz_from_date))
tz_from_timestamp_datetime_obj = datetime.datetime.strptime(tz_from_date, '%Y-%m-%d %H:%M:%S %z')
begin_date = tz_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - begin_date with %s timezone applied - %s' % (timezone_str, str(begin_date)))
determine_start_timestamp = False
request_key = '%s.%s.%s.%s' % (metric_like_wildcard, begin_date, end_date, frequency)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: get_ionosphere_performance - could not determine ids from metrics table LIKE query - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
logger.info('get_ionosphere_performance - metric_ids length - %s' % str(len(metric_ids)))
if not metric_ids:
# stmt = select([metrics_table]).where(metrics_table.c.id > 0)
if metric == 'all':
request_key = 'all.%s.%s.%s' % (begin_date, end_date, frequency)
plot_title = 'All metrics - %s' % period
logger.info('get_ionosphere_performance - metric all query, cache key being generated from request key - %s' % request_key)
# If the from_timestamp is 0 or all
if determine_start_timestamp:
try:
connection = engine.connect()
stmt = select([metrics_table.c.created_timestamp]).limit(1)
result = connection.execute(stmt)
for row in result:
start_timestamp_date = row['created_timestamp']
break
connection.close()
start_timestamp_str = str(start_timestamp_date)
logger.info('get_ionosphere_performance - determined start_timestamp_str - %s' % start_timestamp_str)
new_from_timestamp = time.mktime(datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp)
logger.info('get_ionosphere_performance - determined start_timestamp - %s' % str(start_timestamp))
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - determined begin_date - %s' % str(begin_date))
# @added 20210203 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
logger.info('get_ionosphere_performance - determining %s datetime from UTC start_timestamp_str - %s' % (timezone_str, str(start_timestamp_str)))
from_timestamp_datetime_obj = datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S')
logger.info('get_ionosphere_performance - from_timestamp_datetime_obj - %s' % str(from_timestamp_datetime_obj))
tz_offset = pytz.timezone(timezone_str).localize(from_timestamp_datetime_obj).strftime('%z')
tz_from_date = '%s %s' % (start_timestamp_str, tz_offset)
logger.info('get_ionosphere_performance - tz_from_date - %s' % str(tz_from_date))
tz_from_timestamp_datetime_obj = datetime.datetime.strptime(tz_from_date, '%Y-%m-%d %H:%M:%S %z')
begin_date = tz_from_timestamp_datetime_obj.strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - begin_date with %s timezone applied - %s' % (timezone_str, str(begin_date)))
determine_start_timestamp = False
request_key = 'all.%s.%s.%s' % (begin_date, end_date, frequency)
logger.info('get_ionosphere_performance - metric all query, determine_start_timestamp cache key being generated from request key - %s' % request_key)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: get_ionosphere_performance - could not determine ids from metrics table LIKE query - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
try:
request_key = '%s.%s.%s.%s' % (metric, begin_date, end_date, frequency)
plot_title = '%s - %s' % (use_metric_name, period)
logger.info('get_ionosphere_performance - metric all query, cache key being generated from request key - %s' % request_key)
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.id > 0)
result = connection.execute(stmt)
for row in result:
metric_id_str = row['id']
r_metric_id = int(metric_id_str)
metric_ids.append(r_metric_id)
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine metric ids from metrics - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
raise
if metric != 'all':
logger.info('get_ionosphere_performance - metric - %s' % metric)
try:
request_key = '%s.%s.%s.%s' % (metric, begin_date, end_date, frequency)
plot_title = '%s - %s' % (use_metric_name, period)
logger.info('get_ionosphere_performance - metric query, cache key being generated from request key - %s' % request_key)
connection = engine.connect()
stmt = select([metrics_table]).where(metrics_table.c.metric == str(metric))
result = connection.execute(stmt)
for row in result:
metric_id_str = row['id']
metric_id = int(metric_id_str)
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine metric id from metrics - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
raise
if determine_start_timestamp and metric_id:
try:
connection = engine.connect()
stmt = select([metrics_table.c.created_timestamp]).where(metrics_table.c.metric == str(metric))
result = connection.execute(stmt)
for row in result:
start_timestamp_date = row['created_timestamp']
break
connection.close()
start_timestamp_str = str(start_timestamp_date)
logger.info('get_ionosphere_performance - determined start_timestamp_str - %s' % start_timestamp_str)
new_from_timestamp = time.mktime(datetime.datetime.strptime(start_timestamp_str, '%Y-%m-%d %H:%M:%S').timetuple())
start_timestamp = int(new_from_timestamp)
logger.info('get_ionosphere_performance - determined start_timestamp - %s' % str(start_timestamp))
begin_date = datetime.datetime.utcfromtimestamp(start_timestamp).strftime('%Y-%m-%d')
logger.info('get_ionosphere_performance - determined begin_date - %s' % str(begin_date))
request_key = '%s.%s.%s.%s' % (metric, begin_date, end_date, frequency)
logger.info('get_ionosphere_performance - metric query, determine_start_timestamp cache key being generated from request key - %s' % request_key)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: get_ionosphere_performance - could not determine ids from metrics table LIKE query - %s' % e)
if engine:
engine_disposal(skyline_app, engine)
return {}
logger.info('get_ionosphere_performance - metric - %s' % str(metric))
logger.info('get_ionosphere_performance - metric_id - %s' % str(metric_id))
if metric != 'all':
if not metric_ids and not metric_id:
if engine:
engine_disposal(skyline_app, engine)
logger.info('get_ionosphere_performance - no metric_id or metric_ids, nothing to do')
performance = {
'performance': {'date': None, 'reason': 'no metric data found'},
'request_key': request_key,
'success': False,
'reason': 'no data for metric/s',
'plot': None,
'csv': None,
}
return performance
logger.info('get_ionosphere_performance - metric_id: %s, metric_ids length: %s' % (
str(metric_id), str(len(metric_ids))))
# Create request_key performance directory
ionosphere_dir = path.dirname(settings.IONOSPHERE_DATA_FOLDER)
performance_dir = '%s/performance/%s' % (ionosphere_dir, request_key)
if not path.exists(performance_dir):
mkdir_p(performance_dir)
# Report anomalies
report_anomalies = False
if 'anomalies' in request.args:
        anomalies_str = request.args.get('anomalies', 'false')
if anomalies_str == 'true':
report_anomalies = True
anomalies = []
anomalies_ts = []
if report_anomalies:
try:
anomalies_table, log_msg, trace = anomalies_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('anomalies_table OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to get anomalies_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
try:
connection = engine.connect()
if metric_ids:
# stmt = select([anomalies_table.c.id, anomalies_table.c.anomaly_timestamp], anomalies_table.c.metric_id.in_(metric_ids)).\
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp]).\
where(anomalies_table.c.anomaly_timestamp >= start_timestamp).\
where(anomalies_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
elif metric_id:
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp]).\
where(anomalies_table.c.metric_id == int(metric_id)).\
where(anomalies_table.c.anomaly_timestamp >= start_timestamp).\
where(anomalies_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
else:
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp]).\
where(anomalies_table.c.anomaly_timestamp >= start_timestamp).\
where(anomalies_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
for row in result:
r_metric_id = row['metric_id']
append_result = False
if r_metric_id == metric_id:
append_result = True
if not append_result:
if r_metric_id in metric_ids:
append_result = True
if append_result:
anomaly_id = row['id']
anomaly_timestamp = row['anomaly_timestamp']
anomalies.append(int(anomaly_timestamp))
# anomalies_ts.append([datetime.datetime.fromtimestamp(int(anomaly_timestamp)), int(anomaly_id)])
anomalies_ts.append([int(anomaly_timestamp), int(anomaly_id)])
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not determine anomaly ids')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise
logger.info('get_ionosphere_performance - anomalies_ts length - %s' % str(len(anomalies_ts)))
fp_type = 'all'
if 'fp_type' in request.args:
fp_type = request.args.get('fp_type', 'all')
# Get fp_ids
fp_ids = []
fp_ids_ts = []
fp_ids_cache_key = 'performance.%s.%s.fp_ids' % (request_key, timezone_str)
fp_ids_ts_cache_key = 'performance.%s.%s.fp_ids_ts' % (request_key, timezone_str)
if not redis_conn_decoded:
try:
redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance :: get_redis_conn_decoded failed')
dev_null = e
try:
fp_ids_raw = redis_conn_decoded.get(fp_ids_cache_key)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get Redis data for - %s' % fp_ids_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids_raw:
try:
fp_ids = literal_eval(fp_ids_raw)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get literal_eval Redis data from key - %s' % fp_ids_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids:
logger.info('get_ionosphere_performance - using fp_ids from cache')
try:
fp_ids_ts_raw = redis_conn_decoded.get(fp_ids_ts_cache_key)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get Redis data for - %s' % fp_ids_ts_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids_ts_raw:
try:
fp_ids_ts = literal_eval(fp_ids_ts_raw)
except Exception as e:
trace = traceback.format_exc()
fail_msg = 'error :: get_ionosphere_performance - could not get literal_eval Redis data from key - %s' % fp_ids_ts_cache_key
logger.error(trace)
logger.error(fail_msg)
dev_null = e
if fp_ids_ts:
logger.info('get_ionosphere_performance - using fp_ids_ts from cache')
if not fp_ids or not fp_ids_ts:
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(log_msg)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - failed to get ionosphere_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
try:
logger.info('get_ionosphere_performance - determining fp ids of type %s' % fp_type)
connection = engine.connect()
if metric_ids:
if fp_type == 'user':
# stmt = select([ionosphere_table.c.id, ionosphere_table.c.anomaly_timestamp], ionosphere_table.c.metric_id.in_(metric_ids)).\
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation <= 1)
elif fp_type == 'learnt':
# stmt = select([ionosphere_table.c.id, ionosphere_table.c.anomaly_timestamp], ionosphere_table.c.metric_id.in_(metric_ids)).\
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation >= 2)
else:
# stmt = select([ionosphere_table.c.id, ionosphere_table.c.anomaly_timestamp], ionosphere_table.c.metric_id.in_(metric_ids)).\
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp)
logger.info('get_ionosphere_performance - determining fp ids of type %s for metric_ids' % fp_type)
result = connection.execute(stmt)
elif metric_id:
if fp_type == 'user':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.metric_id == int(metric_id)).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation <= 1)
elif fp_type == 'learnt':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.metric_id == int(metric_id)).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation >= 2)
else:
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.metric_id == int(metric_id)).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp)
logger.info('get_ionosphere_performance - determining fp ids for metric_id')
result = connection.execute(stmt)
else:
if fp_type == 'user':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation <= 1)
elif fp_type == 'learnt':
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp).\
where(ionosphere_table.c.generation >= 2)
else:
stmt = select([ionosphere_table.c.id, ionosphere_table.c.metric_id, ionosphere_table.c.anomaly_timestamp]).\
where(ionosphere_table.c.enabled == 1).\
where(ionosphere_table.c.anomaly_timestamp <= end_timestamp)
logger.info('get_ionosphere_performance - determining fp ids for all metrics')
result = connection.execute(stmt)
for row in result:
r_metric_id = row['metric_id']
append_result = False
if r_metric_id == metric_id:
append_result = True
if r_metric_id in metric_ids:
append_result = True
if append_result:
fp_id = row['id']
anomaly_timestamp = row['anomaly_timestamp']
fp_ids.append(int(fp_id))
fp_ids_ts.append([int(anomaly_timestamp), int(fp_id)])
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine fp_ids')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise
logger.info('get_ionosphere_performance - fp_ids_ts length - %s' % str(len(fp_ids_ts)))
if fp_ids:
if not redis_conn:
try:
redis_conn = get_redis_conn(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed for get_ionosphere_performance')
dev_null = e
if redis_conn:
try:
logger.info('get_ionosphere_performance - setting Redis performance key with fp_ids containing %s items' % str(len(fp_ids)))
redis_conn.setex(fp_ids_cache_key, 600, str(fp_ids))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed to set - %s' % fp_ids_cache_key)
dev_null = e
if fp_ids_ts:
if not redis_conn:
try:
redis_conn = get_redis_conn(skyline_app)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed for get_ionosphere_performance')
dev_null = e
if redis_conn:
try:
logger.info('get_ionosphere_performance - setting Redis performance key with fp_ids_ts containing %s items' % str(len(fp_ids_ts)))
redis_conn.setex(fp_ids_ts_cache_key, 600, str(fp_ids_ts))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_redis_conn failed to set - %s' % fp_ids_ts_cache_key)
dev_null = e
# Get fp matches
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - failed to get ionosphere_matched_table_meta_table meta')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
fps_matched_ts = []
if fp_ids:
try:
connection = engine.connect()
# stmt = select([ionosphere_matched_table.c.id, ionosphere_matched_table.c.metric_timestamp], ionosphere_matched_table.c.fp_id.in_(fp_ids)).\
stmt = select([ionosphere_matched_table.c.id, ionosphere_matched_table.c.fp_id, ionosphere_matched_table.c.metric_timestamp]).\
where(ionosphere_matched_table.c.metric_timestamp >= start_timestamp).\
where(ionosphere_matched_table.c.metric_timestamp <= end_timestamp)
result = connection.execute(stmt)
for row in result:
append_result = False
if metric == 'all' and metric_like == 'all':
append_result = True
if not append_result:
fp_id = row['fp_id']
if fp_id in fp_ids:
append_result = True
if append_result:
matched_id = row['id']
metric_timestamp = row['metric_timestamp']
fps_matched_ts.append([int(metric_timestamp), int(matched_id)])
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: get_ionosphere_performance - could not determine timestamps from ionosphere_matched')
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise
logger.info('get_ionosphere_performance - fps_matched_ts - %s' % str(len(fps_matched_ts)))
# Get layers matches
try:
ionosphere_layers_matched_table, log_msg, trace = ionosphere_layers_matched_table_meta(skyline_app, engine)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - failed to get ionosphere_layers_matched_table meta'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
layers_matched_ts = []
if fp_ids:
try:
connection = engine.connect()
# stmt = select([ionosphere_layers_matched_table.c.id, ionosphere_layers_matched_table.c.anomaly_timestamp], ionosphere_layers_matched_table.c.fp_id.in_(fp_ids)).\
stmt = select([ionosphere_layers_matched_table.c.id, ionosphere_layers_matched_table.c.fp_id, ionosphere_layers_matched_table.c.anomaly_timestamp]).\
where(ionosphere_layers_matched_table.c.anomaly_timestamp >= start_timestamp).\
where(ionosphere_layers_matched_table.c.anomaly_timestamp <= end_timestamp)
result = connection.execute(stmt)
for row in result:
append_result = False
if metric == 'all' and metric_like == 'all':
append_result = True
if not append_result:
fp_id = row['fp_id']
if fp_id in fp_ids:
append_result = True
if append_result:
matched_layers_id = row['id']
matched_timestamp = row['anomaly_timestamp']
layers_matched_ts.append([int(matched_timestamp), int(matched_layers_id)])
connection.close()
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - could not determine timestamps from ionosphere_layers_matched'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
logger.info('get_ionosphere_performance - layers_matched_ts - %s' % str(len(layers_matched_ts)))
anomalies_df = []
if anomalies_ts:
try:
anomalies_df = pd.DataFrame(anomalies_ts, columns=['date', 'id'])
anomalies_df['date'] = pd.to_datetime(anomalies_df['date'], unit='s')
# @added 20210202 - Feature #3934: ionosphere_performance
# Handle user timezone
if timezone_str != 'UTC':
anomalies_df['date'] = anomalies_df['date'].dt.tz_localize('UTC').dt.tz_convert(user_timezone)
anomalies_df = anomalies_df.set_index(pd.DatetimeIndex(anomalies_df['date']))
anomalies_df = anomalies_df.resample(frequency).apply({'id': 'count'})
anomalies_df.rename(columns={'id': 'anomaly_count'}, inplace=True)
if ionosphere_performance_debug:
fname_out = '%s/%s.anomalies_df.csv' % (settings.SKYLINE_TMP_DIR, request_key)
anomalies_df.to_csv(fname_out)
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: get_ionosphere_performance - could not create anomalies_df'
logger.error('%s' % fail_msg)
dev_null = e
if engine:
engine_disposal(skyline_app, engine)
raise # to webapp to return in the UI
fp_ids_df = []
fps_total_df = []
if fp_ids_ts:
try:
            fp_ids_df = pd.DataFrame(fp_ids_ts, columns=['date', 'id'])
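            # Illustrative sketch (not part of the original code): fp_ids_ts holds
            # [timestamp, fp_id] pairs, and the usual next step mirrors the
            # anomalies_df handling above - convert to a DatetimeIndex and resample
            # into per-period counts (the renamed column name here is illustrative):
            #     fp_ids_df['date'] = pd.to_datetime(fp_ids_df['date'], unit='s')
            #     fp_ids_df = fp_ids_df.set_index(pd.DatetimeIndex(fp_ids_df['date']))
            #     fp_ids_df = fp_ids_df.resample(frequency).apply({'id': 'count'})
            #     fp_ids_df.rename(columns={'id': 'fps_created_count'}, inplace=True)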
# -*- coding: utf-8 -*-
"""
Protection relay for TAP-02
"""
# Importing libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
import streamlit as st
# =============================================================================
# Functions
# =============================================================================
# =============================================================================
# # Page title
# =============================================================================
st.title('Proteção do TAP-02')
# =============================================================================
# Technical data
# =============================================================================
expander_6 = st.beta_expander(label='Dados técnicos')
with expander_6:
st.write('Dados do relé')
st.text('Marca: Siemens')
st.text('Modelo: 7UT63')
st.write('Dados do TA-02')
st.text('Primário: 525 kV, Y aterrado')
    st.text('Secundário: 13,8 kV, Y aterrado')
st.write('RTCs')
st.text('06TA02: 1200/5 A')
st.text('86TA02: 1200/5 A')
st.text('52-A2: 2000/5 A')
st.text('Neutro 13,8 kV: 2000/5 A')
# =============================================================================
# Single-line diagram
# =============================================================================
expander_1 = st.beta_expander(label='Diagrama unifilar')
with expander_1:
image = Image.open('src/unifilar.JPG')
st.image(image)
# =============================================================================
# # Relay settings table
# =============================================================================
expander_2 = st.beta_expander(label='Ajustes')
with expander_2:
st.write("Ajustes:")
dict_ajustes = {'Parâmetro': ['Pick-up (pu)', 'Tempo de operação (s)'],
'Valor': [0.5, 11] }
df_ajustes = pd.DataFrame(dict_ajustes)
st.table(df_ajustes)
# Validate parameters
if dict_ajustes['Valor'][1] > 10:
st.write(ValueError('O tempo de operação deve ser no máximo 10 s'))
# =============================================================================
# Pre-fault configuration
# =============================================================================
expander_3 = st.beta_expander(label='Configuração pré-falta')
with expander_3:
c1, c2, c3 = st.beta_columns(3)
sampling_freq = c1.number_input(label='Taxa de amostragem (kHz)',
min_value=1,
max_value=20,
value=10)
dt = 1 / (sampling_freq*1000) # sampling period in seconds
t_final = 1 # end of simulation
t_vec = np.arange(start=0, stop=t_final, step=dt) # time array
# =============================================================================
# # CT 06TA2 current
# =============================================================================
st.subheader('Corrente do TC 06TA2')
c1, c2, c3 = st.beta_columns(3)
c1.write('Fase A')
tc_06ta2_r_mag = c1.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=500,
step=100,
key='tc_06ta2_r_mag')
tc_06ta2_r_f = c1.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_06ta2_r_f')
tc_06ta2_r_phi = c1.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=0,
key='tc_06ta2_r_phi')
c2.write('Fase B')
tc_06ta2_s_mag = c2.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=500,
step=100,
key='tc_06ta2_s_mag')
tc_06ta2_s_f = c2.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_06ta2_s_f')
tc_06ta2_s_phi = c2.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=-120,
key='tc_06ta2_s_phi')
c3.write('Fase C')
tc_06ta2_t_mag = c3.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=500,
step=100,
key='tc_06ta2_t_mag')
tc_06ta2_t_f = c3.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_06ta2_t_f')
tc_06ta2_t_phi = c3.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=120,
key='tc_06ta2_t_phi')
tc_06ta2_r_phi = tc_06ta2_r_phi*np.pi/180
tc_06ta2_s_phi = tc_06ta2_s_phi*np.pi/180
tc_06ta2_t_phi = tc_06ta2_t_phi*np.pi/180
tc_06ta2_r = [tc_06ta2_r_mag*np.sin(2*np.pi*tc_06ta2_r_f*t + tc_06ta2_r_phi) \
for t in t_vec]
tc_06ta2_s = [tc_06ta2_s_mag*np.sin(2*np.pi*tc_06ta2_s_f*t + tc_06ta2_s_phi) \
for t in t_vec]
tc_06ta2_t = [tc_06ta2_t_mag*np.sin(2*np.pi*tc_06ta2_t_f*t + tc_06ta2_t_phi) \
for t in t_vec]
# =============================================================================
# # CT 86TA2 current
# =============================================================================
st.subheader('Corrente do TC 86TA2')
c1, c2, c3 = st.beta_columns(3)
c1.write('Fase A')
tc_86ta2_r_mag = c1.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=500,
step=100,
key='tc_86ta2_r_mag')
tc_86ta2_r_f = c1.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_86ta2_r_f')
tc_86ta2_r_phi = c1.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=0,
key='tc_86ta2_r_phi')
c2.write('Fase B')
tc_86ta2_s_mag = c2.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=500,
step=100,
key='tc_86ta2_s_mag')
tc_86ta2_s_f = c2.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_86ta2_s_f')
tc_86ta2_s_phi = c2.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=-120,
key='tc_86ta2_s_phi')
c3.write('Fase C')
tc_86ta2_t_mag = c3.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=500,
step=100,
key='tc_86ta2_t_mag')
tc_86ta2_t_f = c3.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_86ta2_t_f')
tc_86ta2_t_phi = c3.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=120,
key='tc_86ta2_t_phi')
tc_86ta2_r_phi = tc_86ta2_r_phi*np.pi/180
tc_86ta2_s_phi = tc_86ta2_s_phi*np.pi/180
tc_86ta2_t_phi = tc_86ta2_t_phi*np.pi/180
tc_86ta2_r = [tc_86ta2_r_mag*np.sin(2*np.pi*tc_86ta2_r_f*t + tc_86ta2_r_phi) \
for t in t_vec]
tc_86ta2_s = [tc_86ta2_s_mag*np.sin(2*np.pi*tc_86ta2_s_f*t + tc_86ta2_s_phi) \
for t in t_vec]
tc_86ta2_t = [tc_86ta2_t_mag*np.sin(2*np.pi*tc_86ta2_t_f*t + tc_86ta2_t_phi) \
for t in t_vec]
# =============================================================================
# # CT 52-A2 current
# =============================================================================
st.subheader('Corrente do TC 52-A2')
c1, c2, c3 = st.beta_columns(3)
c1.write('Fase A')
tc_52a2_r_mag = c1.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=1000,
step=100,
key='tc_52a2_r_mag')
tc_52a2_r_f = c1.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_52a2_r_f')
tc_52a2_r_phi = c1.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=0,
key='tc_52a2_r_phi')
c2.write('Fase B')
tc_52a2_s_mag = c2.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=1000,
step=100,
key='tc_52a2_s_mag')
tc_52a2_s_f = c2.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_52a2_s_f')
tc_52a2_s_phi = c2.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=-120,
key='tc_52a2_s_phi')
c3.write('Fase C')
tc_52a2_t_mag = c3.number_input(label='Mag (A)',
min_value=1,
max_value=4000,
value=1000,
step=100,
key='tc_52a2_t_mag')
tc_52a2_t_f = c3.number_input(label='f (Hz)',
min_value=55,
max_value=65,
value=60,
key='tc_52a2_t_f')
tc_52a2_t_phi = c3.number_input(label='Fase (°)',
min_value=-180,
max_value=180,
value=120,
key='tc_52a2_t_phi')
tc_52a2_r_phi = tc_52a2_r_phi*np.pi/180
tc_52a2_s_phi = tc_52a2_s_phi*np.pi/180
tc_52a2_t_phi = tc_52a2_t_phi*np.pi/180
tc_52a2_r = [tc_52a2_r_mag*np.sin(2*np.pi*tc_52a2_r_f*t + tc_52a2_r_phi) \
for t in t_vec]
tc_52a2_s = [tc_52a2_s_mag*np.sin(2*np.pi*tc_52a2_s_f*t + tc_52a2_s_phi) \
for t in t_vec]
tc_52a2_t = [tc_52a2_t_mag*np.sin(2*np.pi*tc_52a2_t_f*t + tc_52a2_t_phi) \
for t in t_vec]
# =============================================================================
# Fault configuration
# =============================================================================
expander_4 = st.beta_expander(label='Configuração da Falta')
with expander_4:
c1, c2, c3 = st.beta_columns(3)
pre_fault = c1.number_input(label='Tempo pré-falta (ms)',
min_value=40,
max_value=300,
value=100)
fault_location = c2.selectbox('Local da falta:',
['Interna', 'Externa'])
fault_type = c3.selectbox('Tipo da falta:',
['Circuito aberto L',
'Circuito aberto L-L',
'Curto-circuito L',
'Curto-circuito L-L',
'Curto-circuito L-L-L'])
# =============================================================================
# Generate the fault oscillography
# =============================================================================
t_vec_fault_start_pos = int(pre_fault / 1000 / dt)  # index of the time vector at fault inception
fault_created = False # Flag to indicate if the fault has been configured and applied
t_fault_end = t_vec[-t_vec_fault_start_pos]
t_fault = np.arange(start=0, stop=t_fault_end, step=dt) # time array for calculating the fault
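# Worked example (illustrative): with sampling_freq = 10 kHz, dt = 1e-4 s; a
# pre_fault of 100 ms then gives t_vec_fault_start_pos = 1000, so the fault is
# applied to t_vec[1000:] and t_fault covers the remaining 0.9 s of the simulation.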
fault_created = st.button(label='Criar falta', key='criar_falta')
if (fault_type == 'Circuito aberto L') & fault_created:
last_value = tc_06ta2_r[t_vec_fault_start_pos]
tc_06ta2_r_fault = last_value * np.exp(-500*t_fault)
tc_06ta2_r[t_vec_fault_start_pos:] = tc_06ta2_r_fault
last_value = tc_86ta2_r[t_vec_fault_start_pos]
tc_86ta2_r_fault = last_value * np.exp(-500*t_fault)
tc_86ta2_r[t_vec_fault_start_pos:] = tc_86ta2_r_fault
last_value = tc_52a2_r[t_vec_fault_start_pos]
tc_52a2_r_fault = last_value * np.exp(-500*t_fault)
tc_52a2_r[t_vec_fault_start_pos:] = tc_52a2_r_fault
if (fault_type == 'Circuito aberto L-L') & fault_created:
last_value = tc_06ta2_r[t_vec_fault_start_pos]
tc_06ta2_r_fault = last_value * np.exp(-500*t_fault)
tc_06ta2_r[t_vec_fault_start_pos:] = tc_06ta2_r_fault
last_value = tc_06ta2_s[t_vec_fault_start_pos]
tc_06ta2_s_fault = last_value * np.exp(-500*t_fault)
tc_06ta2_s[t_vec_fault_start_pos:] = tc_06ta2_s_fault
last_value = tc_86ta2_r[t_vec_fault_start_pos]
tc_86ta2_r_fault = last_value * np.exp(-500*t_fault)
tc_86ta2_r[t_vec_fault_start_pos:] = tc_86ta2_r_fault
last_value = tc_86ta2_s[t_vec_fault_start_pos]
tc_86ta2_s_fault = last_value * np.exp(-500*t_fault)
tc_86ta2_s[t_vec_fault_start_pos:] = tc_86ta2_s_fault
last_value = tc_52a2_r[t_vec_fault_start_pos]
tc_52a2_r_fault = last_value * np.exp(-500*t_fault)
tc_52a2_r[t_vec_fault_start_pos:] = tc_52a2_r_fault
last_value = tc_52a2_s[t_vec_fault_start_pos]
tc_52a2_s_fault = last_value * np.exp(-500*t_fault)
tc_52a2_s[t_vec_fault_start_pos:] = tc_52a2_s_fault
cond_1 = (fault_type == 'Curto-circuito L-L-L')
cond_2 = fault_created
cond_3 = (fault_location =='Interna')
if cond_1 & cond_2 & cond_3:
short_circuit_level = 3 #current increases 3 times when a short-circuit occurs (arbitrary, needs correction with the protection study)
after_pre_fault = tc_06ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_06ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_06ta2_t[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_t[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_t[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_t[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_r[t_vec_fault_start_pos:]
fault_values = [ 0 * x for x in after_pre_fault ]
tc_52a2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_s[t_vec_fault_start_pos:]
fault_values = [ 0 * x for x in after_pre_fault ]
tc_52a2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_t[t_vec_fault_start_pos:]
fault_values = [ 0 * x for x in after_pre_fault ]
tc_52a2_t[t_vec_fault_start_pos:] = fault_values
cond_1 = (fault_type == 'Curto-circuito L-L-L')
cond_2 = fault_created
cond_3 = (fault_location =='Externa')
if cond_1 & cond_2 & cond_3:
    short_circuit_level = 1.4  # current increases 1.4 times for an external short-circuit (arbitrary, needs correction with the protection study)
after_pre_fault = tc_06ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_06ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_06ta2_t[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_t[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_t[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_t[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_52a2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_52a2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_t[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_52a2_t[t_vec_fault_start_pos:] = fault_values
cond_1 = (fault_type == 'Curto-circuito L-L')
cond_2 = fault_created
cond_3 = (fault_location =='Interna')
if cond_1 & cond_2 & cond_3:
short_circuit_level = 3 #current increases 3 times when a short-circuit occurs (arbitrary, needs correction with the protection study)
after_pre_fault = tc_06ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_06ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_r[t_vec_fault_start_pos:]
fault_values = [ 0 * x for x in after_pre_fault ]
tc_52a2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_s[t_vec_fault_start_pos:]
fault_values = [ 0 * x for x in after_pre_fault ]
tc_52a2_s[t_vec_fault_start_pos:] = fault_values
cond_1 = (fault_type == 'Curto-circuito L-L')
cond_2 = fault_created
cond_3 = (fault_location =='Externa')
if cond_1 & cond_2 & cond_3:
    short_circuit_level = 1.4  # current increases 1.4 times for an external short-circuit (arbitrary, needs correction with the protection study)
after_pre_fault = tc_06ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_06ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_s[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_52a2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_s[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_52a2_s[t_vec_fault_start_pos:] = fault_values
cond_1 = (fault_type == 'Curto-circuito L')
cond_2 = fault_created
cond_3 = (fault_location =='Interna')
if cond_1 & cond_2 & cond_3:
short_circuit_level = 3 #current increases 3 times when a short-circuit occurs (arbitrary, needs correction with the protection study)
after_pre_fault = tc_06ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_r[t_vec_fault_start_pos:]
fault_values = [ 0 * x for x in after_pre_fault ]
tc_52a2_r[t_vec_fault_start_pos:] = fault_values
cond_1 = (fault_type == 'Curto-circuito L')
cond_2 = fault_created
cond_3 = (fault_location == 'Externa')
if cond_1 & cond_2 & cond_3:
    short_circuit_level = 1.4  # current increases 1.4 times for an external short-circuit (arbitrary, needs correction with the protection study)
after_pre_fault = tc_06ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_06ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_86ta2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_86ta2_r[t_vec_fault_start_pos:] = fault_values
after_pre_fault = tc_52a2_r[t_vec_fault_start_pos:]
fault_values = [ short_circuit_level * x for x in after_pre_fault ]
tc_52a2_r[t_vec_fault_start_pos:] = fault_values
fault_created = False
# =============================================================================
# Oscillography
# =============================================================================
expander_5 = st.beta_expander(label='Oscilografia')
with expander_5:
st.header('Oscilografia')
fig, axes = plt.subplots(nrows=9, ncols=1, sharex=True, figsize=(10,6),
sharey=True)
# plotting
axes[0].plot(pd.DataFrame({'IA-06TA2': tc_06ta2_r}, index=t_vec))
axes[1].plot(pd.DataFrame({'IB-06TA2': tc_06ta2_s}, index=t_vec))
axes[2].plot(pd.DataFrame({'IC-06TA2': tc_06ta2_t}, index=t_vec))
axes[3].plot(pd.DataFrame({'IA-86TA2': tc_86ta2_r}, index=t_vec))
    axes[4].plot(pd.DataFrame({'IB-86TA2': tc_86ta2_s}, index=t_vec))
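    # Assumed continuation (illustrative): the remaining subplot rows follow the
    # same pattern as the five above; channel names are inferred from the CT labels.
    axes[5].plot(pd.DataFrame({'IC-86TA2': tc_86ta2_t}, index=t_vec))
    axes[6].plot(pd.DataFrame({'IA-52A2': tc_52a2_r}, index=t_vec))
    axes[7].plot(pd.DataFrame({'IB-52A2': tc_52a2_s}, index=t_vec))
    axes[8].plot(pd.DataFrame({'IC-52A2': tc_52a2_t}, index=t_vec))
    st.pyplot(fig)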
import pandas as pd
import generator_labeler.ExecutionPlanAnalyzer as EPAnalyzer
from IPython.display import display
import numpy as np
from scipy.stats import kurtosis, skew
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import re
####################
## Table features ##
####################
from generator_labeler.TaskMonitor.TaskManagerClient import TaskDetailsParser
def get_tables_features(df_tables, data_cardinality=False, factors=-1, verbose=True):
one_hot = sorted(list(set([t for tables in df_tables.values for t in tables])))
one_hot_enc = {one_hot[c]: c for c in range(one_hot.__len__())}
one_hot_dec = {c: one_hot[c] for c in range(one_hot.__len__())}
print("one_hot_enc:", one_hot_enc, "\none_hot_dec:", one_hot_dec)
def encode(x, one_hot_enc):
enc_vec = np.zeros(one_hot_enc.__len__())
for v in x:
idx = one_hot_enc[v]
enc_vec[idx] = 1.0
return enc_vec
df_tables_enc = df_tables.apply(lambda x: encode(x, one_hot_enc))
if data_cardinality:
cards = []
ks = []
for k, values in df_tables_enc.iteritems():
new_values = [v * data_cardinality[k[1]][one_hot_dec[idx]] for idx, v in enumerate(values)]
cards.append(new_values)
ks.append(k)
cards_df = pd.DataFrame(cards)
cards_df["plan_id"] = [k[0] for k in ks]
cards_df["data_id"] = [k[1] for k in ks]
cards_df = cards_df.set_index(["plan_id", "data_id"])
df_tables_enc = cards_df
df_tables_enc = df_tables_enc.apply(pd.Series)
df_tables_enc.columns = ["t_" + str(c) for c in df_tables_enc.columns]
if factors > 0:
from sklearn.decomposition import PCA
pca_model = PCA(n_components=factors, svd_solver="auto")
df_tables_enc_factors = pca_model.fit_transform(df_tables_enc)
for f in range(factors):
df_tables_enc[f"t_F{f + 1}"] = df_tables_enc_factors[:, f]
if verbose:
print("n_components:", pca_model.n_components,
"\nexplained_variance_ :", pca_model.explained_variance_,
"\nexplained_variance_ratio_ :", np.cumsum(pca_model.explained_variance_ratio_))
return df_tables_enc
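# Example usage (illustrative values, not from the original pipeline):
#   df_tables = pd.Series({('plan_1', 'data_1'): ['orders', 'users'],
#                          ('plan_2', 'data_1'): ['users']})
#   enc = get_tables_features(df_tables)
#   # -> one-hot columns ['t_0', 't_1'], one row per (plan_id, data_id)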
###################
## DEPRECATED! - Data sampling ##
###################
def sample_by_value_dist(df, sample_size, sample_col, verbose=True):
values = df[sample_col].values
arg_s_values = values.argsort()
if verbose:
plt.plot(values, marker='.', linewidth=0)
plt.ylabel(sample_col)
plt.xlabel("Samples")
plt.show()
norm_values = values / max(values)
if verbose:
plt.plot(norm_values[arg_s_values], marker=".")
plt.ylabel("Norm - " + sample_col)
plt.xlabel("Samples")
plt.show()
n_samples = int(norm_values.__len__() * sample_size)
th_step = (norm_values.max() - norm_values.min()) / n_samples
cut_points_indexes = []
th = norm_values.min()
sampled_sorted_idx = []
i = 0
for idx in arg_s_values:
if norm_values[idx] >= th:
cut_points_indexes.append(idx)
th = th + th_step
sampled_sorted_idx.append(i)
i = i + 1
cut_points_indexes.append(arg_s_values[-1]) # Add last point
sampled_sorted_idx.append(arg_s_values.__len__() - 1)
if verbose:
plt.plot(norm_values[arg_s_values], marker=".")
plt.plot(sampled_sorted_idx, norm_values[cut_points_indexes], 'r.')
plt.ylabel("Norm - " + sample_col)
plt.xlabel("Samples")
plt.show()
if verbose:
plt.plot(values, marker='.', linewidth=0)
plt.plot(cut_points_indexes, values[cut_points_indexes], 'r.')
plt.ylabel(sample_col)
plt.xlabel("Samples")
plt.show()
sampled_data = df.iloc[cut_points_indexes, :]
return sampled_data
def sample_by_data_2(x, samples_key, sample_col, verbose=False):
k = x["data_id"].values[0]
print(k) if verbose else None
return sample_by_value_dist(x, samples_key[k], sample_col, verbose)
def custom_train_test_split_2(df, samples_key, sample_col, job_filter=".*", verbose=False):
df = df.reset_index().loc[df.reset_index()["plan_id"].str.contains(job_filter), :]
print("Len before:", df.__len__())
df_train = df.groupby("data_id", group_keys=False).apply(
lambda x: sample_by_data_2(x, samples_key, sample_col, verbose))
print("Len train:", df_train.__len__())
df_test = df[~df.index.isin(df_train.index)]
print("Len test:", df_test.__len__())
return df_train, df_test
######################
## Outliers removal ##
######################
def remove_outliers(df, outlier_col, b=0.01, verbose=False):
asd = []
for ds in sorted(df["data_size"].unique()):
ds_df = df.loc[df["data_size"] == ds, :]
len_before = ds_df.__len__()
q = b
ds_df = ds_df[ds_df[outlier_col] > np.quantile(ds_df[outlier_col], q)]
q = 1.0 - b
ds_df = ds_df[ds_df[outlier_col] < np.quantile(ds_df[outlier_col], q)]
print(f"Removing {len_before - ds_df.__len__()} outliers from '{ds}'")
asd.append(ds_df)
    df_no_out = pd.concat(asd)
    return df_no_out
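# Example usage (illustrative column name): drop the lowest and highest 1% of
# runtimes within each data_size group before further analysis.
#   df_clean = remove_outliers(df, outlier_col='netRunTime', b=0.01)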
#!/usr/bin/env python
# coding: utf-8
import math
import glob
import re
import os.path
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from pathlib import Path
from io import StringIO
from pyproj import Transformer
from itertools import takewhile
from scipy import stats
import multiprocessing as mp
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from shapely.geometry import box, Point, LineString
def standardise_source(df):
# Dictionary containing method values to rename
remap_dict = {'emery': 'emery/levelling',
'levelling': 'emery/levelling',
'dunefill': np.NaN,
'rtk gps': 'gps',
'photogrammetry': 'aerial photogrammetry',
'stereo photogrammtery': 'aerial photogrammetry',
'ads80': 'aerial photogrammetry',
'photos': 'aerial photogrammetry',
'total station': 'total station',
'total station\t': 'total station',
'laser scanning': 'terrestrial laser scanning',
'satellite': 'satellite',
'gps rtk gps': 'gps'}
# Set all values to lower case for easier conversion
df['source'] = df.source.str.lower()
# Replace values
df['source'] = df.source.replace(remap_dict)
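# Example (illustrative): harmonise free-text survey method names in place.
#   df = pd.DataFrame({'source': ['RTK GPS', 'Levelling', 'ADS80']})
#   standardise_source(df)
#   # df['source'] -> ['gps', 'emery/levelling', 'aerial photogrammetry']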
def to_vector(df,
fname='test.shp',
x='x',
y='y',
crs='EPSG:3577',
output_crs='EPSG:3577'):
# Convert datetimes to strings
df = df.copy()
is_datetime = df.dtypes == 'datetime64[ns]'
df.loc[:, is_datetime] = df.loc[:, is_datetime].astype(str)
# Export to file
    gdf = gpd.GeoDataFrame(data=df.loc[:, df.dtypes != 'datetime64[ns]'],
                           geometry=gpd.points_from_xy(x=df[x], y=df[y]),
                           crs=crs).to_crs(output_crs)

    # Write to file, then return the reprojected GeoDataFrame (.to_file() returns None)
    gdf.to_file(fname)

    return gdf
def export_eval(df, output_name, output_crs='EPSG:3577'):
from shapely.geometry import box, Point, LineString
# Extract geometries
val_points = gpd.points_from_xy(x=df.val_x, y=df.val_y)
deacl_points = gpd.points_from_xy(x=df.deacl_x, y=df.deacl_y)
df_profiles = df.groupby('id').first()
profile_lines = df_profiles.apply(
lambda x: LineString([(x.start_x, x.start_y), (x.end_x, x.end_y)]), axis=1)
# Export validation points
val_gdf = gpd.GeoDataFrame(data=df,
geometry=val_points,
crs=output_crs).to_crs('EPSG:4326')
val_gdf.to_file(f'figures/eval/{output_name}_val.geojson',
driver='GeoJSON')
# Export DEACL points
deacl_gdf = gpd.GeoDataFrame(data=df,
geometry=deacl_points,
crs=output_crs).to_crs('EPSG:4326')
deacl_gdf.to_file(f'figures/eval/{output_name}_deacl.geojson',
driver='GeoJSON')
# Export profiles
profile_gdf = gpd.GeoDataFrame(data=df_profiles,
geometry=profile_lines,
crs=output_crs).to_crs('EPSG:4326')
profile_gdf.to_file(f'figures/eval/{output_name}_profiles.geojson',
driver='GeoJSON')
def deacl_val_stats(val_dist, deacl_dist, n=None, remove_bias=False):
np.seterr(all='ignore')
# Compute difference and bias
diff_dist = val_dist - deacl_dist
bias = diff_dist.mean()
if remove_bias:
deacl_dist += bias
diff_dist = val_dist - deacl_dist
# Compute stats
if n is None:
n = len(val_dist)
else:
n = sum(n)
mae = mean_absolute_error(val_dist, deacl_dist)
rmse = mean_squared_error(val_dist, deacl_dist)**0.5
if n > 1:
corr = np.corrcoef(x=val_dist, y=deacl_dist)[0][1]
stdev = diff_dist.std()
else:
corr = np.nan
stdev = np.nan
return pd.Series({
'n': n,
'mae': f'{mae:.2f}',
'rmse': f'{rmse:.2f}',
'stdev': f'{stdev:.2f}',
'corr': f'{corr:.3f}',
'bias': f'{bias:.2f}',
}).astype(float)
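# Example (illustrative): compare validation vs DEA Coastlines shoreline distances.
#   val = np.array([10.0, 20.0, 30.0])
#   deacl = np.array([12.0, 18.0, 33.0])
#   stats_series = deacl_val_stats(val, deacl)
#   # -> pandas Series with n, mae, rmse, stdev, corr and bias as floats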
def rse_tableformat(not_bias_corrected, bias_corrected, groupby='source'):
# Fix rounding and total observations
not_bias_corrected['n'] = not_bias_corrected['n'].astype(int)
not_bias_corrected[['bias', 'stdev', 'mae', 'rmse']] = not_bias_corrected[['bias', 'stdev', 'mae', 'rmse']].round(1)
not_bias_corrected['n'] = not_bias_corrected.groupby(groupby)['n'].sum()
# Move bias corrected values into brackets
not_bias_corrected['MAE (m)'] = (not_bias_corrected.mae.astype('str') + ' (' +
bias_corrected.mae.round(1).astype('str') + ')')
not_bias_corrected['RMSE (m)'] = (not_bias_corrected.rmse.astype('str') + ' (' +
bias_corrected.rmse.round(1).astype('str') + ')')
# Sort by MAE, rename columns
not_bias_corrected = (not_bias_corrected.sort_values('mae')
.drop(['mae', 'rmse'], axis=1)
.rename({'stdev': 'SD (m)', 'corr': 'Correlation', 'bias': 'Bias (m)'}, axis=1)
[['n', 'Bias (m)', 'MAE (m)', 'RMSE (m)', 'SD (m)', 'Correlation']])
return not_bias_corrected
def val_slope(profiles_df, intercept_df, datum=0, buffer=25, method='distance'):
# Join datum dist to full profile dataframe
profiles_datum_dist = (profiles_df.set_index(
['id', 'date'])[['distance', 'z']].join(intercept_df[f'{datum}_dist']))
if method == 'distance':
# Filter to measurements within distance of datum distance
beach_data = profiles_datum_dist[profiles_datum_dist.distance.between(
profiles_datum_dist[f'{datum}_dist'] - buffer,
profiles_datum_dist[f'{datum}_dist'] + buffer)]
elif method == 'height':
# Filter measurements within height of datum
beach_data = profiles_datum_dist.loc[
profiles_datum_dist.z.between(-buffer, buffer)]
# Calculate slope
beach_slope = beach_data.groupby(['id', 'date']).apply(
lambda x: stats.linregress(x=x.distance, y=x.z).slope)
return beach_slope.round(3)
def dms2dd(s):
# example: s = "0°51'56.29"
degrees, minutes, seconds = re.split('[°\'"]+', s)
if float(degrees) > 0:
dd = float(degrees) + float(minutes) / 60 + float(seconds) / (60 * 60)
else:
        dd = float(degrees) - float(minutes) / 60 - float(seconds) / (60 * 60)
return dd
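# Example (illustrative): dms2dd("115°51'56.29") -> approximately 115.8656 decimal degrees
# (note the trailing seconds symbol is omitted, as in the example above).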
def dist_angle(lon, lat, dist, angle):
lon_end = lon + dist * np.sin(angle * np.pi / 180)
lat_end = lat + dist * np.cos(angle * np.pi / 180)
return pd.Series({'end_y': lat_end, 'end_x': lon_end})
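# Example (illustrative): project a point 100 m from (0, 0) on a bearing of 90 degrees
# (due east): dist_angle(0, 0, 100, 90) -> end_x ~= 100, end_y ~= 0.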
def interp_intercept(x, y1, y2, reverse=False):
"""
Find the intercept of two curves, given by the same x data
References:
----------
Source: https://stackoverflow.com/a/43551544/2510900
"""
def intercept(point1, point2, point3, point4):
"""find the intersection between two lines
the first line is defined by the line between point1 and point2
        the second line is defined by the line between point3 and point4
each point is an (x,y) tuple.
So, for example, you can find the intersection between
intercept((0,0), (1,1), (0,1), (1,0)) = (0.5, 0.5)
Returns: the intercept, in (x,y) format
"""
def line(p1, p2):
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
def intersection(L1, L2):
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
x = Dx / D
y = Dy / D
return x,y
L1 = line([point1[0],point1[1]], [point2[0],point2[1]])
L2 = line([point3[0],point3[1]], [point4[0],point4[1]])
R = intersection(L1, L2)
return R
try:
if isinstance(y2, (int, float)):
y2 = np.array([y2] * len(x))
if reverse:
x = x[::-1]
y1 = y1[::-1]
y2 = y2[::-1]
idx = np.argwhere(np.diff(np.sign(y1 - y2)) != 0)
xc, yc = intercept((x[idx], y1[idx]),((x[idx + 1], y1[idx + 1])),
((x[idx], y2[idx])), ((x[idx + 1], y2[idx + 1])))
return xc[0][0]
except:
return np.nan
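# Example (illustrative): where does a profile first cross 0 m elevation?
#   x = np.array([0.0, 1.0, 2.0, 3.0])
#   z = np.array([2.0, 1.0, -1.0, -2.0])
#   interp_intercept(x, z, 0)   # -> 1.5 (linear crossing between x=1 and x=2)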
def dist_along_transect(dist, start_x, start_y, end_x, end_y):
transect_line = LineString([(start_x, start_y), (end_x, end_y)])
distance_coords = transect_line.interpolate(dist).coords.xy
return [coord[0] for coord in distance_coords]
def waterline_intercept(x,
dist_col='distance',
x_col='x',
y_col='y',
z_col='z',
z_val=0,
debug=False):
# Extract distance and coordinates of where the z_val first
# intersects with the profile line
dist_int = interp_intercept(x[dist_col].values, x[z_col].values, z_val)
x_int = interp_intercept(x[x_col].values, x[z_col].values, z_val)
y_int = interp_intercept(x[y_col].values, x[z_col].values, z_val)
# Identify last distance where the z_value intersects the profile
rev_int = interp_intercept(x[dist_col].values, x[z_col].values, z_val,
reverse=True)
    # If the first and last intersects are identical, return data.
# If not, the comparison is invalid (i.e. NaN)
if dist_int == rev_int:
if debug: print('Single intersection found')
return pd.Series({f'{z_val}_dist': dist_int,
f'{z_val}_x': x_int,
f'{z_val}_y': y_int})
else:
if debug: print('Multiple intersections returned')
return pd.Series({f'{z_val}_dist': np.NaN,
f'{z_val}_x': np.NaN,
f'{z_val}_y': np.NaN})
def reproj_crs(in_data,
in_crs,
x='x',
y='y',
out_crs='EPSG:3577'):
# Reset index to allow merging new data with original data
in_data = in_data.reset_index(drop=True)
# Reproject coords to Albers and create geodataframe
trans = Transformer.from_crs(in_crs, out_crs, always_xy=True)
coords = trans.transform(in_data[x].values, in_data[y].values)
in_data[['x', 'y']] = pd.DataFrame(zip(*coords))
return in_data
def profiles_from_dist(profiles_df,
id_col='id',
dist_col='distance',
x_col='x',
y_col='y'):
# Compute origin points for each profile
min_ids = profiles_df.groupby(id_col)[dist_col].idxmin()
start_xy = profiles_df.loc[min_ids, [id_col, x_col, y_col]]
start_xy = start_xy.rename({x_col: f'start_{x_col}',
y_col: f'start_{y_col}'},
axis=1)
# Compute end points for each profile
max_ids = profiles_df.groupby(id_col)[dist_col].idxmax()
end_xy = profiles_df.loc[max_ids, [x_col, y_col]]
# Add end coords into same dataframe
start_xy = start_xy.reset_index(drop=True)
end_xy = end_xy.reset_index(drop=True)
start_xy[[f'end_{x_col}', f'end_{y_col}']] = end_xy
return start_xy
def perpendicular_line(input_line, length):
# Generate parallel lines either side of input line
left = input_line.parallel_offset(length / 2.0, 'left')
right = input_line.parallel_offset(length / 2.0, 'right')
# Create new line between centroids of parallel line.
# This should be perpendicular to the original line
return LineString([left.centroid, right.centroid])
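# Example (illustrative): a 100 m transect perpendicular to a short east-west segment.
#   seg = LineString([(0, 0), (10, 0)])
#   perpendicular_line(seg, 100)   # -> approximately LineString([(5, 50), (5, -50)])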
def generate_transects(line_geom,
length=400,
interval=200,
buffer=20):
# Create tangent line at equal intervals along line geom
interval_dists = np.arange(buffer, line_geom.length, interval)
tangent_geom = [LineString([line_geom.interpolate(dist - buffer),
line_geom.interpolate(dist + buffer)])
for dist in interval_dists]
# Convert to geoseries and remove erroneous lines by length
tangent_gs = gpd.GeoSeries(tangent_geom)
tangent_gs = tangent_gs.loc[tangent_gs.length.round(1) <= buffer * 2]
# Compute perpendicular lines
return tangent_gs.apply(perpendicular_line, length=length)
def coastal_transects(bbox,
name,
interval=200,
transect_length=400,
simplify_length=200,
transect_buffer=20,
output_crs='EPSG:3577',
coastline='../input_data/Smartline.gdb',
land_poly='/g/data/r78/rt1527/shapefiles/australia/australia/cstauscd_r.shp'):
# Load smartline
coastline_gdf = gpd.read_file(coastline, bbox=bbox).to_crs(output_crs)
coastline_geom = coastline_gdf.geometry.unary_union.simplify(simplify_length)
# Load Australian land water polygon
land_gdf = gpd.read_file(land_poly, bbox=bbox).to_crs(output_crs)
land_gdf = land_gdf.loc[land_gdf.FEAT_CODE.isin(["mainland", "island"])]
land_geom = gpd.overlay(df1=land_gdf, df2=bbox).unary_union
# Extract transects along line
geoms = generate_transects(coastline_geom,
length=transect_length,
interval=interval,
buffer=transect_buffer)
# Test if end points of transects fall in water or land
p1 = gpd.GeoSeries([Point(i.coords[0]) for i in geoms])
p2 = gpd.GeoSeries([Point(i.coords[1]) for i in geoms])
p1_within_land = p1.within(land_geom)
p2_within_land = p2.within(land_geom)
# Create geodataframe, remove invalid land-land/water-water transects
transect_gdf = gpd.GeoDataFrame(data={'p1': p1_within_land,
'p2': p2_within_land},
geometry=geoms.values,
crs=output_crs)
transect_gdf = transect_gdf[~(transect_gdf.p1 == transect_gdf.p2)]
# Reverse transects so all point away from land
transect_gdf['geometry'] = transect_gdf.apply(
lambda i: LineString([i.geometry.coords[1],
i.geometry.coords[0]])
if i.p1 < i.p2 else i.geometry, axis=1)
# Export to file
transect_gdf[['geometry']].to_file(f'input_data/coastal_transects_{name}.geojson',
driver='GeoJSON')
def coastal_transects_parallel(
regions_gdf,
interval=200,
transect_length=400,
simplify_length=200,
transect_buffer=20,
overwrite=False,
output_path='input_data/combined_transects_wadot.geojson'):
if not os.path.exists(output_path) or overwrite:
if os.path.exists(output_path):
print('Removing existing file')
os.remove(output_path)
# Generate transects for each region
print('Generating transects')
with mp.Pool(mp.cpu_count()) as pool:
for i, _ in regions_gdf.iterrows():
name = str(i).replace(' ', '').replace('/', '').lower()
pool.apply_async(coastal_transects, [
regions_gdf.loc[[i]], name, interval, transect_length,
simplify_length, transect_buffer
])
pool.close()
pool.join()
# Load regional transects and combine into a single file
print('Combining data')
transect_list = glob.glob('input_data/coastal_transects_*.geojson')
gdf = pd.concat(
[gpd.read_file(shp, ignore_index=True) for shp in transect_list])
gdf = gdf.reset_index(drop=True)
gdf['profile'] = gdf.index.astype(str)
gdf.to_file(output_path, driver='GeoJSON')
# Clean files
[os.remove(f) for f in transect_list]
def preprocess_wadot(compartment,
overwrite=True,
fname='input_data/wadot/Coastline_Movements_20190819.gdb'):
beach = str(compartment.index.item())
fname_out = f'output_data/wadot_{beach}.csv'
print(f'Processing {beach:<80}', end='\r')
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
# Read file and filter to AHD 0 shorelines
val_gdf = gpd.read_file(fname,
bbox=compartment).to_crs('EPSG:3577')
val_gdf = gpd.clip(gdf=val_gdf, mask=compartment, keep_geom_type=True)
val_gdf = val_gdf[(val_gdf.TYPE == 'AHD 0m') |
(val_gdf.TYPE == 'AHD 0m ')]
# Filter to post 1987 shorelines and set index to year
val_gdf = val_gdf[val_gdf.PHOTO_YEAR > 1987]
val_gdf = val_gdf.set_index('PHOTO_YEAR')
# If no data is returned, skip this iteration
if len(val_gdf.index) == 0:
print(f'Failed: {beach:<80}', end='\r')
return None
######################
# Generate transects #
######################
transect_gdf = gpd.read_file('input_data/combined_transects_wadot.geojson',
bbox=compartment)
transect_gdf = gpd.clip(gdf=transect_gdf, mask=compartment, keep_geom_type=True)
################################
# Identify 0 MSL intersections #
################################
output_list = []
# Select one year
for year in val_gdf.index.unique().sort_values():
# Extract validation contour
print(f'Processing {beach} {year:<80}', end='\r')
val_contour = val_gdf.loc[[year]].geometry.unary_union
# Copy transect data, and find intersects
# between transects and contour
intersect_gdf = transect_gdf.copy()
intersect_gdf['val_point'] = transect_gdf.intersection(val_contour)
to_keep = gpd.GeoSeries(intersect_gdf['val_point']).geom_type == 'Point'
intersect_gdf = intersect_gdf.loc[to_keep]
# If no data is returned, skip this iteration
if len(intersect_gdf.index) == 0:
print(f'Failed: {beach} {year:<80}', end='\r')
continue
# Add generic metadata
intersect_gdf['date'] = pd.to_datetime(str(year))
intersect_gdf['beach'] = beach
intersect_gdf['section'] = 'all'
intersect_gdf['source'] = 'aerial photogrammetry'
intersect_gdf['name'] = 'wadot'
intersect_gdf['id'] = (intersect_gdf.beach + '_' +
intersect_gdf.section + '_' +
intersect_gdf.profile)
# Add measurement metadata
intersect_gdf[['start_x', 'start_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[0]), axis=1)
intersect_gdf[['end_x', 'end_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[1]), axis=1)
intersect_gdf['0_dist'] = intersect_gdf.apply(
lambda x: Point(x.start_x, x.start_y).distance(x['val_point']), axis=1)
intersect_gdf[['0_x', '0_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.val_point.coords[0]), axis=1)
# Add empty slope var (not possible to compute without profile data)
intersect_gdf['slope'] = np.nan
# Keep required columns
intersect_gdf = intersect_gdf[['id', 'date', 'beach',
'section', 'profile', 'name',
'source', 'slope', 'start_x',
'start_y', 'end_x', 'end_y',
'0_dist', '0_x', '0_y']]
# Append to file
output_list.append(intersect_gdf)
# Combine all year data and export to file
if len(output_list) > 0:
shoreline_df = pd.concat(output_list)
shoreline_df.to_csv(fname_out, index=False)
else:
print(f'Skipping {beach:<80}', end='\r')
def preprocess_dasilva2021(fname='input_data/dasilva2021/dasilva_etal_2021_shorelines.shp'):
beach = 'dasilva2021'
print(f'Processing {beach:<80}', end='\r')
# Read file and filter to AHD 0 shorelines
fname='input_data/dasilva2021/dasilva_etal_2021_shorelines.shp'
val_gdf = gpd.read_file(fname).to_crs('EPSG:3577')
val_gdf = val_gdf.loc[val_gdf.Year_ > 1987]
val_gdf['Year_'] = val_gdf.Year_.astype(str)
val_gdf = val_gdf.set_index('Year_')
# If no data is returned, skip this iteration
if len(val_gdf.index) == 0:
print(f'Failed: {beach:<80}', end='\r')
return None
######################
# Generate transects #
######################
transect_gdf = gpd.read_file('input_data/dasilva2021/dasilva_etal_2021_retransects.shp').to_crs('EPSG:3577')[['TransectID', 'Direction', 'order', 'geometry']]
transect_gdf.columns = ['profile', 'section', 'order', 'geometry']
transect_gdf = transect_gdf.sort_values('order').set_index('order')
transect_gdf['profile'] = transect_gdf.profile.astype(str)
################################
# Identify 0 MSL intersections #
################################
output_list = []
# Select one year
for year in val_gdf.index.unique().sort_values():
# Extract validation contour
print(f'Processing {beach} {year:<80}', end='\r')
val_contour = val_gdf.loc[[year]].geometry.unary_union
# Copy transect data, and find intersects
# between transects and contour
intersect_gdf = transect_gdf.copy()
intersect_gdf['val_point'] = transect_gdf.intersection(val_contour)
to_keep = gpd.GeoSeries(intersect_gdf['val_point']).geom_type == 'Point'
intersect_gdf = intersect_gdf.loc[to_keep]
# If no data is returned, skip this iteration
if len(intersect_gdf.index) == 0:
print(f'Failed: {beach} {year:<80}', end='\r')
continue
# Add generic metadata
intersect_gdf['date'] = pd.to_datetime(str(year))
intersect_gdf['beach'] = beach
intersect_gdf['source'] = 'satellite'
intersect_gdf['name'] = 'dasilva2021'
intersect_gdf['id'] = (intersect_gdf.beach + '_' +
intersect_gdf.section + '_' +
intersect_gdf.profile)
# Add measurement metadata
intersect_gdf[['start_x', 'start_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[0]), axis=1)
intersect_gdf[['end_x', 'end_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[1]), axis=1)
intersect_gdf['0_dist'] = intersect_gdf.apply(
lambda x: Point(x.start_x, x.start_y).distance(x['val_point']), axis=1)
intersect_gdf[['0_x', '0_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.val_point.coords[0][0:2]), axis=1)
# Add empty slope var (not possible to compute without profile data)
intersect_gdf['slope'] = np.nan
# Keep required columns
intersect_gdf = intersect_gdf[['id', 'date', 'beach',
'section', 'profile', 'name',
'source', 'slope', 'start_x',
'start_y', 'end_x', 'end_y',
'0_dist', '0_x', '0_y']]
# Append to file
output_list.append(intersect_gdf)
# Combine all year data and export to file
if len(output_list) > 0:
shoreline_df = pd.concat(output_list)
shoreline_df.to_csv(f'output_data/{beach}.csv', index=False)
def preprocess_stirling(fname_out, datum=0):
# List containing files to import and all params to extract data
survey_xl = [
{'fname': 'input_data/stirling/2015 05 28 - From Stirling - Coastal Profiles 2014-2015 April-Feb with updated reef#2.xlsm',
'skiprows': 2,
'skipcols': 5,
'nrows': 100,
'meta_skiprows': 0,
'meta_nrows': 1,
'meta_usecols': [6, 7]},
{'fname': 'input_data/stirling/Coastal Profiles 2013-2014 JUL-MAY#2.xlsx',
'skiprows': 2,
'skipcols': 5,
'nrows': 100,
'meta_skiprows': 0,
'meta_nrows': 1,
'meta_usecols': [6, 7]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2013 JAN - JUNE#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2012 JUN - DEC#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2011-2012 NOV - MAY#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]}
]
# List to contain processed profile data
output = []
# For each survey excel file in the list above:
for survey in survey_xl:
# Load profile start metadata
all_meta = pd.read_excel(survey['fname'],
sheet_name=None,
nrows=survey['meta_nrows'],
skiprows=survey['meta_skiprows'],
usecols=survey['meta_usecols'],
                                 header=None)
# Load data
all_sheets = pd.read_excel(survey['fname'],
sheet_name=None,
skiprows=survey['skiprows'],
nrows=survey['nrows'],
parse_dates=False,
usecols=lambda x: 'Unnamed' not in str(x))
# Iterate through each profile in survey data
for profile_id in np.arange(1, 20).astype('str'):
# Extract profile start metadata and profile data
start_x, start_y = all_meta[profile_id].values[0]
sheet = all_sheets[profile_id].iloc[:,survey['skipcols']:]
# First set all column names to lower case strings
sheet.columns = (sheet.columns.astype(str)
.str.slice(0, 10)
.str.lower())
# Drop note columns and distance/angle offset
sheet = sheet.loc[:,~sheet.columns.str.contains('note|notes')]
sheet = sheet.drop(['dist', 'angle dd'], axis=1, errors='ignore')
# Expand date column values into rows for each sampling event
sheet.loc[:,sheet.columns[::4]] = sheet.columns[::4]
# Number date columns incrementally to match other fields
start_num = 1 if survey['skipcols'] > 0 else 0
rename_dict = {name: f'date.{i + start_num}' for
i, name in enumerate(sheet.columns[::4])}
sheet = sheet.rename(rename_dict, axis=1).reset_index()
sheet = sheet.rename({'x': 'x.0', 'y': 'y.0', 'z': 'z.0'}, axis=1)
# Reshape data into long format
profile_df = pd.wide_to_long(sheet,
stubnames=['date', 'x', 'y', 'z'],
i='index',
j='dropme',
sep='.').reset_index(drop=True)
# Set datetimes
profile_df['date'] = pd.to_datetime(profile_df.date,
errors='coerce',
dayfirst=True)
# Add profile metadata
profile_df['beach'] = 'stirling'
profile_df['section'] = 'all'
profile_df['profile'] = profile_id
profile_df['name'] = 'stirling'
profile_df['source'] = 'gps'
profile_df['start_x'] = start_x
profile_df['start_y'] = start_y
profile_df['id'] = (profile_df.beach + '_' +
profile_df.section + '_' +
profile_df.profile)
# Add results to list
output.append(profile_df.dropna())
# Combine all survey and profile data
profiles_df = pd.concat(output)
# Reproject Perth Coastal Grid coordinates into Australian Albers
pcg_crs = '+proj=tmerc +lat_0=0 +lon_0=115.8166666666667 ' \
'+k=0.9999990600000001 +x_0=50000 +y_0=3800000 ' \
'+ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'
trans = Transformer.from_crs(pcg_crs, 'EPSG:3577', always_xy=True)
profiles_df['x'], profiles_df['y'] = trans.transform(
profiles_df.y.values, profiles_df.x.values)
profiles_df['start_x'], profiles_df['start_y'] = trans.transform(
profiles_df.start_y.values, profiles_df.start_x.values)
# Calculate per-point distance from start of profile
profiles_df['distance'] = profiles_df.apply(
lambda x: Point(x.start_x, x.start_y).distance(Point(x.x, x.y)), axis=1)
# Identify end of profiles by max distance from start, and merge back
max_dist = (profiles_df.sort_values('distance', ascending=False)
                .groupby('id')[['x', 'y']]
.first()
.rename({'x': 'end_x', 'y': 'end_y'}, axis=1))
profiles_df = profiles_df.merge(max_dist, on='id')
# Find location and distance to water for datum height (e.g. 0 m AHD)
intercept_df = profiles_df.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# Join into dataframe
shoreline_dist = intercept_df.join(
profiles_df.groupby(['id', 'date']).first())
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Export to file
shoreline_dist.to_csv(fname_out)
def preprocess_vicdeakin(fname,
datum=0):
# Dictionary to map correct CRSs to locations
crs_dict = {'apo': 'epsg:32754',
'cow': 'epsg:32755',
'inv': 'epsg:32755',
'leo': 'epsg:32755',
'mar': 'epsg:32754',
'pfa': 'epsg:32754',
'por': 'epsg:32755',
'prd': 'epsg:32755',
'sea': 'epsg:32755',
'wbl': 'epsg:32754'}
# Read data
profiles_df = pd.read_csv(fname,
parse_dates=['survey_date']).dropna()
# Restrict to pre-2019
profiles_df = profiles_df.loc[profiles_df.survey_date.dt.year < 2020]
profiles_df = profiles_df.reset_index(drop=True)
# Remove invalid profiles
invalid = ((profiles_df.location == 'leo') & (profiles_df.tr_id == 94))
profiles_df = profiles_df.loc[~invalid].reset_index(drop=True)
# Extract coordinates
coords = profiles_df.coordinates.str.findall(r'\d+\.\d+')
profiles_df[['x', 'y']] = pd.DataFrame(coords.values.tolist(),
dtype=np.float32)
# Add CRS and convert to Albers
profiles_df['crs'] = profiles_df.location.apply(lambda x: crs_dict[x])
profiles_df = profiles_df.groupby('crs', as_index=False).apply(
lambda x: reproj_crs(x, in_crs=x.crs.iloc[0])).drop('crs', axis=1)
profiles_df = profiles_df.reset_index(drop=True)
# Convert columns to strings and add unique ID column
profiles_df = profiles_df.rename({'location': 'beach',
'tr_id': 'profile',
'survey_date': 'date',
'z': 'z_dirty',
'z_clean': 'z'}, axis=1)
profiles_df['profile'] = profiles_df['profile'].astype(str)
profiles_df['section'] = 'all'
profiles_df['source'] = 'drone photogrammetry'
profiles_df['name'] = 'vicdeakin'
profiles_df['id'] = (profiles_df.beach + '_' +
profiles_df.section + '_' +
profiles_df.profile)
# Reverse profile distances by subtracting max distance from each
prof_max = profiles_df.groupby('id')['distance'].transform('max')
profiles_df['distance'] = (profiles_df['distance'] - prof_max).abs()
# Compute origin and end points for each profile and merge into data
start_end_xy = profiles_from_dist(profiles_df)
profiles_df = pd.merge(left=profiles_df, right=start_end_xy)
# Export each beach
for beach_name, beach in profiles_df.groupby('beach'):
# Create output file name
fname_out = f'output_data/vicdeakin_{beach_name}.csv'
print(f'Processing {fname_out:<80}', end='\r')
# Find location and distance to water for datum height (0 m AHD)
intercept_df = beach.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# If the output contains data
if len(intercept_df.index) > 0:
# Join into dataframe
shoreline_dist = intercept_df.join(
beach.groupby(['id', 'date']).first())
# Compute validation slope and join into dataframe
slope = val_slope(beach, intercept_df, datum=datum)
shoreline_dist = shoreline_dist.join(slope.rename('slope'))
# Keep required columns
shoreline_dist = shoreline_dist[['beach', 'section', 'profile', 'name',
'source', 'slope', 'start_x', 'start_y',
'end_x', 'end_y', f'{datum}_dist',
f'{datum}_x', f'{datum}_y']]
# Export to file
shoreline_dist.to_csv(fname_out)
def preprocess_nswbpd(fname, datum=0, overwrite=False):
# Get output filename
name = Path(fname).stem.split('_')[-1].lower().replace(' ', '')
fname_out = f'output_data/nswbpd_{name}.csv'
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
# Read in data
print(f'Processing {fname_out:<80}', end='\r')
profiles_df = pd.read_csv(fname, skiprows=5, dtype={'Block': str, 'Profile': str})
profiles_df['Year/Date'] = pd.to_datetime(profiles_df['Year/Date'],
dayfirst=True,
errors='coerce')
# Convert columns to strings and add unique ID column
profiles_df['Beach'] = profiles_df['Beach'].str.lower().str.replace(' ', '')
profiles_df['Block'] = profiles_df['Block'].str.lower()
profiles_df['Profile'] = profiles_df['Profile'].astype(str).str.lower()
profiles_df['id'] = (profiles_df.Beach + '_' +
profiles_df.Block + '_' +
profiles_df.Profile)
profiles_df['name'] = 'nswbpd'
# Rename columns
profiles_df.columns = ['beach', 'section', 'profile',
'date', 'distance', 'z', 'x', 'y',
'source', 'id', 'name']
# Reproject coords to Albers
trans = Transformer.from_crs('EPSG:32756', 'EPSG:3577', always_xy=True)
profiles_df['x'], profiles_df['y'] = trans.transform(
profiles_df.x.values, profiles_df.y.values)
# Restrict to post 1987
profiles_df = profiles_df[profiles_df['date'] > '1987']
# Compute origin and end points for each profile and merge into data
start_end_xy = profiles_from_dist(profiles_df)
profiles_df = pd.merge(left=profiles_df, right=start_end_xy)
# Drop profiles that have been assigned incorrect profile IDs.
# To do this, we use a correlation test to determine whether x
# and y coordinates within each individual profiles fall along a
# straight line. If a profile has a low correlation (e.g. less
        # than 0.99), it is likely that multiple profile lines have been
# incorrectly labelled with a single profile ID.
valid_profiles = lambda x: x[['x', 'y']].corr().abs().iloc[0, 1] > 0.99
drop = (~profiles_df.groupby('id').apply(valid_profiles)).sum()
profiles_df = profiles_df.groupby('id').filter(valid_profiles)
if drop.sum() > 0: print(f'\nDropping invalid profiles: {drop:<80}')
# If profile data remains
if len(profiles_df.index) > 0:
# Restrict profiles to data that falls ocean-ward of the top of
# the foredune (the highest point in the profile) to remove
# spurious validation points, e.g. due to a non-shoreline lagoon
# at the back of the profile
foredune_dist = profiles_df.groupby(['id', 'date']).apply(
lambda x: x.distance.loc[x.z.idxmax()]).reset_index(name='foredune_dist')
profiles_df = pd.merge(left=profiles_df, right=foredune_dist)
profiles_df = profiles_df.loc[(profiles_df.distance >=
profiles_df.foredune_dist)]
# Find location and distance to water for datum height (e.g. 0 m AHD)
intercept_df = profiles_df.groupby(['id', 'date']).apply(
waterline_intercept, z_val=datum).dropna()
# If any datum intercepts are found
if len(intercept_df.index) > 0:
# Join into dataframe
shoreline_dist = intercept_df.join(
profiles_df.groupby(['id', 'date']).agg(
                    lambda x: pd.Series.mode(x)))
# -*- coding: UTF-8 -*-
"""
functions to process vcf files
"""
import sys
import subprocess
import numpy as np
import pandas as pd
import os.path as op
from pathlib import Path
from subprocess import run
from schnablelab.apps.base import ActionDispatcher, OptionParser, glob, put2slurm
# the location of linkimpute, beagle executable
lkipt = op.abspath(op.dirname(__file__)) + '/../apps/LinkImpute.jar'
begle = op.abspath(op.dirname(__file__)) + '/../apps/beagle.24Aug19.3e8.jar'
tassel = op.abspath(op.dirname(__file__)) + '/../apps/tassel-5-standalone/run_pipeline.pl'
def main():
actions = (
('BatchFilterMissing', 'apply FilterMissing on multiple vcf files'),
        ('BatchFilterMAF', 'apply FilterMAF on multiple vcf files'),
        ('BatchFilterHetero', 'apply FilterHetero on multiple vcf files'),
('IndexVCF', 'index vcf using bgzip and tabix'),
('splitVCF', 'split a vcf to several smaller files with equal size'),
('merge_files', 'combine split vcf or hmp files'),
('combineFQ', 'combine split fqs'),
('impute_beagle', 'impute vcf using beagle or linkimpute'),
('FixIndelHmp', 'fix the indels problems in hmp file converted from tassel'),
('FilterVCF', 'remove bad snps using bcftools'),
('only_ALT', 'filter number of ALT'),
('fixGTsep', 'fix the allele separator for beagle imputation'),
('calculateLD', 'calculate r2 using Plink'),
('summarizeLD', 'summarize ld decay in log scale')
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def BatchFilterMissing(args):
"""
%prog in_dir
apply FilterMissing on multiple vcf files
"""
p = OptionParser(BatchFilterMissing.__doc__)
p.add_option('--pattern', default='*.vcf',
help="file pattern of vcf files in the 'dir_in'")
p.add_option('--missing_cutoff', default='0.7',
help='missing rate cutoff, SNPs higher than this cutoff will be removed')
p.add_option('--disable_slurm', default=False, action="store_true",
help='do not convert commands to slurm jobs')
p.add_slurm_opts(job_prefix=BatchFilterMissing.__name__)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
in_dir, = args
in_dir_path= Path(in_dir)
vcfs = in_dir_path.glob(opts.pattern)
cmds = []
for vcf in vcfs:
cmd = "python -m schnablelab.SNPcalling.base FilterMissing %s --missing_cutoff %s"%(vcf, opts.missing_cutoff)
cmds.append(cmd)
cmd_sh = '%s.cmds%s.sh'%(opts.job_prefix, len(cmds))
pd.DataFrame(cmds).to_csv(cmd_sh, index=False, header=None)
print('check %s for all the commands!'%cmd_sh)
if not opts.disable_slurm:
put2slurm_dict = vars(opts)
put2slurm(cmds, put2slurm_dict)
def BatchFilterMAF(args):
"""
%prog in_dir
apply FilterMAF on multiple vcf files
"""
p = OptionParser(BatchFilterMAF.__doc__)
p.add_option('--pattern', default='*.vcf',
help="file pattern of vcf files in the 'dir_in'")
p.add_option('--maf_cutoff', default='0.01',
help='maf cutoff, SNPs lower than this cutoff will be removed')
p.add_option('--disable_slurm', default=False, action="store_true",
help='do not convert commands to slurm jobs')
p.add_slurm_opts(job_prefix=BatchFilterMAF.__name__)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
in_dir, = args
in_dir_path= Path(in_dir)
vcfs = in_dir_path.glob(opts.pattern)
cmds = []
for vcf in vcfs:
cmd = "python -m schnablelab.SNPcalling.base FilterMAF %s --maf_cutoff %s"%(vcf, opts.maf_cutoff)
cmds.append(cmd)
cmd_sh = '%s.cmds%s.sh'%(opts.job_prefix, len(cmds))
pd.DataFrame(cmds).to_csv(cmd_sh, index=False, header=None)
print('check %s for all the commands!'%cmd_sh)
if not opts.disable_slurm:
put2slurm_dict = vars(opts)
put2slurm(cmds, put2slurm_dict)
def BatchFilterHetero(args):
"""
%prog in_dir
    apply FilterHetero on multiple vcf files
"""
p = OptionParser(BatchFilterHetero.__doc__)
p.add_option('--pattern', default='*.vcf',
help="file pattern of vcf files in the 'dir_in'")
p.add_option('--het_cutoff', default='0.1',
help='heterozygous rate cutoff, SNPs higher than this cutoff will be removed')
p.add_option('--disable_slurm', default=False, action="store_true",
help='do not convert commands to slurm jobs')
p.add_slurm_opts(job_prefix=BatchFilterHetero.__name__)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
in_dir, = args
in_dir_path= Path(in_dir)
vcfs = in_dir_path.glob(opts.pattern)
cmds = []
for vcf in vcfs:
cmd = "python -m schnablelab.SNPcalling.base FilterHetero %s --het_cutoff %s"%(vcf, opts.het_cutoff)
cmds.append(cmd)
cmd_sh = '%s.cmds%s.sh'%(opts.job_prefix, len(cmds))
    pd.DataFrame(cmds).to_csv(cmd_sh, index=False, header=None)
    print('check %s for all the commands!'%cmd_sh)
    if not opts.disable_slurm:
        put2slurm_dict = vars(opts)
        put2slurm(cmds, put2slurm_dict)
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
begge_kjonn_5 = pd.read_csv("begge_kjonn_5.csv")
gutter_5 = pd.read_csv("gutter_5.csv")
import pandas as pd
import math
import sqlite3 as sql
def read_tables_info(con):
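    # Read the 'tables_info' table into a DataFrame, restoring the stored 'index' column as the index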
data = pd.read_sql_query('select * from tables_info',con,index_col='index')
return data
def is_table_exists(cursor,table_name):
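    # Query sqlite_master for a table with the given name; True when exactly one match exists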
cursor.execute('select count(*) from sqlite_master where type="table" and name="'+table_name+'"')
values = cursor.fetchall()
#print(values[0][0])
return values[0][0] == 1
def table_info(cursor,table_name):
cursor.execute('pragma table_info("'+table_name+'")')
values = cursor.fetchall()
print(values)
def read_trade_cal(con):
data = pd.read_sql_query('select * from trade_cal',con,index_col='index')
return data
def read_daily_by_date(con,sdate,edate):
sql_str = 'select * from daily where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_daily_by_tscode(con,tscode):
sql_str = 'select * from fut_daily where ts_code = "'+tscode+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_daily_by_symbol(con,symbol):
sql_str = 'select * from fut_daily where symbol = "'+symbol+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_holding_by_symbol(con,symbol):
sql_str = 'select * from fut_holding where symbol = "'+symbol+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_concept_info(con):
sql_str = 'select * from concept_info'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_concept_detail(con):
sql_str = 'select * from concept_detail'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_daily_by_tscode(con,tscode):
sql_str = 'select * from daily where ts_code="'+tscode+'"'
data = pd.read_sql_query(sql_str,con)
return data
def read_daily_basic_by_date(con,sdate,edate):
sql_str = 'select * from daily_basic where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_margin_detail_by_date(con,sdate,edate):
sql_str = 'select * from margin_detail where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_hk_hold_by_date(con,sdate,edate):
sql_str = 'select * from hk_hold where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_daily_by_date_and_tscode(con,tscode,sdate,edate):
sql_str = 'select * from daily where ts_code="'+tscode+'" and trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='trade_date')
return data
def read_daily_basic_by_tscode(con,tscode):
sql_str = 'select * from daily_basic where ts_code="'+tscode+'"'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update(con,sdate,edate):
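    # Open trading dates in [sdate, edate] that are missing from either the 'daily' or the 'daily_basic' table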
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and (cal_date not in (select trade_date from daily) or cal_date not in (select trade_date from daily_basic))'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_daily(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from daily)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_daily_basic(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from daily_basic)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_adj_factor(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from adj_factor)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_block_trade(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from block_trade)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_stock_suspend(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select suspend_date from stock_suspend)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_longhubang_list(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from longhubang_list)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_money_flow(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from money_flow)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_stock_limit_price(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from stock_price_limit)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_stk_holdernumber(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select end_date from stk_holder_num)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_hk_hold(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from hk_hold)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_fut_daily(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from fut_daily)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_fut_holding(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from fut_holding)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_fut_wsr(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from fut_wsr)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_margin_detail(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from margin_detail)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def read_money_flow(con,tscode):
sql_str='select * from money_flow where ts_code="'+tscode+'"'
    data = pd.read_sql_query(sql_str,con)
    return data
import argparse
import os
import shutil
import time
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import moon_net
import pdb
import bisect
from loader_moon_two import MoonData
import math
from math import ceil
import torch.nn.functional as F
from methods_two import train_sup, train_mt, validate
parser = argparse.ArgumentParser(description='PyTorch Moon Age Estimation Model Testing')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',
help='model architecture: '+ ' (default: resnet50)')
parser.add_argument('--model', '-m', metavar='MODEL', default='mt',
help='model: '+' (default: baseline)', choices=['baseline', 'mt'])
parser.add_argument('--ntrial', default=5, type=int, help='number of trial')
parser.add_argument('--optim', '-o', metavar='OPTIM', default='adam',
help='optimizer: '+' (default: adam)', choices=['adam'])
parser.add_argument('--dataset', '-d', metavar='DATASET', default='moon_two',
help='dataset: '+' (default: moon_two)')
parser.add_argument('--aug', action='store_true', default=False, help='control data aug or not')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=5, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=32, type=int,
metavar='N', help='mini-batch size (default: 225)')
parser.add_argument('--lr', '--learning-rate', default=0.0003, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--weight_l1', '--l1', default=1e-3, type=float,
metavar='W1', help='l1 regularization (default: 1e-3)')
parser.add_argument('--print-freq', '-p', default=50, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num_classes',default=5, type=int, help='number of classes in the model')
parser.add_argument('--attr_filters',default=(78, 1024, 4096))
parser.add_argument('--ckpt', default='ckpt', type=str, metavar='PATH',
help='path to save checkpoint (default: ckpt)')
parser.add_argument('--gpu',default=0, type=str, help='cuda_visible_devices')
parser.add_argument('--is_inception', help='is or not inception struction',
default=False, type=bool)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
def main(trial=1):
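    # Evaluate the best checkpoint of this trial on the held-out test split, copy mis-classified images and collect per-class probabilities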
global args, best_prec1, best_test_prec1
global acc1_tr, losses_tr
global losses_cl_tr
global acc1_val, losses_val, losses_et_val
global acc1_test, losses_test, losses_et_test
global weights_cl
args = parser.parse_args()
print(args)
attr_range = (0,78)
# Data loading code
# test data
df_test = pd.read_csv('input/train_imgs_gray_CE1_single_size/test_labels_{}_trial{}.csv'.format(256, trial))
test_img_path = 'input/train_imgs_gray_CE1_single_size/train_{}/'.format(256)
test_attr_path = 'input/train_attr_CE1/'
if args.aug:
pred_img_path = 'result_CV_CE1/pred_error_test_img_by_{}_{}_trial{}_aug/'.format(args.arch, args.model, trial)
else:
pred_img_path = 'result_CV_CE1/pred_error_test_img_by_{}_{}_trial{}/'.format(args.arch, args.model, trial)
if not os.path.exists(pred_img_path):
os.makedirs(pred_img_path)
img_path_test = []
attr_path_test = []
y_test = []
for f, tags in tqdm(df_test.values, miniters=100):
img_path = test_img_path + '{}.jpg'.format(f.split('.')[0])
img_path_test.append(img_path)
attr_path = test_attr_path + '{}.csv'.format(f)
attr_path_test.append(attr_path)
y_test.append(tags-1)
img_path_test, attr_path_test, y_test = shuffle(img_path_test, attr_path_test, y_test, random_state = 24)
print('Testing on {} samples\n'.format(len(img_path_test)))
testset = MoonData(img_path_test, attr_path_test, labels=y_test, attr_range=attr_range)
test_loader = data.DataLoader(testset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True)
# create model
if args.arch == 'resnet50':
print("Model: %s" %args.arch)
model = moon_net.resnet_50_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'resnet101':
print("Model: %s" %args.arch)
model = moon_net.resnet_101_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'resnet152':
print("Model: %s" %args.arch)
model = moon_net.resnet_152_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'densenet201':
print("Model: %s" %args.arch)
model = moon_net.densenet_201_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'senet':
print("Model: %s" %args.arch)
model = moon_net.senet_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'se_resnet152':
print("Model: %s" %args.arch)
model = moon_net.se_resnet152_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'se_resnet101':
print("Model: %s" %args.arch)
model = moon_net.se_resnet101_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'se_resnet50':
print("Model: %s" %args.arch)
model = moon_net.se_resnet50_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'se_resnext101':
print("Model: %s" %args.arch)
model = moon_net.se_resnext101_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'polynet':
print("Model: %s" %args.arch)
model = moon_net.polynet_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'inceptionv3':
print("Model: %s" %args.arch)
args.is_inception = False
model = moon_net.inceptionv3_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
elif args.arch == 'dpn68b':
print("Model: %s" %args.arch)
model = moon_net.dpn68b_two(num_classes=args.num_classes, attr_filters=args.attr_filters)
else:
assert(False)
if args.model == 'mt':
import copy
model_teacher = copy.deepcopy(model)
model_teacher = torch.nn.DataParallel(model_teacher).cuda()
model = torch.nn.DataParallel(model).cuda()
#print(model)
if args.aug:
ckpt_dir = args.ckpt+'_'+args.dataset+'_'+args.arch+'_'+args.model+'_aug'+'_'+args.optim
else:
ckpt_dir = args.ckpt+'_'+args.dataset+'_'+args.arch+'_'+args.model+'_'+args.optim
ckpt_dir = ckpt_dir + '_e%d'%(args.epochs)
cudnn.benchmark = True
# deifine loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss(size_average=False).cuda()
criterion_mse = nn.MSELoss(size_average=False).cuda()
criterion_kl = nn.KLDivLoss(size_average=False).cuda()
criterion_l1 = nn.L1Loss(size_average=False).cuda()
criterions = (criterion, criterion_mse, criterion_kl, criterion_l1)
'''pred test set one by one and save error pred img'''
if args.aug:
checkpoint = torch.load(os.path.join(ckpt_dir, args.arch.lower() + '_aug_trial{}'.format(trial) + '_best.pth.tar'))
prob_error_file = 'prob_error_test_img_{}_aug_trial{}.csv'.format(args.arch.lower(), trial)
prob_test_file = 'prob_test_img_{}_aug_trial{}.csv'.format(args.arch.lower(), trial)
else:
checkpoint = torch.load(os.path.join(ckpt_dir, args.arch.lower() + '_trial{}'.format(trial) + '_best.pth.tar'))
prob_error_file = 'prob_error_test_img_{}_trial{}.csv'.format(args.arch.lower(), trial)
prob_test_file = 'prob_test_img_{}_trial{}.csv'.format(args.arch.lower(), trial)
model.load_state_dict(checkpoint['state_dict'])
prec1_t_test, loss_t_test, pred_test, prob_test = validate(test_loader, model, criterions, args, 'test')
prob_test = np.array(prob_test)
prob_error = []
prob_img_name = []
prob_img_name_test = []
for ii in range(len(y_test)):
target = y_test[ii]
maxk = pred_test[ii]
prob = prob_test[ii]
img_path = img_path_test[ii]
img_path_temp = img_path.split('.')[0]
img_path_temp = img_path_temp.split('/')[-1]
prob_img_name_test.append(img_path_temp)
if target != maxk:
print(maxk, target)
print(img_path)
img_path_error = pred_img_path + img_path_temp + '_' + str(maxk+1) + '.jpg'
shutil.copyfile(img_path, img_path_error)
prob_error.append(prob)
prob_img_name.append(img_path_temp)
prob_error = np.array(prob_error)
prob_dict = {'p'+str(ii+1):prob_error.T[ii] for ii in range(args.num_classes)}
img_name_dict = {'img_name':prob_img_name}
prob_dict.update(img_name_dict)
    prob_df = pd.DataFrame(prob_dict)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Unofficial python API to datashake reviews API
(https://www.datashake.com/review-scraper-api)
This module makes it easier to schedule jobs and fetch the results
Official web API documentation: https://api.datashake.com/#reviews
You need to have datashake API key to use this module
Authors:
<NAME> (<EMAIL>)
"""
import time
import math
import re
import datetime
import json
import requests
import pandas as pd
def _prepare_date(from_date):
"""
Private function to prepare from_date by converting
it to YYYY-MM-DD format.
"""
# check if from_date was provided and if it was provided in the right
# format
from_date_str = None
if from_date is not None:
if not isinstance(from_date, str):
try:
from_date_str = from_date.strftime('%Y-%m-%d')
except AttributeError:
raise ValueError(
f"""from_date must be a string in the format YYYY-MM-DD
or datetime. String provided: {from_date}"
"""
)
else:
# regex template for YYYY-MM-DD
pattern = re.compile("\\d{4}-\\d{2}-\\d{2}")
match = pattern.match(from_date)
if match is None:
raise ValueError(
f"""from_date must be a string in the format YYYY-MM-DD \
or datetime. String provided: {from_date}"
"""
)
from_date_str = from_date[0:10]
return from_date_str
class APIConnectionError(Exception):
"""Exception to handle errors while connecting to API"""
class APIResponseError(Exception):
"""Exception to handle errors received from API"""
class DatashakeReviewAPI:
"""
Class to manage Datashake Review API (https://api.datashake.com/#reviews)
    Parameters
-----------
api_key : str, 40-symbol api key for Datashake Reviews.
Must be obtained on their website
    max_requests_per_second : number of requests allowed to be sent to
the API service per second.
Introduced to avoid 429 status code (Too Many Requests)
Link to the Datashake doc: https://api.datashake.com/#rate-limiting
language_code : str, default='en'. Language code of the reviews.
allow_response : boolean, default=True
min_days_since_last_crawl : int, default=3 - the number of days
that need to pass since the last crawl to launch another one
"""
def __init__(self, api_key, max_requests_per_second=10,
language_code='en', allow_response=True,
min_days_since_last_crawl=3):
self.api_key = str(api_key)
if len(self.api_key) != 40:
raise ValueError(f"""api_key must be 40 symbols long, \
the key provided was {len(self.api_key)} symbols long"\
""")
self.max_requests_per_second = max_requests_per_second
self.language_code = str(language_code)
self.allow_response = str(allow_response)
self.min_days_since_last_crawl = min_days_since_last_crawl
# setting up hidden attribues
self.__time_counter = 0 # counts in seconds
self.__requests_done = 0
self.reviews_per_page = 500
def __check_load_and_wait(self):
"""
Hidden method to check workload of requests to API
and wait to ensure the number of requests
sent to API stays within the threshold
Attribute max_requests_per_second regulates the behaviour
of this method.
More info here: https://api.datashake.com/#rate-limiting
"""
if self.__time_counter == 0:
self.__time_counter = time.perf_counter()
elif (time.perf_counter() - self.__time_counter) > 1.0:
self.__time_counter = time.perf_counter()
self.__requests_done = 1
elif self.__requests_done < self.max_requests_per_second:
self.__requests_done += 1
else:
wait_secs = 1.0 - (time.perf_counter() - self.__time_counter) + 0.1
print(f'API overload risk, waiting for {wait_secs} seconds')
time.sleep(wait_secs)
self.__requests_done = 1
self.__time_counter = time.perf_counter()
def get_job_status(self, job_id):
"""
Returns the status of the scheduled review job
Parameters
----------
job_id : str, identificator of the scheduled job
Returns
-------
Dictionary with the job status results. Example:
{'success': True,
'status': 200,
'job_id': 278171040,
'source_url': 'https://uk.trustpilot.com/review/uk.iqos.com',
'source_name': 'trustpilot',
'place_id': None,
'external_identifier': None,
'meta_data': None,
'unique_id': None,
'review_count': 3400,
'average_rating': 4.5,
'last_crawl': '2021-09-28',
'crawl_status': 'complete',
'percentage_complete': 100,
'result_count': 3401,
'credits_used': 3409,
'from_date': '2017-01-01',
'blocks': None}
"""
url = "https://app.datashake.com/api/v2/profiles/info"
querystring = {"job_id": str(job_id)}
headers = {
'spiderman-token': self.api_key,
}
self.__check_load_and_wait()
response = requests.request("GET", url, headers=headers,
params=querystring)
if response.ok is False:
error_str = 'API Connection Error. '
error_str += f"""Error code: {response.status_code} - \
{response.reason}. URL: {url}"""
raise APIConnectionError(error_str)
if response.json()['success'] is False:
error_str = 'API Response Error. '
error_str += f"{response.text}. Job ID: {job_id}. URL: {url}"
raise APIResponseError(error_str)
return response.json()
def get_job_reviews(self, job_id, from_date=None):
"""
Return job status and reviews scraped within the sepcified job if
job is finished.
If gob is not finished, the reviews results will be empty
Parameters
----------
job_id : str, identificator of the job_id that was scheduled to
scrape the reviews.
from_date : str or datetime, optional. If not provided, all reviews
will be queried.
If from date was provided while scheduling the job you can't get
any reviews before that date with this method.
Returns
-------
tuple containing:
dictionary with the job_status from the API
pandas Dataframe with reviews
"""
from_date_str = _prepare_date(from_date)
df_reviews = pd.DataFrame()
# Chekc the job status
job_status = self.get_job_status(job_id)
if not (job_status['success'] and
job_status['crawl_status'] == 'complete' and
job_status['review_count'] > 0):
# early exit
return (job_status, df_reviews)
# job complete, let's fetch all the results
review_count = job_status['review_count']
pages_count = math.trunc((review_count - 1) /
self.reviews_per_page) + 1
for page_num in range(1, pages_count + 2):
url = "https://app.datashake.com/api/v2/profiles/reviews"
querystring = {"job_id": str(job_id),
"language_code": self.language_code,
"page": str(page_num),
"per_page": self.reviews_per_page,
"allow_response": str(self.allow_response)
}
if from_date_str is not None:
querystring['from_date'] = from_date_str
headers = {
'spiderman-token': self.api_key,
}
self.__check_load_and_wait()
response = requests.request("GET", url, headers=headers,
params=querystring)
if response.ok is False:
error_str = 'API Connection Error. '
error_str += f"Error code: {response.status_code} - \
{response.reason}. URL: {url}"
raise APIConnectionError(error_str)
df = pd.DataFrame(json.loads(response.text))
df = df[['job_id', 'source_name', 'reviews']]
if len(df.index) == 0:
break
df = df.join(df['reviews'].apply(pd.Series), how='inner')
df.drop('reviews', axis=1, inplace=True)
df_reviews = df_reviews.append(df)
if df_reviews.index.size > 0:
df_reviews.set_index('unique_id', inplace=True)
return (job_status, df_reviews)
def schedule_job(self, review_url, from_date=None, previous_job_id=None):
"""
Schedules a new job to get reviews from the url provided.
Parameters
----------
review_url : str, url to the page with reveiws
from_date : str in format YYYY-MM-DD or datetime,
the start dat of the reviews to be collected. Defaults to None.
previous_job_id : str, id of the previous job that for this url.
Helps to scrape only delta ans save credits.
Returns
-------
Dictionary with the results of the call. Example:
{"success":true,"job_id":278171040,"status":200,
"message":"Added this profile to the queue..."
}
"""
from_date_str = _prepare_date(from_date)
# prepare the parameteres for the POST request
url = "https://app.datashake.com/api/v2/profiles/add"
querystring = {"url": review_url}
if from_date_str is not None:
querystring['from_date'] = from_date_str
if previous_job_id is not None:
querystring['diff'] = str(previous_job_id)
headers = {
'spiderman-token': self.api_key,
}
# POST request
self.__check_load_and_wait()
response = requests.request("POST", url, headers=headers,
params=querystring)
if response.ok is False:
error_str = 'API Connection Error. '
error_str += f"Error code: {response.status_code} - \
{response.reason}. URL: {url}"
raise APIConnectionError(error_str)
print(response.json())
return response.json()
def schedule_job_list(self, df_jobs_input):
"""
Schedule or refresh a list of jobs based on the csv file.
Save the results to the same file.
Parameters
----------
df_jobs_input :pandas.DataFrame with the list of jobs
to schedule/reschedule.
Returns
-------
Dataframe with the dataframe after update
"""
df_jobs = df_jobs_input.copy()
df_jobs.dropna(axis=0, how='any', subset=['url'], inplace=True)
for i in df_jobs.index:
# skip if not enough days passed since last crawl
            if pd.isnull(df_jobs.loc[i, 'status']):
""" This module takes care of getting the data, wherever it may be."""
from pandas_datareader import data
import datetime as dt
import pandas as pd
import numpy as np
from time import time
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
DATA_SOURCE = 'google'
SPY_CREATION_DATE = dt.datetime(1993, 1, 22)
START_DATE = SPY_CREATION_DATE
END_DATE = dt.datetime(2017, 1, 1)
SP500_TICKER = 'SPY'
DATE_COL = 'date'
FEATURE_COL = 'feature'
SP500_LIST_PATH = '../data/sp500_list.csv'
DATES_BATCH_DAYS = 3650
PATH_TO_PICKLE = '../data/data_df.pkl'
def download_capstone_data():
""" Gets the data for the capstone project and saves it in pickle format."""
try:
        data_df = pd.read_pickle(PATH_TO_PICKLE)
# -*- coding: utf-8 -*-
"""
modules for universal fetcher that gives historical daily data and realtime data
for almost everything in the market
"""
import requests
import time
import datetime as dt
import pandas as pd
from bs4 import BeautifulSoup
from functools import wraps
from xalpha.info import fundinfo, mfundinfo
from xalpha.cons import connection_errors
def rget(*args, **kws):
tries = 5
for count in range(tries):
try:
r = requests.get(*args, **kws)
return r
except connection_errors as e:
if count == tries - 1:
print(*args, sep="\n")
raise e
time.sleep(1)
def rpost(*args, **kws):
tries = 5
for count in range(tries):
try:
r = requests.post(*args, **kws)
return r
except connection_errors as e:
if count == tries - 1:
print(*args, sep="\n")
raise e
time.sleep(1)
def today_obj():
now = dt.datetime.today()
return now.replace(hour=0, minute=0, second=0, microsecond=0)
def tomorrow_ts():
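    # Unix timestamp (seconds) for this time tomorrow; multiplied by 1000 below to form the 'begin' cursor of the kline request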
dto = dt.datetime.now() + dt.timedelta(1)
return dto.timestamp()
def get_token():
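    # Grab a visitor token (xq_a_token) from the Xueqiu homepage cookies; needed for the kline API calls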
r = rget("https://xueqiu.com", headers={"user-agent": "Mozilla"})
return r.cookies["xq_a_token"]
def get_history(
code, prefix="SH", count=365, token="<KEY>"
):
url = "https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={prefix}{code}&begin={tomorrow}&period=day&type=before&count=-{count}"
data = rget(
url.format(
code=code, prefix=prefix, tomorrow=int(tomorrow_ts() * 1000), count=count
),
cookies={"xq_a_token": token},
headers={"user-agent": "Mozilla/5.0"},
)
return data.json()
def ts2pdts(ts):
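    # Convert a millisecond timestamp to a Beijing-time datetime with the time-of-day stripped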
tz_bj = dt.timezone(dt.timedelta(hours=8))
dto = dt.datetime.fromtimestamp(ts / 1000, tz=tz_bj).replace(tzinfo=None)
return dto.replace(
hour=0, minute=0, second=0, microsecond=0
    )  # Xueqiu US-market timestamps are stamped at 0:00 US time; converting back to the Beijing timezone and dropping the time-of-day makes the dates line up
def get_xueqiu(code, count):
r = get_history(code=code, prefix="", count=count, token=get_token())
df = pd.DataFrame(data=r["data"]["item"], columns=r["data"]["column"])
df["date"] = (df["timestamp"]).apply(ts2pdts) # reset hours to zero
return df
def get_cninvesting(curr_id, st_date, end_date):
r = rpost(
"https://cn.investing.com/instruments/HistoricalDataAjax",
data={
"curr_id": curr_id,
# "smlID": smlID, # ? but seems to be fixed with curr_id, it turns out it doesn't matter
"st_date": st_date,
"end_date": end_date,
"interval_sec": "Daily",
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data",
},
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\
AppleWebKit/537.36 (KHTML, like Gecko)",
"Host": "cn.investing.com",
"X-Requested-With": "XMLHttpRequest",
},
)
s = BeautifulSoup(r.text, "lxml")
dfdict = {}
cols = []
for col in s.find_all("th"):
dfdict[str(col.contents[0])] = []
cols.append(str(col.contents[0]))
num_cols = len(cols)
for i, td in enumerate(s.find_all("td")[:-5]):
if cols[i % num_cols] == "日期":
dfdict[cols[i % num_cols]].append(
dt.datetime.strptime(str(td.string), "%Y年%m月%d日")
)
else:
dfdict[cols[i % num_cols]].append(str(td.string))
return pd.DataFrame(dfdict)
def prettify(df):
_map = {
"日期": "date",
"收盘": "close",
"开盘": "open",
"高": "high",
"低": "low",
"涨跌幅": "percent",
}
df.rename(_map, axis=1, inplace=True)
if len(df) > 1 and df.iloc[1]["date"] < df.iloc[0]["date"]:
df = df[::-1]
df = df[["date", "open", "close", "high", "low", "percent"]]
for k in ["open", "close", "high", "low"]:
df[k] = df[k].apply(_float)
return df
def dstr2dobj(dstr):
if len(dstr.split("/")) > 1:
d_obj = dt.datetime.strptime(dstr, "%Y/%m/%d")
elif len(dstr.split(".")) > 1:
d_obj = dt.datetime.strptime(dstr, "%Y.%m.%d")
elif len(dstr.split("-")) > 1:
d_obj = dt.datetime.strptime(dstr, "%Y-%m-%d")
else:
d_obj = dt.datetime.strptime(dstr, "%Y%m%d")
return d_obj
def get_investing_id(suburl):
url = "https://cn.investing.com"
if not suburl.startswith("/"):
url += "/"
url += suburl
r = rget(
url,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36"
},
)
s = BeautifulSoup(r.text, "lxml")
pid = s.find("span", id="last_last")["class"][-1].split("-")[1]
return pid
def get_rmb(start=None, end=None, prev=360, currency="USD/CNY"):
"""
    Fetch the RMB exchange-rate central parity published on chinamoney.com.cn
:param start:
:param end:
:param prev:
:param currency:
:return: pd.DataFrame
"""
url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bk-ccpr/CcprHisNew?startDate={start_str}&endDate={end_str}¤cy={currency}&pageNum=1&pageSize=300"
if not end:
end_obj = today_obj()
else:
end_obj = dstr2dobj(end)
if not start:
start_obj = end_obj - dt.timedelta(prev)
else:
start_obj = dstr2dobj(start)
start_str = start_obj.strftime("%Y-%m-%d")
end_str = end_obj.strftime("%Y-%m-%d")
count = (end_obj - start_obj).days + 1
rl = []
if count <= 360:
r = rpost(url.format(start_str=start_str, end_str=end_str, currency=currency))
rl.extend(r.json()["records"])
else: # data more than 1 year cannot be fetched once due to API limitation
sepo_obj = end_obj
sepn_obj = sepo_obj - dt.timedelta(360)
# sep0_obj = end_obj - dt.timedelta(361)
while sepn_obj > start_obj: # [sepn sepo]
r = rpost(
url.format(
start_str=sepn_obj.strftime("%Y-%m-%d"),
end_str=sepo_obj.strftime("%Y-%m-%d"),
currency=currency,
)
)
rl.extend(r.json()["records"])
sepo_obj = sepn_obj - dt.timedelta(1)
sepn_obj = sepo_obj - dt.timedelta(360)
r = rpost(
url.format(
start_str=start_obj.strftime("%Y-%m-%d"),
end_str=sepo_obj.strftime("%Y-%m-%d"),
currency=currency,
)
)
rl.extend(r.json()["records"])
data = {"date": [], "close": []}
for d in rl:
data["date"].append( | pd.Timestamp(d["date"]) | pandas.Timestamp |
import pandas as pd
import numpy as np
import random
import plotly.express as px
import pickle
class ShapeEstimator:
def __init__(self, connections_file_name, duplicate_data=False, point_details_file_name=None, color_definitions_file=None, optimized_points_file=None):
self.data = self.load_data(connections_file_name)
self.duplicate_data = duplicate_data
if self.duplicate_data:
self.data = self.data_duplicator()
self.point_details_data = None
self.color_dictionary = {}
self.color_definition_column = None
if point_details_file_name is not None:
self.point_details_data = self.load_data(point_details_file_name)
if color_definitions_file is not None:
self.color_definitions_data = self.load_data(color_definitions_file)
self.color_definition_column = self.color_definitions_data.columns[0]
self.color_dictionary = self.create_color_dictionary()
if optimized_points_file is not None:
self.load_data_from_optimization_file(optimized_points_file)
self.unique_points = self.get_all_unique_points_codes()
self.connections_count = self.points_connections_counter()
self.best_point_hub_name = self.connections_count.head(1)['point_name'].item()
if optimized_points_file is None:
self.points_dictionary = self.create_points_dictionary()
self.optimize()
self.values, self.labels = self.split_points_dictionary()
self.normalize_values()
self.data_frame = self.create_dataframe_from_points_dictionary()
self.calculate_average_points_distance_from_the_center()
def load_data_from_optimization_file(self, optimized_points_file):
pickle_in = open(optimized_points_file, 'rb')
self.points_dictionary, cumulative_errors, max_errors, self.duplicate_data = pickle.load(pickle_in)
if self.duplicate_data:
self.data = self.data_duplicator()
self.show_optimization_stats(cumulative_errors, max_errors)
def data_duplicator(self):
return self.data.append(self.data, ignore_index=True)
@staticmethod
def load_data(file_name):
return pd.read_csv(file_name, sep=',')
def get_all_unique_points_codes(self):
return pd.unique(np.array(self.data[['departure_point', 'arrival_point']]).flatten())
def create_points_dictionary(self):
points_dictionary = {}
for point_name in self.unique_points:
points_dictionary[point_name] = [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2, (random.random() - 0.5) * 2]
return points_dictionary
def create_color_dictionary(self):
def get_values_from_columns(index):
return [self.color_definitions_data.iloc[i, index] for i in range(len(self.color_definitions_data.iloc[:, index]))]
keys = get_values_from_columns(0)
values = get_values_from_columns(1)
return dict(zip(keys, values))
def calculate_errors(self):
cumulative_error = 0
max_error = 0
for index, row in self.data.iterrows():
error = abs(self.calculate_euclidean_distance_between_two_points(self.points_dictionary[row['departure_point']], self.points_dictionary[row['arrival_point']]) - row['measurement_value'])
cumulative_error += error
if max_error < error:
max_error = error
return cumulative_error, max_error
def points_connections_counter(self):
connections_dictionary = dict(zip(self.unique_points, [0] * len(self.unique_points)))
for index, row in self.data.iterrows():
points = [row['departure_point'], row['arrival_point']]
for point in points:
connections_dictionary[point] += 1
        connections_count = pd.DataFrame(columns=['point_name', 'count'])
import pandas as pd
import utils as sru
import defs
import projections as fp
from datetime import datetime
import os
def run_rr_simulation(sewer_df, annual_replacements, startdate, results_dir = None,
return_snapshot=False):
sewers = sewer_df[:]
#prep data: assume that sewers with 9999 or UNK install year installed at 1900
#sewers['Year'] = sewers.Year.fillna(1900)
sewers.loc[pd.isnull(sewers.Year), 'Year'] = 1900
sewers.loc[sewers.Year > 9000, 'Year'] = 1900
#calculate the remaining useful years for each assets
sewers['RemainingLife'] = sewers.apply(lambda x: remaining_life_span(x, startdate), axis=1)
#sewers['Year'] = sewers.Year.replace( [9952., 9973., 9974., 9983., 9999.], 1900)
#create Dataframe to hold result for each year
res_columns = ['Year', 'AvgAge', 'AvgRemainingLife', 'MinRemainingLife',
'75thPercRemLife', '25thPercRemLife', 'AvgAgeOfReplaced',
'CumulativeMiles']
results_df = pd.DataFrame(columns=res_columns, data=None)
if results_dir:
xlpath = os.path.join(results_dir, '{}_{}.xlsx'.format(annual_replacements[0], startdate))
excelwriter = pd.ExcelWriter(xlpath)
#save the initial data set to the first sheet
sewers.sort_values('RemainingLife').to_excel(excelwriter, 'existing_assets')
#snapshots = {}
date = startdate
cumulative_miles = 0.0
for miles in annual_replacements:
if results_dir:
#save a snapshot of the data if a results_dir is provided
# fname = os.path.join(results_dir, '{}.csv'.format(date))
# sewers.to_csv(fname)
sheetname = '{}'.format(date)
sewers.sort_values('RemainingLife').to_excel(excelwriter, sheetname)
#measure this years stats before making improvements
avg_age = sru.average_sewer_age(sewers, datetime(date,1,1))
min_rem_life = sewers.RemainingLife.min()
percentile75 = sewers.RemainingLife.quantile(0.75)
percentile25 = sewers.RemainingLife.quantile(0.25)
#length weighted avg remaining useful life
avg_rem_life = (sewers.RemainingLife * sewers.Length).sum() / sewers.Length.sum()
#find candidates, and replace
repl = find_replacement_candidates(sewers, miles, date)
sewers = apply_replacements(sewers, repl, date)
#snapshots.update({date:repl}) #hold on to snapshots of each year
avg_age_replaced = sru.average_sewer_age(repl, datetime(date,1,1))
oldestreplaced = repl.Year.min()
#compute and record this year's results
res = [date, avg_age, avg_rem_life, min_rem_life, percentile75, percentile25, avg_age_replaced, cumulative_miles]
results_df = results_df.append(pd.DataFrame(columns = res_columns, data = [res]))
#increment the year that is currently being modeled and age each sewer segment
date += 1
sewers['RemainingLife'] = sewers['RemainingLife'] - 1
cumulative_miles += repl.Length.sum() / 5280.0
#compute the rate of aging (of the weighted average age of whole system)
results_df['AgeRate'] = results_df['AvgAge'].diff()
if results_dir:
excelwriter.save()
if return_snapshot:
return sewers
else:
return results_df.set_index('Year')
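# Illustrative usage sketch (assumptions: 'sewer_assets.csv' and the 20-miles-per-year
# schedule are made up; the frame must already carry the columns this module expects,
# e.g. Year, Material, Length and LinerDate).
def _example_rr_simulation():
    gis_sewers = pd.read_csv('sewer_assets.csv')
    schedule = [20.0] * 25  # replace 20 miles per year for 25 years
    results = run_rr_simulation(gis_sewers, schedule, startdate=2025)
    return results[['AvgAge', 'AvgRemainingLife', 'CumulativeMiles']]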
def remaining_life_span(asset, replacement_year):
lifespan = defs.lifespans.get(asset.Material, 150) #assume 150 if not found
age = replacement_year - asset.Year
remaininglife = lifespan - age
    if not pd.isnull(asset.LinerDate):
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
test = pd.read_csv("test.csv")
train = pd.read_csv("train.csv")
from sklearn.ensemble import RandomForestClassifier
modelo = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=0)
variaveis = ['Sex_binario','Age']
def change_str(value):
if value == 'female':
return 1
else:
return 0
train['Sex_binario'] = train['Sex'].map(change_str)
# ntrain = train.loc[:,variaveis].dropna()
X = train[variaveis].fillna(-1)
y = train['Survived']
modelo.fit(X,y)
# test set
test['Sex_binario'] = test['Sex'].map(change_str)
X_prev = test[variaveis].fillna(-1)
p = modelo.predict(X_prev)
surv = pd.Series(p, index=test['PassengerId'], name='Survived')
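# Follow-up sketch (assumption: a Kaggle-style submission file is wanted; the file
# name below is made up). Example: _write_submission(surv)
def _write_submission(predictions, path="submission.csv"):
    predictions.to_csv(path, header=True)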
"""
@author: Tim_Janke
"""
import numpy as np
import pandas as pd
#from helpers import emp_cdf
from models.igc import ImplicitGenerativeCopula, GMMNCopula, GenerativeMomentMatchingNetwork, GenerativeAdversarialNetwork
from models.mv_copulas import GaussianCopula
import statsmodels.api as sm
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from models.utils import cdf_interpolator
import os
import subprocess
import pyvinecopulib as pv
import copy
from statsmodels.distributions.empirical_distribution import ECDF as fit_ecdf
def run_experiment(data_train,
data_test,
margins_model="ECDF", # margins model for copula based methods
evaluate_cdf=False, # evaluate copula by ISE and IAE
evaluate_ed_cop=False, # evaluate copula distribution via energy distance
evaluate_ed_data=False, # evaluate data distribution via energy distance
evaluate_likelihood = False, # evaluate data distribution via KDE LogLik
n_eval = int(1e5), # number of points to generate for evaluation
IndepCop=False, GaussCop=False, VineCop=False, GMMNCop=False, GMMNFull=False, IGC=True, GAN=False, # models to use (GMMNFull and GAN operate on data distribution)
options_nn={"n_neurons": 100, "n_layers": 2, "n_samples_train": 200}, # options for NN architecture (for GMMN and IGC)
options_nn_training={"epochs": 500, "batch_size": 100}, # options for NN architecture (for GMMN and IGC)
options_gan={"n_neurons": 100, "n_layers": 2}, # options for GAN architecture
options_gan_training={"epochs": 500, "batch_size": 100}, # options for GAN architecture
bw_kde = None, # bandwidth for KDE
                   fit_margins_on_train_and_test=False  # if True, margins are fitted on the complete data set, otherwise only on the training data
):
""" Evaluate models for given test and training data set."""
print("Training models ...")
if fit_margins_on_train_and_test:
data_test_margins= data_test
else:
data_test_margins = None
models_joint, models_margins = fit_models(data_train=data_train,
data_test=data_test_margins, # only used for fitting marginal models if not None
margins_model=margins_model,
IndepCop=IndepCop,
GaussCop=GaussCop,
VineCop=VineCop,
GMMNCop=GMMNCop,
GMMNFull=GMMNFull,
GAN=GAN,
IGC=IGC,
options_nn=options_nn,
options_nn_training=options_nn_training,
options_gan=options_gan,
options_gan_training=options_gan_training)
print("Done.\n")
print("Sampling data for evalutation...")
data_models_v, data_models_y = make_data_eval(models_joint, models_margins, n=n_eval)
print("Done.\n")
print("Computing evaluation metrics...")
all_scores = pd.DataFrame(index=models_joint.keys())
if evaluate_cdf:
print("ISE and IAE...")
pobs_test = []
for i in range(data_test.shape[1]):
pobs_test.append(models_margins[i].cdf(data_test[:,i]))
pobs_test = np.column_stack(pobs_test)
cdf_vals_test = emp_cdf(pobs_test,pobs_test)
cdf_vals_models = {}
for key_i, v_i in data_models_v.items():
cdf_vals_models[key_i] = emp_cdf(v=pobs_test, u=v_i)
ise, iae = eval_copula_cdf(cdf_vals_models, cdf_vals_test)
all_scores["ise"] = pd.Series(ise)
all_scores["iae"] = pd.Series(iae)
if evaluate_ed_cop:
print("ED unit space...")
ed = eval_energy_distance(data_models_v, data_test, standardize=False)
all_scores["ED_unitspace"] = pd.Series(ed)
if evaluate_ed_data:
print("ED data space...")
ed = eval_energy_distance(data_models_y, data_test, standardize=True)
all_scores["ED_dataspace"] = pd.Series(ed)
if evaluate_likelihood:
print("LogLikelihood data space...")
ll = eval_likelihood(data_models_y, data_test, bw=bw_kde)
all_scores["NLL_dataspace"] = pd.Series(ll)
print("All done.\n")
return all_scores, data_models_v, data_models_y, models_joint, models_margins
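# Illustrative usage sketch (assumptions: the synthetic 3-d Gaussian sample, the
# train/test split and the switched-on models are made up for demonstration; only
# the Gaussian copula is fitted here to keep the run light).
def _example_run_experiment():
    rng = np.random.default_rng(0)
    cov = np.array([[1.0, 0.6, 0.3], [0.6, 1.0, 0.5], [0.3, 0.5, 1.0]])
    data = rng.multivariate_normal(np.zeros(3), cov, size=3000)
    scores, *_ = run_experiment(data[:2000], data[2000:],
                                GaussCop=True, IGC=False,
                                evaluate_ed_data=True, n_eval=int(1e4))
    return scores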
class IdentityMargins(object):
def __init__(self):
pass
def fit(self):
pass
def cdf(self, x):
return x
def icdf(self, tau):
return self.ppf(tau)
def ppf(self, tau):
return tau
def fit_models(data_train, data_test=None, margins_model="ECDF", IndepCop=False, GaussCop=False, VineCop=True, GMMNCop=True, GMMNFull=False, IGC=True, GAN=False,
options_nn={"n_neurons": 100, "n_layers": 2, "n_samples_train": 200},
options_nn_training={"epochs": 500, "batch_size": 100},
options_gan={"n_neurons": 50, "n_layers": 1},
options_gan_training={"epochs": 100, "batch_size": 100},
):
""" Trains models for given data set"""
models_margins=[]
pobs_train=[]
for i in range(data_train.shape[1]):
if data_test is not None:
data_train_i = np.concatenate((data_train[:,i], data_test[:,i]), axis=0)
else:
data_train_i = data_train[:,i]
if margins_model == "identity":
# if margins are assumed to be uniform already
mdl_i = IdentityMargins()
elif margins_model == "ECDF":
#linear interpolation of empirical marginal CDFs
mdl_i = cdf_interpolator(data_train_i,
kind="linear",
x_min=np.min(data_train_i)-np.diff(np.sort(data_train_i)[0:2])[0],
x_max=np.max(data_train_i)+np.diff(np.sort(data_train_i)[-2:])[0])
# elif margins_model == "KDE":
# mdl_i = sm.nonparametric.KDEUnivariate(data_train_i)
# mdl_i.fit()
else:
raise ValueError("Unknown model type for margins.")
models_margins.append(mdl_i)
pobs_train.append(mdl_i.cdf(data_train[:,i]))
pobs_train = np.column_stack(pobs_train)
models_joint = {}
# Independence Copula
if IndepCop:
models_joint["indep"] = None
# Gaussian Copula
if GaussCop:
cop_gauss = GaussianCopula()
cop_gauss.fit(pobs_train)
models_joint["gauss"] = cop_gauss
# Vine Copula (TLL2)
if VineCop:
controls_tll = pv.FitControlsVinecop(family_set=[pv.BicopFamily.tll])
cop_tll = pv.Vinecop(data=pobs_train, controls=controls_tll)
models_joint["vine_tll2"] = cop_tll
    pobs_train_nn = pobs_train[0:int(np.floor(len(pobs_train)/options_nn_training["batch_size"])*options_nn_training["batch_size"])]  # truncate so the training data divides evenly into batches
# IGC model
if IGC:
cop_igc = ImplicitGenerativeCopula(dim_latent=pobs_train.shape[1]*3, dim_out=pobs_train.shape[1], **options_nn)
cop_igc.fit(pobs_train_nn, **options_nn_training)
models_joint["igc"] = cop_igc
# GMMN copula
if GMMNCop:
cop_gmmn = GMMNCopula(dim_latent=pobs_train.shape[1]*3, dim_out=pobs_train.shape[1], **options_nn)
cop_gmmn.fit(pobs_train_nn, **options_nn_training)
models_joint["gmmn_cop"] = cop_gmmn
# GMMN with ED loss (models both margins and joint at once)
if GMMNFull:
gmmn = GenerativeMomentMatchingNetwork(dim_latent=pobs_train.shape[1]*3, dim_out=pobs_train.shape[1], **options_nn)
gmmn.fit(data_train, **options_nn_training)
models_joint["gmmn_full"] = gmmn
# GAN (models both margins and joint at once)
if GAN:
gan = GenerativeAdversarialNetwork(dim_latent=pobs_train.shape[1]*3, dim_out=pobs_train.shape[1], **options_gan)
gan.fit(data_train, **options_gan_training)
models_joint["gan"] = gan
return models_joint, models_margins
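# Minimal sketch of calling fit_models directly (assumptions: toy 2-d Gaussian data;
# only the Gaussian copula with ECDF margins is switched on).
def _example_fit_models():
    rng = np.random.default_rng(1)
    z = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.8], [0.8, 1.0]], size=1000)
    joint, margins = fit_models(z, GaussCop=True, VineCop=False, GMMNCop=False, IGC=False)
    _, samples = make_data_eval(joint, margins, n=5000)
    return samples["gauss"]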
def make_data_eval(models_joint, models_margins, n=int(1e5)):
data_v = {}
data_y = {}
# generate samples in unit space
for key_i, mdl_i in models_joint.items():
if key_i == "gmmn_cop":
data_v[key_i] = mdl_i.simulate(n)
elif key_i == "gmmn_full":
data_v[key_i] = None
elif key_i == "gan":
data_v[key_i] = None
elif key_i == "indep":
data_v[key_i] = np.random.uniform(0.0,1.0,size=(n, len(models_margins)))
else:
data_v[key_i] = mdl_i.simulate(n)
# obtain samples in data space by transforming samples componentwise via the inverse cdf
for key_i, v_i in data_v.items():
if key_i == "gmmn_full":
data_y[key_i] = models_joint["gmmn_full"].simulate(n)
data_v[key_i] = models_joint["gmmn_full"]._to_pobs(data_y[key_i])
elif key_i == "gan":
data_y[key_i] = models_joint["gan"].simulate(n)
data_v[key_i] = models_joint["gan"]._to_pobs(data_y[key_i])
else:
y=[]
for j, mdl_j in enumerate(models_margins):
y.append(mdl_j.icdf(v_i[:,j]))
data_y[key_i] = np.column_stack(y)
return data_v, data_y
def eval_copula_cdf(cdf_vals_models, cdf_vals_test):
# compute ISE and IAE from cdf values
ise = {}
iae = {}
for key_i, cdf_i in cdf_vals_models.items():
eps = cdf_vals_test-cdf_i
iae[key_i] = np.sum(np.abs(eps))
ise[key_i] = np.sum(np.square(eps))
return ise, iae
def eval_likelihood(data_models, data_test, bw, n_eval=10000):
# evaluate likelihood of test data under KDE based likelihood from trained models
nll={}
if bw is None:
grid_cv = GridSearchCV(KernelDensity(), param_grid={"bandwidth": np.logspace(-1.0,1.0,10)}) # use CV to find best bandwidth on the test data
grid_cv.fit(data_test)
bw_opt = grid_cv.best_params_["bandwidth"]
print(bw_opt)
elif isinstance(bw, (list, tuple, np.ndarray)):
grid_cv = GridSearchCV(KernelDensity(), param_grid={"bandwidth": bw}) # use CV to find best bandwidth on the test data
grid_cv.fit(data_test)
bw_opt = grid_cv.best_params_["bandwidth"]
print(bw_opt)
elif isinstance(bw, float):
bw_opt = bw
for key_i, y_i in data_models.items():
kde_model = KernelDensity(bandwidth=bw_opt).fit(y_i)
nll[key_i] = -np.mean(kde_model.score_samples(data_test[0:n_eval])) # compute likelihood of test data under KDE
return nll
def eval_energy_distance(data_models, data_test, standardize=False, n_eval=int(5e3)):
if standardize:
means = np.expand_dims(np.mean(data_test, axis=0),0)
stds = np.expand_dims(np.std(data_test, axis=0),0)
else:
means = np.zeros((1, data_test.shape[1]))
stds = np.ones((1, data_test.shape[1]))
ed_df={}
for key_i, y_i in data_models.items():
ed_df[key_i] = energy_distance(X=(data_test[0:n_eval,:]-means)/stds, Y=(y_i[0:n_eval,:]-means)/stds)
return ed_df
def energy_distance(X,Y):
# X,Y are shape NxD with N samples and D dimensions
n_x = X.shape[0]
n_y = Y.shape[0]
X = np.expand_dims(X.T,0) # (N_x,D) --> (1,D,N_x)
Y = np.expand_dims(Y.T,0) # (N_y,D) --> (1,D,N_y)
ed_xx = np.sum(np.sqrt(np.sum(np.square(X - np.repeat(np.transpose(X, axes=(2,1,0)), repeats=n_x, axis=2)), axis=1)))
ed_yy = np.sum(np.sqrt(np.sum(np.square(Y - np.repeat(np.transpose(Y, axes=(2,1,0)), repeats=n_y, axis=2)), axis=1)))
ed_xy = np.sum(np.sqrt(np.sum(np.square(Y - np.repeat(np.transpose(X, axes=(2,1,0)), repeats=n_y, axis=2)), axis=1)))
    # unbiased estimator: each within-sample term is normalised by its own sample size
    return 2*ed_xy/(n_x*n_y) - ed_yy/(n_y*(n_y-1)) - ed_xx/(n_x*(n_x-1))
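# Quick sanity check (illustrative): the energy distance between two samples drawn
# from the same distribution should be close to zero and grow once they separate.
def _energy_distance_demo():
    rng = np.random.default_rng(2)
    x = rng.normal(size=(500, 2))
    return (energy_distance(x, rng.normal(size=(500, 2))),
            energy_distance(x, rng.normal(loc=2.0, size=(500, 2))))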
###### functions for generating random copulas and vines ####
def random_bicop(family):
""" sample a copula with random parameters for given family """
if family == "gaussian":
return {"family": family, "rotation": 0, "parameters": np.random.choice((-1,1))*np.random.uniform(0.5,0.95,1)}
elif family == "student":
return {"family": family, "rotation": 0, "parameters": np.array([np.random.choice((-1,1))*np.random.uniform(0.5,0.95), np.random.uniform(2.0, 10.0)])}
    elif family in ("clayton", "gumbel", "frank", "joe"):
if family == "frank":
return {"family": family, "rotation": 0, "parameters": np.random.uniform(10.0, 25.0, 1)}
else:
return {"family": family, "rotation": np.random.choice((0, 90, 180, 270)), "parameters": np.random.uniform(2.0,10.0,1)}
elif family == "bb1":
return {"family": family, "rotation": np.random.choice((0, 90, 180, 270)), "parameters": np.array([np.random.uniform(1.0,5.0), np.random.uniform(1.0, 5.0)])}
elif family=="bb7":
return {"family": family, "rotation": np.random.choice((0, 90, 180, 270)), "parameters": np.array([np.random.uniform(1.0,6.0), np.random.uniform(2.0, 20.0)])}
elif family == "indep":
return {"family": family}
else:
raise ValueError("Unknown copula family.")
def random_tree(dim, families):
""" create a tree with random families and random parameters"""
trees = []
for d_i in range(dim-1):
tree_i=[]
for j in range(dim-d_i-1):
tree_i.append(random_bicop(np.random.choice(families)))
trees.append(tree_i)
return trees
def make_random_vinecopula(dim=3, families=["gaussian", "student", "clayton", "gumbel", "frank", "joe", "bb1", "bb7", "indep"]):
""" creates a dictionary with info for a random vine copula """
vine_cop = {}
#vine_cop["vine_class"] = "rvine"
vine_cop["structure"] = pv.RVineStructure.simulate(dim)
vine_cop["pair_copulas"] = get_pvtrees(random_tree(dim, families))
#vine_cop["d"] = dim
return vine_cop
def get_pvcopfamily(family):
""" maps strings to pyvinecopulib bivaraite families """
if family == "gaussian":
return pv.BicopFamily.gaussian
elif family == "student":
return pv.BicopFamily.student
elif family == "clayton":
return pv.BicopFamily.clayton
elif family == "gumbel":
return pv.BicopFamily.gumbel
elif family == "frank":
return pv.BicopFamily.frank
elif family == "joe":
return pv.BicopFamily.joe
elif family == "bb1":
return pv.BicopFamily.bb1
elif family == "bb7":
return pv.BicopFamily.bb7
elif family == "indep":
return pv.BicopFamily.indep
else:
raise ValueError("Unknown copula family.")
def get_pvtrees(trees):
""" creates pyvinecopulib tree list """
_trees = copy.deepcopy(trees)
tree_list = []
for tree_i in _trees:
cops = []
for cop_j in tree_i:
cop_j["family"] = get_pvcopfamily(cop_j["family"])
cops.append(pv.Bicop(**cop_j))
tree_list.append(cops)
return tree_list
### more helper functions ###
def emp_cdf(v, u):
""" evaluate the empirical copula at points v using the samples u"""
# cdf is evaluated at points v, v has to be a MxD vector in [0,1]^D, cdf is evaluated at these points
# u are samples from model NxD vector in [0,1]^D, u should be very large
# larger u will lead to better estimation of the empirical copula but slows down computation
cdf_vals = np.empty(shape=(len(v)))
for i in range(v.shape[0]):
cdf_vals[i] = np.sum(np.all(u<=v[[i],:], axis=1))
return cdf_vals/len(u)
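# Example (illustrative): for independent uniforms the empirical copula at (0.5, 0.5)
# should be close to 0.5 * 0.5 = 0.25.
def _emp_cdf_demo():
    rng = np.random.default_rng(3)
    u = rng.uniform(size=(10000, 2))
    return emp_cdf(np.array([[0.5, 0.5]]), u)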
def beta_copula_cdf(u_train, u_test, rscript_path):
    # rscript_path is usually something like "C:/Users/USERNAME/R/R-4.0.4/bin/Rscript.exe"
# write csv files
pd.DataFrame(u_train).to_csv("R/_u_train_bicop.csv", header=False, index=False)
pd.DataFrame(u_test).to_csv("R/_u_test_bicop.csv", header=False, index=False)
# run R script
subprocess.run([rscript_path, "R/_beta_copula_cdf.R"]) # TODO: assumes that R script is in current working directory
# read results from R script
cdf_beta = pd.read_csv("R/_cdf_beta.csv", header=None, index_col=False) # TODO: assumes that csv is in current working directory
# remove csv files
os.remove("R/_u_train_bicop.csv")
os.remove("R/_u_test_bicop.csv")
os.remove("R/_cdf_beta.csv")
return np.squeeze(cdf_beta.values)
def gaussian_copula_cdf(u_train, u_test, rscript_path):
    # rscript_path is usually something like "C:/Users/USERNAME/R/R-4.0.4/bin/Rscript.exe"
# write csv files
pd.DataFrame(u_train).to_csv("R/_u_train_gausscop.csv", header=False, index=False)
pd.DataFrame(u_test).to_csv("R/_u_test_gausscop.csv", header=False, index=False)
# run R script
subprocess.run([rscript_path, "R/_gaussian_copula_cdf.R"]) # TODO: assumes that R script is in current working directory
# read results from R script
cdf_gauss = pd.read_csv("R/_cdf_gausscop.csv", header=None, index_col=False) # TODO: assumes that csv is in current working directory
    pdf_gauss = pd.read_csv("R/_pdf_gausscop.csv", header=None, index_col=False)
"""
Models for classical Generalised Quadratic Discriminant Analysis (Bose et al.
2015) and Robust Generalised Quadratic Discriminant Analysis (Ghosh et al.
2020), the latter using various robust estimators.
"""
import numpy as np
import pandas as pd
from ._algo_utils import fit_t
from ._models_lda import QDA
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
# Import R libraries for robust estimators
pandas2ri.activate()
r = robjects.r
psych = importr('psych')
rrcov = importr('rrcov')
SpatialNP = importr('SpatialNP')
LaplacesDemon = importr('LaplacesDemon')
class GQDA(QDA):
"""Generalised Quadratic Discriminant Analysis.
    See `_models_lda.QDA` for more details. Inherits from QDA and, on top of the
    classical estimates, fits one additional parameter `c_` by minimising the
    misclassification error, which extends the discriminant rule to a larger
    class of distributions. The method 'generalised' must be used to benefit
    from this. When `c_ = 1`, this is equivalent to QDA.
"""
def __init__(self):
super().__init__(method='generalised')
def _bose_k(self):
""" Generalised discriminant coefficient according to
Bose et al. (2015).
"""
return np.array([0.5/self.c_])
def fit(self, X, y, c_=None):
"""Fit GQDA model parameters according to data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
c_ : float, default=None.
The generalised coefficient. If set, don't fit this parameter.
If not, estimate using method of error minimisation.
"""
super().fit(X,y) #Kx[n_k, M]
if c_ is not None:
self.c_ = c_
return self
uijs = [np.zeros((self.X_classes_[k].shape[0], self._K, self._K))
for k in range(self._K)] #Kx[n_kxIxJ]
sij = np.zeros((self._K,self._K))
logdets = np.log(np.linalg.det(self.covariance_)) #K,
for i in range(self._K):
for j in range(self._K):
dij_on_i = self._mahalanobis(self.X_classes_[i], ki=j) \
- self._mahalanobis(self.X_classes_[i], ki=i) #Kxn_i
dij_on_j = self._mahalanobis(self.X_classes_[j], ki=j) \
- self._mahalanobis(self.X_classes_[j], ki=i) #Kxn_j
sij[i,j] = logdets[j] - logdets[i]
uijs[i][:, i, j] = dij_on_i / sij[i,j]
uijs[i][:, j, j] = np.inf
uijs[j][:, i, j] = dij_on_j / sij[i,j]
T = []
for uij in uijs:
T.append(uij[(uij > 0) * (uij<1)])
T = np.sort(np.concatenate(T))
T = np.concatenate([np.array([0]), T])
#print(T)
MCc = np.zeros((len(T)))
for e,c_ in enumerate(T):
for i in range(self._K):
Rijc = []
for j in range(self._K):
if i==j: continue
p = uijs[i][:, i,j]
to_app = p > -c_ if sij[i,j]>0 else p < -c_
Rijc.append(self.X_classes_[i][to_app])
Rijc = np.vstack(Rijc)
Ric = np.unique(Rijc, axis=0)
#print(Ric.shape, Rijc.shape)
lenRic = Ric.shape[0]
MCic = self.X_classes_[i].shape[0] - lenRic
#print(MCic, Ric.shape)
MCc[e] += MCic
#return uijs, MCc, T
c_star = T[MCc.argmin()]
self.c_ = c_star if c_star > 0 else 0.001
print("optimal c is", c_star)
return self
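# Illustrative usage sketch (assumptions: the QDA base class in `_models_lda` accepts
# plain numpy arrays like scikit-learn estimators; the two-class Gaussian toy data is
# made up for demonstration only).
def _example_gqda():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.multivariate_normal([0, 0], np.eye(2), 100),
                   rng.multivariate_normal([3, 3], 2 * np.eye(2), 100)])
    y = np.array([0] * 100 + [1] * 100)
    clf = GQDA().fit(X, y)   # estimates the generalised coefficient c_
    return clf.c_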
class RGQDA(GQDA):
"""Robust Generalised Quadratic Discriminant Analysis.
See `GQDA` for more details. Inherits from GQDA and replaces classical
mean and covariance estimation with robust estimators, as used by
Ghosh et al. (2020). Note that when `c_ = 1`, this becomes classical
QDA with robust estimators.
Additional Parameters
---------------------
estimation : str, {'gaussian', 't-EM', 'winsorised', 'MVE', 'MCD',
'M-estimator', 'S-estimator', 'SD-estimator'},
default='gaussian'
Method of robust estimation.
"""
def __init__(self, estimation='gaussian'):
super().__init__()
self.estimation = estimation
def _estimate_t_EM(self, X):
"""Estimate by fitting t-distribution using EM
"""
return fit_t(X) #discarding dof parameters
def _estimate_gaussian_MLE(self, X):
"""Estimate by fitting Gaussian according to the MLE
"""
return [X.mean(axis=0), np.cov(X.T)]
def _get_r_frame(self, X):
"""Prepare data for passing into R
"""
        return pandas2ri.py2rpy(pd.DataFrame(X))
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
        # the dtype cannot be inferred correctly here: the mapped value is int,
        # but the result is actually float because unmapped entries become NaN
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
    # multiple chunks
df = from_pandas_df(raw, chunk_size=6)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# dict type
df = from_pandas_df(raw, chunk_size=5)
r = df.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
pd.testing.assert_frame_equal(expected, result)
# test arrow_string dtype
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c1': 'arrow_string'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'arrow_string'})
pd.testing.assert_frame_equal(expected, result)
# test series
s = pd.Series(rs.randint(5, size=20))
series = from_pandas_series(s)
r = series.astype('int32')
result = r.execute().fetch()
expected = s.astype('int32')
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s, chunk_size=6)
r = series.astype('arrow_string')
result = r.execute().fetch()
expected = s.astype('arrow_string')
pd.testing.assert_series_equal(result, expected)
# test index
raw = pd.Index(rs.randint(5, size=20))
mix = from_pandas_index(raw)
r = mix.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_index_equal(result, expected)
    # multiple chunks
series = from_pandas_series(s, chunk_size=6)
r = series.astype('str')
result = r.execute().fetch()
expected = s.astype('str')
pd.testing.assert_series_equal(result, expected)
# test category
raw = pd.DataFrame(rs.randint(3, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=5)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=3)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=6)
r = df.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c2': 'category'})
result = r.execute().fetch()
expected = raw.astype({'c2': 'category'})
pd.testing.assert_frame_equal(expected, result)
# test series category
raw = pd.Series(np.random.choice(['a', 'b', 'c'], size=(10,)))
series = from_pandas_series(raw, chunk_size=4)
result = series.astype('category').execute().fetch()
expected = raw.astype('category')
| pd.testing.assert_series_equal(expected, result) | pandas.testing.assert_series_equal |
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
from helperFunctions.myFunctions_helper import *
import numpy as np
import pandas as pd
import fileinput
from itertools import product
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import PDBList
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
# compute cross Q for every pdb pair in one folder
# parser = argparse.ArgumentParser(description="Compute cross q")
# parser.add_argument("-m", "--mode",
# type=int, default=1)
# args = parser.parse_args()
def getFromTerminal(CMD):
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
def read_hydrophobicity_scale(seq, isNew=False):
seq_dataFrame = pd.DataFrame({"oneLetterCode":list(seq)})
HFscales = pd.read_table("~/opt/small_script/Whole_residue_HFscales.txt")
if not isNew:
# Octanol Scale
# new and old difference is at HIS.
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS+" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
else:
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS0" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
HFscales_with_oneLetterCode = HFscales.assign(oneLetterCode=HFscales.AA.str.upper().map(code)).dropna()
data = seq_dataFrame.merge(HFscales_with_oneLetterCode, on="oneLetterCode", how="left")
return data
def create_zim(seqFile, isNew=False):
a = seqFile
seq = getFromTerminal("cat " + a).rstrip()
data = read_hydrophobicity_scale(seq, isNew=isNew)
z = data["DGwoct"].values
np.savetxt("zim", z, fmt="%.2f")
def expand_grid(dictionary):
return pd.DataFrame([row for row in product(*dictionary.values())],
columns=dictionary.keys())
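# Minimal usage sketch of expand_grid (values are illustrative, not from any real run):
# it builds the Cartesian product of the dict values, one column per key, mirroring
# R's expand.grid. Wrapped in a function so nothing executes on import.
def _example_expand_grid():
    grid = expand_grid({"perturbation": [0, 1], "force": ["0.0", "0.1"]})
    # -> 4 rows: (0, "0.0"), (0, "0.1"), (1, "0.0"), (1, "0.1")
    return grid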
def duplicate_pdb(From, To, offset_x=0, offset_y=0, offset_z=0, new_chain="B"):
with open(To, "w") as out:
with open(From, "r") as f:
for line in f:
tmp = list(line)
atom = line[0:4]
atomSerialNumber = line[6:11]
atomName = line[12:16]
atomResidueName = line[17:20]
chain = line[21]
residueNumber = line[22:26]
# change chain A to B
# new_chain = "B"
tmp[21] = new_chain
if atom == "ATOM":
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
# add 40 to the x
new_x = x + offset_x
new_y = y + offset_y
new_z = z + offset_z
tmp[30:38] = "{:8.3f}".format(new_x)
tmp[38:46] = "{:8.3f}".format(new_y)
tmp[46:54] = "{:8.3f}".format(new_z)
a = "".join(tmp)
out.write(a)
def compute_native_contacts(coords, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
n = len(dis)
remove_band = np.eye(n)
for i in range(1, MAX_OFFSET):
remove_band += np.eye(n, k=i)
remove_band += np.eye(n, k=-i)
dis[remove_band==1] = np.max(dis)
native_contacts = dis < DISTANCE_CUTOFF
return native_contacts.astype("int")
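# Note on the definition above: residue pairs closer than DISTANCE_CUTOFF (9.5,
# presumably in angstroms) count as native contacts, and pairs within MAX_OFFSET
# positions along the sequence are masked out so trivial short-range neighbours
# are excluded from the contact map.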
def compute_contacts(coords, native_contacts, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
    contacts = dis < DISTANCE_CUTOFF
    contacts = contacts*native_contacts  # remove non-native contacts
    return np.sum(contacts, axis=1).astype("float")
def compute_localQ_init(MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
from pathlib import Path
home = str(Path.home())
struct_id = '2xov'
filename = os.path.join(home, "opt/pulling/2xov.pdb")
p = PDBParser(PERMISSIVE=1)
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
dis = []
all_res = []
for res in chain:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res.get_resname()=='GLY'):
native_coords.append(res['CA'].get_coord())
elif (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
native_coords.append(res['CB'].get_coord())
else:
print('ERROR: irregular residue at %s!' % res)
exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
return native_contacts_table
def compute_localQ(native_contacts_table, pre=".", ii=-1, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_contacts = np.sum(native_contacts_table, axis=1).astype("float")
dump = read_lammps(os.path.join(pre, f"dump.lammpstrj.{ii}"), ca=False)
localQ_list = []
for atom in dump:
contacts = compute_contacts(np.array(atom), native_contacts_table, DISTANCE_CUTOFF=DISTANCE_CUTOFF)
c = np.divide(contacts, native_contacts, out=np.zeros_like(contacts), where=native_contacts!=0)
localQ_list.append(c)
data = pd.DataFrame(localQ_list)
data.columns = ["Res" + str(i+1) for i in data.columns]
data.to_csv(os.path.join(pre, f"localQ.{ii}.csv"), index=False)
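# Hypothetical call sequence (replica index illustrative): build the native-contact
# table once, then write per-residue Q for replica 3 of the current folder.
# native_table = compute_localQ_init()
# compute_localQ(native_table, pre=".", ii=3)   # -> ./localQ.3.csv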
def readPMF_basic(pre):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys())
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(location)
name_list = ["f", "df", "e", "s"]
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
            data = pd.read_table(location, skiprows=2, sep=r'\s+', names=names).assign(upOrDown=upOrDown, change=change, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def make_metadata_3(k=1000.0, temps_list=["450"], i=-1, biasLow=None, biasHigh=None):
print("make metadata")
cwd = os.getcwd()
files = glob.glob(f"../data_{i}/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in sorted(files):
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
if biasLow:
if float(bias) < biasLow:
continue
if biasHigh:
if float(bias) > biasHigh:
continue
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
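# Worked example of the parsing above (file name illustrative): a file named
# t_450_dis_30.0.dat splits on "_" into ["t", "450", "dis", "30.0"], so t="450" and
# bias="30.0", and the emitted metadata line is "../<path> 450 <kconstant> 30.0".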
def readPMF(pre, is2d=False, force_list=["0.0", "0.1", "0.2"]):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys()),
"force":force_list
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
force = row["force"]
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/force_{force}/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/force_{force}/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(pmf_list)
name_list = ["f", "df", "e", "s"]
if is2d:
names = ["x", "y"] + name_list
else:
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
            data = pd.read_table(location, skiprows=2, sep=r'\s+', names=names).assign(upOrDown=upOrDown, change=change, force=force, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def readPMF_2(pre, is2d=0, force_list=["0.0", "0.1", "0.2"]):
if is2d:
print("reading 2d pmfs")
else:
print("reading 1d dis, qw and z")
if is2d == 1:
mode_list = ["2d_qw_dis", "2d_z_dis", "2d_z_qw"]
elif is2d == 2:
mode_list = ["quick"]
else:
mode_list = ["1d_dis", "1d_qw", "1d_z"]
all_data_list =[]
for mode in mode_list:
tmp = readPMF(mode, is2d, force_list).assign(mode=mode)
all_data_list.append(tmp)
return pd.concat(all_data_list).dropna().reset_index()
def shrinkage(n=552, shrink_size=6, max_frame=2000, fileName="dump.lammpstrj"):
print("Shrinkage: size: {}, max_frame: {}".format(shrink_size, max_frame))
bashCommand = "wc " + fileName
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
line_number = int(output.decode("utf-8").split()[0])
print(line_number)
print(line_number/552)
    # number of atoms = 543; with the 9 dump header lines that is 552 lines per frame
    n = 552
count = 0
with open("small.lammpstrj", "w") as out:
with open(fileName, "r") as f:
for i, line in enumerate(f):
if (i // n) % shrink_size == 0:
if count >= max_frame*n:
break
count += 1
out.write(line)
def compute_theta_for_each_helix(output="angles.csv", dumpName="../dump.lammpstrj.0"):
print("This is for 2xov only")
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
atoms_all_frames = read_lammps(dumpName)
# print(atoms[0])
# print(len(atoms), len(atoms[0]))
# helices_angles_all_frames = []
with open(output, "w") as out:
out.write("Frame, Helix, Angle\n")
for ii, frame in enumerate(atoms_all_frames):
# helices_angles = []
for count, (i, j) in enumerate(helices_list):
# print(i, j)
i = i-91
j = j-91
# end - start
a = np.array(frame[j]) - np.array(frame[i])
b = np.array([0, 0, 1])
angle = a[2]/length(a) # in form of cos theta
# helices_angles.append(angle)
# print(angle)
out.write("{}, {}, {}\n".format(ii, count+1, angle))
# helices_angles_all_frames.append(helices_angles)
def structure_prediction_run(protein):
print(protein)
protocol_list = ["awsemer", "frag", "er"]
do = os.system
cd = os.chdir
cd(protein)
# run = "frag"
for protocol in protocol_list:
do("rm -r " + protocol)
do("mkdir -p " + protocol)
do("cp -r {} {}/".format(protein, protocol))
cd(protocol)
cd(protein)
# do("cp ~/opt/gremlin/protein/{}/gremlin/go_rnativeC* .".format(protein))
do("cp ~/opt/gremlin/protein/{}/raptor/go_rnativeC* .".format(protein))
fileName = protein + "_multi.in"
backboneFile = "fix_backbone_coeff_" + protocol
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line.replace("fix_backbone_coeff_er", backboneFile)
print(tmp, end='')
cd("..")
do("run.py -m 0 -n 20 {}".format(protein))
cd("..")
cd("..")
# do("")
def check_and_correct_fragment_memory(fragFile="fragsLAMW.mem"):
with open("tmp.mem", "w") as out:
with open(fragFile, "r") as f:
for i in range(4):
line = next(f)
out.write(line)
for line in f:
gro, _, i, n, _ = line.split()
delete = False
# print(gro, i, n)
# name = gro.split("/")[-1]
with open(gro, "r") as one:
next(one)
next(one)
all_residues = set()
for atom in one:
residue, *_ = atom.split()
# print(residue)
all_residues.add(int(residue))
for test in range(int(i), int(i)+int(n)):
if test not in all_residues:
print("ATTENTION", gro, i, n, "missing:",test)
delete = True
if not delete:
out.write(line)
os.system(f"mv {fragFile} fragsLAMW_back")
os.system(f"mv tmp.mem {fragFile}")
def read_complete_temper_2(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False, disReal=False, dis_h56=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False):
all_data_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.{}.dat".format(i)
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc_{i}", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn_{i}", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2_{i}", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if dis_h56:
tmp = pd.read_csv(location+f"distance_h56_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp1 = pd.read_csv(location+f"distance_h12_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp2 = pd.read_csv(location+f"distance_h34_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
tmp1.columns = tmp1.columns.str.strip()
tmp2.columns = tmp2.columns.str.strip()
wham = pd.concat([wham, tmp, tmp1, tmp2],axis=1)
if average_z:
z = pd.read_csv(location+f"z_complete_{i}.dat")[1:].reset_index(drop=True)
z.columns = z.columns.str.strip()
wham = pd.concat([wham, z],axis=1)
if localQ:
all_localQ = pd.read_csv(location+f"localQ.{i}.csv")[1:].reset_index(drop=True)
wham = pd.concat([wham, all_localQ], axis=1)
if goEnergy:
tmp = pd.read_csv(location+f"Go_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if goEnergy3H:
nEnergy = pd.read_csv(location+f"Go_3helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
if goEnergy4H:
nEnergy = pd.read_csv(location+f"Go_4helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
data = pd.concat([wham, dis, energy, rgs, lipid], axis=1)
# lipid = lipid[["Steps","Lipid","Run"]]
all_data_list.append(data)
data = pd.concat(all_data_list)
file = f"../log{rerun}/log.lammps"
temper = pd.read_table(location+file, skiprows=2, sep=' ')
temper = temper.melt(id_vars=['Step'], value_vars=['T' + str(i) for i in range(n)], value_name="Temp", var_name="Run")
temper["Run"] = temper["Run"].str[1:].astype(int)
temper["Temp"] = "T" + temper["Temp"].astype(str)
# print(temper)
# print(wham)
t2 = temper.merge(data, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
# print(t2)
t3 = t2.assign(TotalE=t2.Energy + t2.Lipid)
return t3.sort_values(["Step", "Run"]).reset_index(drop=True)
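# Hypothetical call (flags illustrative): merge 12 replicas of rerun 0 found in the
# current simulation folder into one tidy frame, including the real distances and
# the helix 5/6 distances.
# data = read_complete_temper_2(n=12, location="./", rerun=0, disReal=True, dis_h56=True)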
def process_complete_temper_data_3(pre, data_folder, folder_list, rerun=-1, end=-1, n=12, bias="dis", qnqc=False, average_z=False, disReal=False, dis_h56=False, localQ=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False, label=""):
print("process temp data")
dateAndTime = datetime.today().strftime('%d_%h_%H%M%S')
for folder in folder_list:
simulation_list = glob.glob(pre+folder+f"/simulation/{bias}_*")
print(pre+folder+f"/simulation/{bias}_*")
os.system("mkdir -p " + pre+folder+"/data")
# this one only consider rerun >=0, for the case rerun=-1, move log.lammps to log0
for i in range(rerun, end, -1):
all_data_list = []
for one_simulation in simulation_list:
bias_num = one_simulation.split("_")[-1]
print(bias_num, "!")
location = one_simulation + f"/{i}/"
print(location)
data = read_complete_temper_2(location=location, n=n, rerun=i, qnqc=qnqc, average_z=average_z, localQ=localQ, disReal=disReal, dis_h56=dis_h56, goEnergy=goEnergy, goEnergy3H=goEnergy3H, goEnergy4H=goEnergy4H)
print(data.shape)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data.assign(BiasTo=bias_num))
data = pd.concat(all_data_list).reset_index(drop=True)
# if localQ:
# print("hi")
# else:
# data.to_csv(os.path.join(pre, folder, f"data/rerun_{i}.csv"))
# complete_data_list.append(data)
# temps = list(dic.keys())
# complete_data = pd.concat(complete_data_list)
name = f"rerun_{2*i}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i}e7 & Step <= {2*i+1}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
name = f"rerun_{2*i+1}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i+1}e7 & Step <= {2*i+2}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
def move_data4(data_folder, freeEnergy_folder, folder_list, temp_dict_mode=1, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
print("move data")
# dic = {"T_defined":300, "T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
if temp_dict_mode == 1:
dic = {"T0":280, "T1":300, "T2":325, "T3":350, "T4":375, "T5":400, "T6":450, "T7":500, "T8":550, "T9":600, "T10":650, "T11":700}
if temp_dict_mode == 2:
dic = {"T0":280, "T1":290, "T2":300, "T3":315, "T4":335, "T5":355, "T6":380, "T7":410, "T8":440, "T9":470, "T10":500, "T11":530}
if temp_dict_mode == 3:
dic = {"T0":280, "T1":290, "T2":300, "T3":310, "T4":320, "T5":335, "T6":350, "T7":365, "T8":380, "T9":410, "T10":440, "T11":470}
if temp_dict_mode == 4:
dic = {"T0":300, "T1":335, "T2":373, "T3":417, "T4":465, "T5":519, "T6":579, "T7":645, "T8":720, "T9":803, "T10":896, "T11":1000}
# read in complete.feather
data_list = []
for folder in folder_list:
tmp = pd.read_feather(data_folder + folder +".feather")
data_list.append(tmp)
data = pd.concat(data_list)
os.system("mkdir -p "+freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 0 & Step <= 1e7'
if sample_range_mode == 1:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 2:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 3:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 4:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 5:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == 6:
queryCmd ='Step > 6e7 & Step <= 7e7'
elif sample_range_mode == 7:
queryCmd ='Step > 7e7 & Step <= 8e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
if sample_range_mode == -2:
tmp = oneTempAndBias.reset_index(drop=True)
else:
tmp = oneTempAndBias.query(queryCmd).reset_index()
if average_z < 5:
chosen_list = ["TotalE", "Qw", "Distance"]
elif average_z == 5:
chosen_list = ["TotalE", "Qw", "DisReal"]
chosen_list += ["z_h6"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2 or average_z == 3:
chosen_list += ["z_h6"]
if average_z == 3:
chosen_list += ["DisReal"]
if average_z == 4:
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["DisReal"]
if average_z == 6:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h56"]
if average_z == 7:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h56"] = tmp["z_h5"] + tmp["z_h6"]
tmp["z_h14"] = tmp["z_h1"] + tmp["z_h2"] + tmp["z_h3"] + tmp["z_h4"]
chosen_list += ["z_h14"]
chosen_list += ["z_h56"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h12"]
chosen_list += ["Dis_h34"]
chosen_list += ["Dis_h56"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
if chosen_mode == 2:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg,
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg)
# print(tmp.count())
if chosen_mode == 3:
chosen_list += ["AMH-Go", "Lipid", "Membrane", "Rg"]
chosen = tmp[chosen_list]
if chosen_mode == 4:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
if chosen_mode == 5:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE/10,
TotalE_perturb_go_p=0,
Go=tmp["AMH-Go"])
if chosen_mode == 6:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH,
TotalE_4=tmp.TotalE + tmp.AMH,
TotalE_5=tmp.AMH)
if chosen_mode == 7:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_3H,
TotalE_4=tmp.TotalE + tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.1*tmp.AMH,
TotalE_6=tmp.TotalE + 0.2*tmp.AMH)
if chosen_mode == 8:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H,
TotalE_4=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_6=tmp.TotalE + 0.5*tmp.AMH_3H)
if chosen_mode == 9:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1go_m=chosen.TotalE_2 - kgo*tmp["AMH-Go"],
TotalE_perturb_1go_p=chosen.TotalE_2 + kgo*tmp["AMH-Go"],
TotalE_perturb_2lipid_m=chosen.TotalE_2 - tmp.Lipid,
TotalE_perturb_2lipid_p=chosen.TotalE_2 + tmp.Lipid,
TotalE_perturb_3mem_m=chosen.TotalE_2 - tmp.Membrane,
TotalE_perturb_3mem_p=chosen.TotalE_2 + tmp.Membrane,
TotalE_perturb_4rg_m=chosen.TotalE_2 - tmp.Rg,
TotalE_perturb_4rg_p=chosen.TotalE_2 + tmp.Rg,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 10:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.3*tmp.Lipid,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.3*tmp.Lipid,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.5*tmp.Lipid,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.5*tmp.Lipid,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 11:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 1.1*0.1*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_2=tmp.TotalE + 1.1*0.2*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_3=tmp.TotalE + 1.1*0.5*tmp.AMH_4H + 0.1*tmp["AMH-Go"])
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.1*tmp.Membrane,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.1*tmp.Membrane,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.2*tmp.Membrane,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.2*tmp.Membrane,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 12:
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
z_h56=(tmp.z_h5 + tmp.z_h6)/2)
if chosen_mode == 13:
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
force = 0.1
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal - 25.1)*force,
TotalE_3=tmp.TotalE - (tmp.DisReal - 25.1)*force,
TotalE_4=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal)*force)
chosen.to_csv(freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
# perturbation_table = {0:"original", 1:"m_go",
# 2:"p_go", 3:"m_lipid",
# 4:"p_lipid", 5:"m_mem",
# 6:"p_mem", 7:"m_rg", 8:"p_rg"}
def compute_average_z(dumpFile, outFile):
# input dump, output z.dat
z_list = []
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
z_list.append(z)
f.write(str(z)+"\n")
def compute_average_z_2(dumpFile, outFile):
# input dump, output z.dat
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
f.write("z_average, abs_z_average, z_h1, z_h2, z_h3, z_h4, z_h5, z_h6\n")
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
f.write(str(z)+ ", ")
z = np.abs(b).mean(axis=0)[2]
f.write(str(z)+ ", ")
for count, (i,j) in enumerate(helices_list):
i = i - 91
j = j - 91
z = np.mean(b[i:j], axis=0)[2]
if count == 5:
f.write(str(z))
else:
f.write(str(z)+ ", ")
f.write("\n")
def read_simulation_2(location=".", i=-1, qnqc=False, average_z=False, localQ=False, disReal=False, **kwargs):
file = "lipid.dat"
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.dat"
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.dat"
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.dat"
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.dat"
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if average_z:
z = pd.read_csv(location+f"z_complete.dat")[1:].reset_index(drop=True)
z.columns = z.columns.str.strip()
wham = pd.concat([wham, z],axis=1)
if localQ:
all_localQ = pd.read_csv(location+f"localQ.csv")[1:].reset_index(drop=True)
wham = pd.concat([wham, all_localQ], axis=1)
data = pd.concat([wham, dis, energy, rgs, lipid], axis=1)
t3 = data.assign(TotalE=data.Energy + data.Lipid)
return t3.reset_index(drop=True)
def read_folder(location, match="", **kwargs):
runFolders = os.listdir(location+"/simulation")
if match == "qbias":
runFolders = [f for f in runFolders if re.match(r'qbias_[0-9]+', f)]
else:
runFolders = [f for f in runFolders if re.match(r'[0-9]+', f)]
print(runFolders)
data_list = []
for run in runFolders:
tmp = read_simulation_2(location+"/simulation/"+run+"/0/", **kwargs).assign(Run=run)
data_list.append(tmp)
return pd.concat(data_list).reset_index(drop=True)
def read_variable_folder(location, match="*_", **kwargs):
variables = glob.glob(os.path.join(location, match))
print(variables)
data_list = []
for variableFolder in variables:
tmp = variableFolder.split("/")[-1]
data_list.append(read_folder(variableFolder, **kwargs).assign(Folder=tmp))
data = pd.concat(data_list)
name = f"{datetime.today().strftime('%d_%h_%H%M%S')}.feather"
data.reset_index(drop=True).to_feather(name)
def downloadPdb(pdb_list):
os.system("mkdir -p original_pdbs")
for pdb_id in pdb_list:
pdb = f"{pdb_id.lower()[:4]}"
pdbFile = pdb+".pdb"
if not os.path.isfile("original_pdbs/"+pdbFile):
pdbl = PDBList()
name = pdbl.retrieve_pdb_file(pdb, pdir='.', file_format='pdb')
os.system(f"mv {name} original_pdbs/{pdbFile}")
def cleanPdb(pdb_list, chain=None):
os.system("mkdir -p cleaned_pdbs")
for pdb_id in pdb_list:
pdb = f"{pdb_id.lower()[:4]}"
if chain is None:
if len(pdb_id) == 5:
Chosen_chain = pdb_id[4].upper()
else:
assert(len(pdb_id) == 4)
Chosen_chain = "A"
else:
Chosen_chain = chain
pdbFile = pdb+".pdb"
# clean pdb
fixer = PDBFixer(filename="original_pdbs/"+pdbFile)
# remove unwanted chains
chains = list(fixer.topology.chains())
chains_to_remove = [i for i, x in enumerate(chains) if x.id not in Chosen_chain]
fixer.removeChains(chains_to_remove)
fixer.findMissingResidues()
        # only add missing residues in the middle of a chain; drop entries at the chain termini so they are not rebuilt
chains = list(fixer.topology.chains())
keys = fixer.missingResidues.keys()
# print(keys)
for key in list(keys):
chain = chains[key[0]]
if key[1] == 0 or key[1] == len(list(chain.residues())):
del fixer.missingResidues[key]
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
fixer.removeHeterogens(keepWater=False)
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.addMissingHydrogens(7.0)
PDBFile.writeFile(fixer.topology, fixer.positions, open("cleaned_pdbs/"+pdbFile, 'w'))
def getAllChains(pdbFile):
fixer = PDBFixer(filename=pdbFile)
# remove unwanted chains
chains = list(fixer.topology.chains())
a = ""
for i in chains:
a += i.id
return ''.join(sorted(set(a.upper().replace(" ", ""))))
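# Hypothetical end-to-end sketch (PDB id illustrative); wrapped in a function so the
# download/clean steps never run on import.
def _example_prepare_structure(pdb_id="1r69"):
    downloadPdb([pdb_id])
    chains = getAllChains(f"original_pdbs/{pdb_id}.pdb")   # e.g. "A"
    cleanPdb([pdb_id], chain=chains[0])                    # -> cleaned_pdbs/1r69.pdb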
def add_chain_to_pymol_pdb(location):
# location = "/Users/weilu/Research/server/nov_2018/openMM/random_start/1r69.pdb"
with open("tmp", "w") as out:
with open(location, "r") as f:
for line in f:
info = list(line)
if len(info) > 21:
info[21] = "A"
out.write("".join(info))
os.system(f"mv tmp {location}")
# ----------------------------deprecated---------------------------------------
def read_simulation(location):
file = "lipid.dat"
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
file = "energy.dat"
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
file = "addforce.dat"
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
# remove_columns = ['AddedForce', 'Dis12', 'Dis34', 'Dis56']
file = "rgs.dat"
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
file = "wham.dat"
wham = pd.read_csv(location+file)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
data = wham.merge(rgs, how='inner', left_on=["Steps"], right_on=["Steps"]).\
merge(dis, how='inner', left_on=["Steps"], right_on=["Steps"]).\
merge(energy, how='inner', left_on=["Steps"], right_on=["Steps"]).\
merge(lipid, how='inner', left_on=["Steps"], right_on=["Steps"])
data = data.assign(TotalE=data.Energy + data.Lipid)
return data
def process_complete_temper_data_2(pre, data_folder, folder_list, rerun=-1, n=12, bias="dis", qnqc=False, average_z=False, localQ=False):
print("process temp data")
dateAndTime = datetime.today().strftime('%d_%h_%H%M%S')
for folder in folder_list:
simulation_list = glob.glob(pre+folder+f"/simulation/{bias}_*")
print(pre+folder+f"/simulation/{bias}_*")
os.system("mkdir -p " + pre+folder+"/data")
# this one only consider rerun >=0, for the case rerun=-1, move log.lammps to log0
for i in range(rerun+1):
all_data_list = []
for one_simulation in simulation_list:
bias_num = one_simulation.split("_")[-1]
print(bias_num, "!")
location = one_simulation + f"/{i}/"
print(location)
data = read_complete_temper_2(location=location, n=n, rerun=i, qnqc=qnqc, average_z=average_z, localQ=localQ)
print(data.shape)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data.assign(BiasTo=bias_num))
data = pd.concat(all_data_list).reset_index(drop=True)
# if localQ:
# print("hi")
# else:
# data.to_csv(os.path.join(pre, folder, f"data/rerun_{i}.csv"))
# complete_data_list.append(data)
# temps = list(dic.keys())
# complete_data = pd.concat(complete_data_list)
name = f"rerun_{i}_{dateAndTime}.feather"
data.reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder)
def move_data3(data_folder, freeEnergy_folder, folder, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
print("move data")
dic = {"T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
# read in complete.feather
data = pd.read_feather(data_folder + folder +".feather")
os.system("mkdir -p "+freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 0 & Step <= 1e7'
if sample_range_mode == 1:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 2:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 3:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 4:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 5:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
tmp = oneTempAndBias.query(queryCmd)
chosen_list = ["TotalE", "Qw", "Distance"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2:
chosen_list += ["z_h6"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
# print(tmp.count())
chosen.to_csv(freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
def move_data2(data_folder, freeEnergy_folder, folder, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
print("move data")
dic = {"T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
# read in complete.feather
data = pd.read_feather(data_folder + folder +".feather")
os.system("mkdir -p "+freeEnergy_folder+folder+sub_mode_name+"/data")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 1:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 2:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 3:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 4:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
tmp = oneTempAndBias.query(queryCmd)
chosen_list = ["TotalE", "Qw", "Distance"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2:
chosen_list += ["z_h6"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
# print(tmp.count())
chosen.to_csv(freeEnergy_folder+folder+sub_mode_name+f"/data/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
# chosen
def make_metadata_2(cwd=".", k=1000.0, temps_list=["450"]):
files = glob.glob("../../data/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in files:
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def make_metadata(k=1000.0, temps_list=["450"]):
cwd = os.getcwd()
files = glob.glob("../data/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in files:
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def read_complete_temper(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False):
all_lipid_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file).assign(Run=i)
lipid.columns = lipid.columns.str.strip()
# lipid = lipid[["Steps","Lipid","Run"]]
all_lipid_list.append(lipid)
lipid = pd.concat(all_lipid_list)
all_rgs_list = []
for i in range(n):
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file).assign(Run=i)
rgs.columns = rgs.columns.str.strip()
# lipid = lipid[["Steps","Lipid","Run"]]
all_rgs_list.append(rgs)
rgs = pd.concat(all_rgs_list)
all_energy_list = []
for i in range(n):
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file).assign(Run=i)
energy.columns = energy.columns.str.strip()
energy = energy[["Steps", "AMH-Go", "Membrane", "Rg", "Run"]]
all_energy_list.append(energy)
energy = pd.concat(all_energy_list)
all_dis_list = []
for i in range(n):
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file).assign(Run=i)
dis.columns = dis.columns.str.strip()
remove_columns = ['AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
all_dis_list.append(dis)
dis = | pd.concat(all_dis_list) | pandas.concat |
import json
import numpy as np
import pandas as pd
import shap
import sys
from sqlalchemy import create_engine
from sklearn.isotonic import IsotonicRegression
from sklearn.metrics import roc_auc_score, average_precision_score, matthews_corrcoef, precision_score, recall_score
from tensorflow.keras.callbacks import ModelCheckpoint, Callback
from tensorflow.keras.layers import Activation, Lambda, ReLU
from tensorflow.keras.models import Model, load_model
from utils import pick_features, handle_imbalance, remove_old_results_from_db, make_feature_subset
class early_stopping_by_metric(Callback):
"""
A helper callback allowing us to stop training only when the defined metric reaches a certain value.
"""
def __init__(self, monitor="loss", value=0.00001):
super(Callback, self).__init__()
self.monitor, self.value = monitor, value
def on_epoch_end(self, epoch, logs={}):
if logs.get(self.monitor) <= self.value:
self.model.stop_training = True
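# Hypothetical usage sketch (threshold illustrative): stop as soon as the monitored
# training loss drops to the value reached by the best CV fold; the actual target_loss
# is read from the database further down in this script.
# stopper = early_stopping_by_metric(monitor="loss", value=0.05)
# model.fit(X, y, callbacks=[stopper])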
# sys.stdout = open(snakemake.log[0], "w")
DBNAME = snakemake.params["dbname"]
DBUSER = snakemake.params["dbuser"]
DBSCHEMA = snakemake.params["dbschema"]
psql_url = f"postgresql://{DBUSER}@trans-db-01/{DBNAME}?options=-c%20search_path={DBSCHEMA}"
engine = create_engine(psql_url)
with engine.connect() as connection:
outcome_type = snakemake.wildcards["outcome_type"]
model_type = snakemake.wildcards["model_type"]
outcome_variable = snakemake.wildcards["outcome_variable"]
optuna_study_name = f"{outcome_type}__{model_type}__{outcome_variable}"
feature_subset = make_feature_subset(outcome_type)
hp = pd.read_sql(f"SELECT hyperparameters FROM best_trials WHERE study_name = '{optuna_study_name}'", connection)
hp = json.loads(hp.hyperparameters[0])
datasets = {x: pd.read_sql("data_" + x, connection) for x in ("dev", "test", "test_new")}
features = {k: pick_features(v, feature_subset).values for k,v in datasets.items()}
y = {k: v[outcome_variable].values for k,v in datasets.items()}
features["dev"], y["dev"], class_weights = \
handle_imbalance(features["dev"], y["dev"], hp["class_handling"])
# Read settings needed to pick up where best CV fold stopped so we can load the model
cv_model_to_continue_training = pd.read_sql(f"""
WITH cte_loss AS (
SELECT
bt.study_name
, trial_number
, cv_fold
, (eval_val::json->>'loss')::float AS target_loss
FROM best_trials AS bt
INNER JOIN trials
ON trials.trial_id = bt.trial_id
INNER JOIN training_summaries AS ts
ON ts.study_name = bt.study_name
AND ts.trial_number = trials.number
)
SELECT *
FROM (
SELECT *, ROW_NUMBER() OVER(PARTITION BY study_name ORDER BY target_loss) AS rn
FROM cte_loss
) AS loss
WHERE rn = 1
AND study_name = '{optuna_study_name}';
""", connection)
cv_fold = int(cv_model_to_continue_training.cv_fold)
trial_number = int(cv_model_to_continue_training.trial_number)
target_loss = float(cv_model_to_continue_training.target_loss)
model_crude = load_model(snakemake.params["weights_dir"] + \
f"/{optuna_study_name}__trial_{trial_number}__cv_fold_{cv_fold}__best_weights.hdf5")
# Define callbacks
study_name = f"{outcome_type}__{model_type}__{outcome_variable}__final"
weights_fpath = snakemake.params["weights_dir"] + f"/final__{study_name}__best_weights.hdf5"
checkpoint = ModelCheckpoint( # needed to save the model so we can re-invoke it below for Platt calibration
monitor="loss",
filepath=weights_fpath,
save_best_only=True,
save_weights_only=False,
mode="min"
)
early_stopping = early_stopping_by_metric(value=target_loss)
# Continue training
hist = model_crude.fit(
x=features["dev"], y=y["dev"],
verbose=False,
epochs=500,
batch_size=hp["batch_size"],
validation_data=None,
callbacks=[checkpoint, early_stopping],
class_weight=class_weights
)
# Platt recalibration
# acoeff, bcoeff = _sigmoid_calibration(model_crude.predict(features["dev"]), y["dev"])
#
# platt = model_crude.layers[-1].output
# platt = Lambda(lambda x: -(acoeff*x + bcoeff))(platt)
# platt = Activation("sigmoid")(platt)
# model_platt = Model(inputs=model_crude.layers[0].input, outputs=platt)
#
# # Isotonic regression
# iso_reg = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# iso_reg = iso_reg.fit(model_crude.predict(features["dev"]).squeeze(), y["dev"])
# Make predictions and save in database
def make_pred_df (y_true, features, dataset, mod, pred_type=None):
y_pred = pd.DataFrame(mod.predict(features), columns=["y_pred"])
# if pred_type == "isotonic":
# y_pred = pd.DataFrame(iso_reg.predict(y_pred.squeeze()), columns=["y_pred"])
context = pd.DataFrame({
"study_name": study_name,
"dataset": dataset,
"admission_id": datasets[dataset].admission_id,
"pred_type": pred_type,
"y_true": y_true})
return pd.concat([context, y_pred], axis="columns")
preds = pd.concat([make_pred_df(y[k], features[k], k, model_crude, "crude") for k in datasets.keys()])
remove_old_results_from_db("predictions", "study_name", study_name, connection)
preds.to_sql("predictions", connection, if_exists="append", index=False)
# Evaluation metrics
def get_metrics(k, mod=model_crude, pred_type="crude"):
# First, find best threshold as per Matthew's correlation coefficient
y_pred_train = mod.predict(features["dev"])
y_true_train = y["dev"]
best_mcc = 0
threshold = 0
for th in np.linspace(0, 1, 100, endpoint=False):
y_binary = (y_pred_train > th).astype("int32")
mcc = matthews_corrcoef(y_true_train, y_binary)
            if mcc > best_mcc:
                best_mcc = mcc
                threshold = th
# Then, use threshold to compute metrics
y_true = y[k]
y_pred = mod.predict(features[k])
y_pred_binary = (y_pred > threshold).astype("int32")
return pd.DataFrame({
"study_name": study_name,
"dataset": k,
"pred_type": pred_type,
"auroc": roc_auc_score(y_true, y_pred),
"avg_prec_score": average_precision_score(y_true, y_pred),
"mcc": matthews_corrcoef(y_true, y_pred_binary),
"precision": precision_score(y_true, y_pred_binary),
"recall": recall_score(y_true, y_pred_binary),
"threshold": threshold,
"n_epochs": len(hist.history["loss"])
}, index=[0])
metrics = pd.concat([get_metrics(k, model_crude, "crude") for k in datasets.keys()])
remove_old_results_from_db("evaluation_metrics", "study_name", study_name, connection)
metrics.to_sql("evaluation_metrics", connection, if_exists="append", index=False)
# Make SHAP explanations
features = {k: pick_features(v, feature_subset) for k,v in datasets.items()} # needed again without converting to np array
try:
n_samples = min(snakemake.params["n_shap_background"], features["dev"].shape[0])
background_set = features["dev"].sample(n_samples, random_state=42)
    except Exception:
        background_set = features["dev"]
try:
n_explanations = snakemake.params["n_explanations"]
    except Exception:
        n_explanations = features["test"].shape[0]
print("Creating explainer")
e = shap.DeepExplainer(model_crude, background_set.values)
print("Computing shap values")
shap_values = e.shap_values(features["test"][:n_explanations].values)[0]
print("Creating combined dataframe and writing to database")
context = pd.DataFrame({
"study_name": study_name,
"dataset": "test",
"pred_type": "crude",
"admission_id": datasets["test"].admission_id[:n_explanations],
"y_true": y["test"][:n_explanations],
"y_pred": model_crude.predict(features["test"][:n_explanations]).squeeze(),
"base_value": float(e.expected_value)
})
    shap_df = pd.concat([context, pd.DataFrame(shap_values, columns=background_set.columns)], axis="columns")
from abc import ABC, abstractmethod
import argparse
import datetime
import multiprocessing as mp
import numpy as np
import os
import sys
import pandas as pd
import shutil
import subprocess
from typing import Union, List
from yaml import dump
import ChildProject
from ChildProject.pipelines.pipeline import Pipeline
pipelines = {}
class AudioProcessor(ABC):
def __init__(
self,
project: ChildProject.projects.ChildProject,
name: str,
input_profile: str = None,
threads: int = 1,
recordings: Union[str, List[str], pd.DataFrame] = None,
):
self.project = project
self.name = name
self.threads = int(threads)
self.recordings = Pipeline.recordings_from_list(recordings)
self.input_profile = input_profile
if self.input_profile:
input_path = os.path.join(
self.project.path,
ChildProject.projects.ChildProject.CONVERTED_RECORDINGS,
self.input_profile,
)
assert os.path.exists(
input_path
), f"provided input profile {input_profile} does not exist"
self.converted = pd.DataFrame()
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
pipelines[cls.SUBCOMMAND] = cls
def output_directory(self):
return os.path.join(
self.project.path,
ChildProject.projects.ChildProject.CONVERTED_RECORDINGS,
self.name,
)
def read_metadata(self):
path = os.path.join(self.output_directory(), "recordings.csv")
if os.path.exists(path):
return pd.read_csv(path).set_index(
["original_filename", "converted_filename"]
)
else:
return None
def export_metadata(self):
path = os.path.join(self.output_directory(), "recordings.csv")
self.converted.to_csv(path)
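    # Note: read_metadata/export_metadata round-trip the per-recording conversion log
    # as <output_directory()>/recordings.csv, indexed by
    # (original_filename, converted_filename), presumably so repeated runs of a
    # processor can pick up previously converted recordings.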
@abstractmethod
def process_recording(self, recording):
pass
def process(self, parameters):
recordings = self.project.get_recordings_from_list(self.recordings)
os.makedirs(name=self.output_directory(), exist_ok=True)
if self.threads == 1:
self.converted = [
self.process_recording(recording)
for recording in recordings.to_dict("records")
]
else:
with mp.Pool(processes=self.threads) as pool:
self.converted = pool.map(
self.process_recording, recordings.to_dict("records")
)
previously_converted = self.read_metadata()
self.converted = | pd.concat(self.converted) | pandas.concat |
# Import python modules
import os, sys
# data handling libraries
import pandas as pd
import numpy as np
import pickle
import json
import dask
from multiprocessing import Pool
# graphical control libraries
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# shape and layer libraries
import fiona
from shapely.geometry import MultiPolygon, shape, point, box
from descartes import PolygonPatch
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import geopandas as gpd
# data wrangling libraries
import ftplib, urllib, wget, bz2
from bs4 import BeautifulSoup as bs
class ogh_meta:
"""
The json file that describes the Gridded climate data products
"""
def __init__(self):
self.__meta_data = dict(json.load(open('ogh_meta.json','rb')))
# key-value retrieval
def __getitem__(self, key):
return(self.__meta_data[key])
# key list
def keys(self):
return(self.__meta_data.keys())
# value list
def values(self):
return(self.__meta_data.values())
# print('Version '+datetime.fromtimestamp(os.path.getmtime('ogh.py')).strftime('%Y-%m-%d %H:%M:%S')+' jp')
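# Illustrative usage sketch for ogh_meta (assumes an 'ogh_meta.json' file is present in the
# working directory; the 'dailymet_livneh2013' key below is only an example entry, not a
# guaranteed one):
#
# meta_file = ogh_meta()
# print(list(meta_file.keys()))                      # available gridded data products
# info = meta_file['dailymet_livneh2013']            # metadata dict for one product
# print(info['start_date'], info['end_date'], info['variable_list'])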
def saveDictOfDf(outfilename, dictionaryObject):
# write a dictionary of dataframes to a json file using pickle
with open(outfilename, 'wb') as f:
pickle.dump(dictionaryObject, f)
f.close()
def readDictOfDf(infilename):
# read a dictionary of dataframes from a json file using pickle
with open(infilename, 'rb') as f:
dictionaryObject = pickle.load(f)
f.close()
return(dictionaryObject)
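# Illustrative round-trip sketch for saveDictOfDf/readDictOfDf (the output file name is
# arbitrary and only used for this example):
#
# tmp = {'example_df': pd.DataFrame({'a': [1, 2, 3]})}
# saveDictOfDf('example_dict.file', tmp)
# restored = readDictOfDf('example_dict.file')
# print(restored['example_df'].head())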
def reprojShapefile(sourcepath, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None):
"""
sourcepath: (dir) the path to the .shp file
newprojdictionary: (dict) the new projection definition in the form of a dictionary (default provided)
outpath: (dir) the output path for the new shapefile
"""
# if outpath is none, treat the reprojection as a file replacement
if isinstance(outpath, type(None)):
outpath = sourcepath
shpfile = gpd.GeoDataFrame.from_file(sourcepath)
shpfile = shpfile.to_crs(newprojdictionary)
shpfile.to_file(outpath)
def getFullShape(shapefile):
"""
Generate a MultiPolygon to represent each shape/polygon within the shapefile
shapefile: (dir) a path to the ESRI .shp shapefile
"""
shp = fiona.open(shapefile)
mp = [shape(pol['geometry']) for pol in shp]
mp = MultiPolygon(mp)
shp.close()
return(mp)
def getShapeBbox(polygon):
"""
Generate a geometric box to represent the bounding box for the polygon, shapefile connection, or MultiPolygon
polygon: (geometry) a geometric polygon, MultiPolygon, or shapefile connection
"""
# identify the cardinal bounds
minx, miny, maxx, maxy = polygon.bounds
bbox = box(minx, miny, maxx, maxy, ccw=True)
return(bbox)
def readShapefileTable(shapefile):
"""
read in the datatable captured within the shapefile properties
shapefile: (dir) a path to the ESRI .shp shapefile
"""
#cent_df = gpd.read_file(shapefile)
shp = fiona.open(shapefile)
centroid = [eachpol['properties'] for eachpol in shp]
cent_df = pd.DataFrame.from_dict(centroid, orient='columns')
shp.close()
return(cent_df)
def filterPointsinShape(shape, points_lat, points_lon, points_elev=None, buffer_distance=0.06, buffer_resolution=16,
labels=['LAT', 'LONG_', 'ELEV']):
"""
filter for datafiles that can be used
shape: (geometry) a geometric polygon or MultiPolygon
points_lat: (series) a series of latitude points in WGS84 projection
points_lon: (series) a series of longitude points in WGS84 projection
points_elev: (series) a series of elevation points in meters; optional - default is None
buffer_distance: (float64) a numerical multiplier to increase the geodetic boundary area
buffer_resolution: (float64) the increments between geodetic longlat degrees
labels: (list) a list of preferred labels for latitude, longitude, and elevation
"""
# add buffer region
region = shape.buffer(buffer_distance, resolution=buffer_resolution)
# construct points_elev if null
if isinstance(points_elev, type(None)):
points_elev=np.repeat(np.nan, len(points_lon))
# Intersection each coordinate with the region
limited_list = []
for lon, lat, elev in zip(points_lon, points_lat, points_elev):
gpoint = point.Point(lon, lat)
if gpoint.intersects(region):
limited_list.append([lat, lon, elev])
maptable = pd.DataFrame.from_records(limited_list, columns=labels)
## dask approach ##
#intersection=[]
#for lon, lat, elev in zip(points_lon, points_lat, points_elev):
# gpoint = point.Point(lon, lat)
# intersection.append(dask.delayed(gpoint.intersects(region)))
# limited_list.append([intersection, lat, lon, elev])
# convert to dataframe
#maptable = pd.DataFrame({labels[0]:points_lat, labels[1]:points_lon, labels[2]:points_elev}
# .loc[dask.compute(intersection)[0],:]
# .reset_index(drop=True)
return(maptable)
def scrapeurl(url, startswith=None, hasKeyword=None):
"""
scrape the gridded datafiles from a url of interest
url: (str) the web folder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
hasKeyword: (str) keywords represented in a webpage element; default is None
"""
# grab the html of the url, and prettify the html structure
    page = urllib.request.urlopen(url).read()
page_soup = bs(page, 'lxml')
page_soup.prettify()
# loop through and filter the hyperlinked lines
if pd.isnull(startswith):
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if hasKeyword in anchor['href']]
else:
temp = [anchor['href'] for anchor in page_soup.findAll('a', href=True) if anchor['href'].startswith(startswith)]
# convert to dataframe then separate the lon and lat as float coordinate values
temp = pd.DataFrame(temp, columns = ['filenames'])
return(temp)
def treatgeoself(shapefile, NAmer, folder_path=os.getcwd(), outfilename='mappingfile.csv', buffer_distance=0.06):
"""
TreatGeoSelf to some [data] lovin'!
shapefile: (dir) the path to an ESRI shapefile for the region of interest
Namer: (dir) the path to an ESRI shapefile, which has each 1/16th coordinate and elevation information from a DEM
folder_path: (dir) the destination folder path for the mappingfile output; default is the current working directory
outfilename: (str) the name of the output file; default name is 'mappingfile.csv'
buffer_distance: (float64) the multiplier to be applied for increasing the geodetic boundary area; default is 0.06
"""
# conform projections to longlat values in WGS84
reprojShapefile(shapefile, newprojdictionary={'proj':'longlat', 'ellps':'WGS84', 'datum':'WGS84'}, outpath=None)
# read shapefile into a multipolygon shape-object
shape_mp = getFullShape(shapefile)
# read in the North American continental DEM points for the station elevations
NAmer_datapoints = readShapefileTable(NAmer).rename(columns={'Lat':'LAT','Long':'LONG_','Elev':'ELEV'})
# generate maptable
maptable = filterPointsinShape(shape_mp,
points_lat=NAmer_datapoints.LAT,
points_lon=NAmer_datapoints.LONG_,
points_elev=NAmer_datapoints.ELEV,
buffer_distance=buffer_distance, buffer_resolution=16, labels=['LAT', 'LONG_', 'ELEV'])
maptable.reset_index(inplace=True)
maptable = maptable.rename(columns={"index":"FID"})
print(maptable.shape)
print(maptable.tail())
# print the mappingfile
mappingfile=os.path.join(folder_path, outfilename)
maptable.to_csv(mappingfile, sep=',', header=True, index=False)
return(mappingfile)
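# Illustrative usage sketch for treatgeoself (the shapefile and DEM paths below are
# placeholders; substitute the watershed shapefile and the North American 1/16-degree DEM
# point shapefile available to your project):
#
# mappingfile = treatgeoself(shapefile='data/watershed.shp',
#                            NAmer='data/NAmer_dem_points.shp',
#                            folder_path=os.getcwd(),
#                            outfilename='mappingfile.csv',
#                            buffer_distance=0.06)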
def mapContentFolder(resid):
"""
map the content folder within HydroShare
resid: (str) a string hash that represents the hydroshare resource that has been migrated
"""
path = os.path.join('/home/jovyan/work/notebooks/data', str(resid), str(resid), 'data/contents')
return(path)
# ### CIG (DHSVM)-oriented functions
def compile_bc_Livneh2013_locations(maptable):
"""
compile a list of file URLs for bias corrected Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/Livneh/bcLivneh_WWA_2013/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_Livneh2013_locations(maptable):
"""
compile a list of file URLs for Livneh et al. 2013 (CIG)
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://www.cses.washington.edu/rocinante/Livneh/Livneh_WWA_2013/forcs_dhsvm/',basename]
locations.append(''.join(url))
return(locations)
### VIC-oriented functions
def compile_VICASCII_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 VIC.ASCII outputs
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Fluxes_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/VIC.ASCII/latitude.",str(row['LAT']),'/',loci,'.bz2']
locations.append(''.join(url))
return(locations)
def compile_VICASCII_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 VIC.ASCII outputs for the USA
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2/',
startswith='fluxes')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['VIC_fluxes_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Fluxes.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
### Climate (Meteorological observations)-oriented functions
def canadabox_bc():
"""
Establish the Canadian (north of the US bounding boxes) Columbia river basin bounding box
"""
# left, bottom, right top
return(box(-138.0, 49.0, -114.0, 53.0))
def scrape_domain(domain, subdomain, startswith=None):
"""
scrape the gridded datafiles from a url of interest
domain: (str) the web folder path
subdomain: (str) the subfolder path to be scraped for hyperlink references
startswith: (str) the starting keywords for a webpage element; default is None
"""
# connect to domain
ftp = ftplib.FTP(domain)
ftp.login()
ftp.cwd(subdomain)
# scrape for data directories
tmp = [dirname for dirname in ftp.nlst() if dirname.startswith(startswith)]
geodf = pd.DataFrame(tmp, columns=['dirname'])
# conform to bounding box format
tmp = geodf['dirname'].apply(lambda x: x.split('.')[1:])
tmp = tmp.apply(lambda x: list(map(float,x)) if len(x)>2 else x)
# assemble the boxes
geodf['bbox']=tmp.apply(lambda x: box(x[0]*-1, x[2]-1, x[1]*-1, x[3]) if len(x)>2 else canadabox_bc())
return(geodf)
def mapToBlock(df_points, df_regions):
for index, eachblock in df_regions.iterrows():
for ind, row in df_points.iterrows():
if point.Point(row['LONG_'], row['LAT']).intersects(eachblock['bbox']):
df_points.loc[ind, 'blocks'] = str(eachblock['dirname'])
return(df_points)
def compile_dailyMET_Livneh2013_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2013 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
# identify the subfolder blocks
blocks = scrape_domain(domain='livnehpublicstorage.colorado.edu',
subdomain='/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2/',
startswith='data')
# map each coordinate to the subfolder
maptable = mapToBlock(maptable, blocks)
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_CONUSExt_v.1.2_2013', str(row['LAT']), str(row['LONG_'])])
url='/'.join(["ftp://livnehpublicstorage.colorado.edu/public/Livneh.2013.CONUS.Dataset/Meteorology.asc.v.1.2.1915.2011.bz2", str(row['blocks']), loci+".bz2"])
locations.append(url)
return(locations)
def compile_dailyMET_Livneh2015_locations(maptable):
"""
compile the list of file URLs for Livneh et al., 2015 Daily Meteorology data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
loci='_'.join(['Meteorology_Livneh_NAmerExt_15Oct2014', str(row['LAT']), str(row['LONG_'])])
url=["ftp://192.168.127.12/pub/dcp/archive/OBS/livneh2014.1_16deg/ascii/daily/latitude.", str(row['LAT']),"/",loci,".bz2"]
locations.append(''.join(url))
return(locations)
# ### WRF-oriented functions
def compile_wrfnnrp_raw_Salathe2014_locations(maptable):
"""
compile a list of file URLs for Salathe et al., 2014 raw WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/raw/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
def compile_wrfnnrp_bc_Salathe2014_locations(maptable):
"""
compile a list of file URLs for the Salathe et al., 2014 bias corrected WRF NNRP data
maptable: (dataframe) a dataframe that contains the FID, LAT, LONG_, and ELEV for each interpolated data file
"""
locations=[]
for ind, row in maptable.iterrows():
basename='_'.join(['data', str(row['LAT']), str(row['LONG_'])])
url=['http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/bc/forcings_ascii/',basename]
locations.append(''.join(url))
return(locations)
# ## Data file migration functions
def ensure_dir(f):
"""
check if the destination folder directory exists; if not, create it and set it as the working directory
f: (dir) the directory to create and/or set as working directory
"""
if not os.path.exists(f):
os.makedirs(f)
os.chdir(f)
def wget_download(listofinterest):
"""
Download files from an http domain
listofinterest: (list) a list of urls to request
"""
# check and download each location point, if it doesn't already exist in the download directory
for fileurl in listofinterest:
basename = os.path.basename(fileurl)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
# Download the files to the subdirectory
def wget_download_one(fileurl):
"""
Download a file from an http domain
fileurl: (url) a url to request
"""
# check and download each location point, if it doesn't already exist in the download directory
basename=os.path.basename(fileurl)
# if it exists, remove for new download (overwrite mode)
if os.path.isfile(basename):
os.remove(basename)
try:
ping = urllib.request.urlopen(fileurl)
if ping.getcode()!=404:
wget.download(fileurl)
print('downloaded: ' + basename)
except:
print('File does not exist at this URL: ' + basename)
def wget_download_p(listofinterest, nworkers=20):
"""
Download files from an http domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 10
"""
pool = Pool(int(nworkers))
pool.map(wget_download_one, listofinterest)
pool.close()
pool.terminate()
def ftp_download(listofinterest):
"""
Download and decompress files from an ftp domain
listofinterest: (list) a list of urls to request
"""
for loci in listofinterest:
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_one(loci):
"""
Download and decompress a file from an ftp domain
loci: (url) a url to request
"""
# establish path info
fileurl=loci.replace('ftp://','') # loci is already the url with the domain already appended
ipaddress=fileurl.split('/',1)[0] # ip address
path=os.path.dirname(fileurl.split('/',1)[1]) # folder path
filename=os.path.basename(fileurl) # filename
# download the file from the ftp server
ftp=ftplib.FTP(ipaddress)
ftp.login()
ftp.cwd(path)
try:
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
ftp.close()
# decompress the file
decompbz2(filename)
except:
os.remove(filename)
print('File does not exist at this URL: '+fileurl)
def ftp_download_p(listofinterest, nworkers=5):
"""
Download and decompress files from an ftp domain in parallel
listofinterest: (list) a list of urls to request
nworkers: (int) the number of processors to distribute tasks; default is 5
"""
pool = Pool(int(nworkers))
pool.map(ftp_download_one, listofinterest)
pool.close()
pool.terminate()
def decompbz2(filename):
"""
Extract a file from a bz2 file of the same name, then remove the bz2 file
filename: (dir) the file path for a bz2 compressed file
"""
with open(filename.split(".bz2",1)[0], 'wb') as new_file, open(filename, 'rb') as zipfile:
decompressor = bz2.BZ2Decompressor()
for data in iter(lambda : zipfile.read(100 * 1024), b''):
new_file.write(decompressor.decompress(data))
os.remove(filename)
zipfile.close()
new_file.close()
print(os.path.splitext(filename)[0] + ' unzipped')
def catalogfiles(folderpath):
"""
make a catalog of the gridded files within a folderpath
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
"""
# read in downloaded files
temp = [eachfile for eachfile in os.listdir(folderpath) if not os.path.isdir(eachfile)]
if len(temp)==0:
# no files were available; setting default catalog output structure
catalog = pd.DataFrame([], columns=['filenames','LAT','LONG_'])
else:
# create the catalog dataframe and extract the filename components
catalog = pd.DataFrame(temp, columns=['filenames'])
catalog[['LAT','LONG_']] = catalog['filenames'].apply(lambda x: pd.Series(str(x).rsplit('_',2))[1:3]).astype(float)
# convert the filenames column to a filepath
catalog['filenames'] = catalog['filenames'].apply(lambda x: os.path.join(folderpath, x))
return(catalog)
def addCatalogToMap(outfilepath, maptable, folderpath, catalog_label):
"""
Update the mappingfile with a new column, a vector of filepaths for the downloaded files
outfilepath: (dir) the path for the output file
maptable: (dataframe) a dataframe containing the FID, LAT, LONG_, and ELEV information
folderpath: (dir) the folder of files to be catalogged, which have LAT and LONG_ as the last two filename features
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# assert catalog_label as a string-object
catalog_label = str(catalog_label)
# catalog the folder directory
catalog = catalogfiles(folderpath).rename(columns={'filenames':catalog_label})
# drop existing column
if catalog_label in maptable.columns:
maptable = maptable.drop(labels=catalog_label, axis=1)
# update with a vector for the catalog of files
maptable = maptable.merge(catalog, on=['LAT','LONG_'], how='left')
# remove blocks, if they were needed
if 'blocks' in maptable.columns:
maptable = maptable.drop(labels=['blocks'], axis=1)
# write the updated mappingfile
maptable.to_csv(outfilepath, header=True, index=False)
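# Illustrative sketch: after downloading files into a folder, update the mappingfile with a
# catalog column (the folder path and catalog label below are placeholders):
#
# maptable = pd.read_csv('mappingfile.csv')
# addCatalogToMap(outfilepath='mappingfile.csv', maptable=maptable,
#                 folderpath='livneh2013/Daily_MET_1915_2011/raw',
#                 catalog_label='dailymet_livneh2013')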
# Wrapper scripts
def getDailyMET_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/raw', catalog_label='dailymet_livneh2013'):
"""
Get the Livneh el al., 2013 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate DailyMET livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
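# Illustrative usage sketch for the download wrappers (homedir and mappingfile are
# placeholders; each wrapper downloads into a subdirectory of homedir and appends a catalog
# column to the mappingfile):
#
# homedir = os.getcwd()
# mappingfile = os.path.join(homedir, 'mappingfile.csv')
# met_dir = getDailyMET_livneh2013(homedir=homedir, mappingfile=mappingfile)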
def getDailyMET_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_MET_1950_2013/raw', catalog_label='dailymet_livneh2015'):
"""
Get the Livneh el al., 2015 Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate Daily MET livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_dailyMET_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyMET_bcLivneh2013(homedir, mappingfile, subdir='livneh2013/Daily_MET_1915_2011/bc', catalog_label='dailymet_bclivneh2013'):
"""
Get the Livneh el al., 2013 bias corrected Daily Meteorology files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate baseline_corrected livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_bc_Livneh2013_locations(maptable)
# download the files
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2013(homedir, mappingfile, subdir='livneh2013/Daily_VIC_1915_2011', catalog_label='dailyvic_livneh2013'):
"""
Get the Livneh el al., 2013 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# FIRST RUN
# check and generate VIC_ASCII Flux model livneh 2013 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points for USA
locations = compile_VICASCII_Livneh2013_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyVIC_livneh2015(homedir, mappingfile, subdir='livneh2015/Daily_VIC_1950_2013', catalog_label='dailyvic_livneh2015'):
"""
Get the Livneh el al., 2015 Daily VIC files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate Daily VIC.ASCII Flux model livneh 2015 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# generate table of lats and long coordinates
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_VICASCII_Livneh2015_locations(maptable)
# Download the files
ftp_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_salathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/raw', catalog_label='dailywrf_salathe2014'):
"""
Get the Salathe el al., 2014 raw Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate the Daily Meteorology raw WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_raw_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
def getDailyWRF_bcsalathe2014(homedir, mappingfile, subdir='salathe2014/WWA_1950_2010/bc', catalog_label='dailywrf_bcsalathe2014'):
"""
Get the Salathe el al., 2014 bias corrected Daily WRF files of interest using the reference mapping file
homedir: (dir) the home directory to be used for establishing subdirectories
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
subdir: (dir) the subdirectory to be established under homedir
catalog_label: (str) the preferred name for the series of catalogged filepaths
"""
# check and generate the Daily Meteorology bias corrected WRF Salathe 2014 data directory
filedir=os.path.join(homedir, subdir)
ensure_dir(filedir)
# read in the longitude and latitude points from the reference mapping file
maptable = pd.read_csv(mappingfile)
# compile the longitude and latitude points
locations = compile_wrfnnrp_bc_Salathe2014_locations(maptable)
# download the data
wget_download_p(locations)
# update the mappingfile with the file catalog
addCatalogToMap(outfilepath=mappingfile, maptable=maptable, folderpath=filedir, catalog_label=catalog_label)
# return to the home directory
os.chdir(homedir)
return(filedir)
# # Data Processing libraries
def filesWithPath(folderpath):
"""
Create a list of filepaths for the files
folderpath: (dir) the folder of interest
"""
files =[os.path.join(folderpath, eachfile) for eachfile in os.listdir(folderpath)
if not eachfile.startswith('.') and not os.path.isdir(eachfile)] # exclude hidden files
return(files)
def compareonvar(map_df, colvar='all'):
"""
subsetting a dataframe based on some columns of interest
map_df: (dataframe) the dataframe of the mappingfile table
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# apply row-wise inclusion based on a subset of columns
if isinstance(colvar, type(None)):
return(map_df)
    if colvar == 'all':
# compare on all columns except the station info
return(map_df.dropna())
else:
# compare on only the listed columns
return(map_df.dropna(subset=colvar))
def mappingfileToDF(mappingfile, colvar='all'):
"""
read in a dataframe and subset based on columns of interest
mappingfile: (dir) the file path to the mappingfile, which contains the LAT, LONG_, and ELEV coordinates of interest
colvar: (str or list) the column(s) to use for subsetting; 'None' will return an outerjoin, 'all' will return an innerjoin
"""
# Read in the mappingfile as a data frame
map_df = pd.read_csv(mappingfile)
    # select rows (datafiles) based on the colvar(s) chosen; default is 'all' (keep only rows complete across all columns)
map_df = compareonvar(map_df=map_df, colvar=colvar)
# compile summaries
print(map_df.head())
print('Number of gridded data files:'+ str(len(map_df)))
print('Minimum elevation: ' + str(np.min(map_df.ELEV))+ 'm')
print('Mean elevation: '+ str(np.mean(map_df.ELEV))+ 'm')
print('Maximum elevation: '+ str(np.max(map_df.ELEV))+ 'm')
return(map_df, len(map_df))
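# Illustrative sketch for mappingfileToDF (file name and catalog label are placeholders):
#
# map_df, nstations = mappingfileToDF('mappingfile.csv', colvar='all')                     # rows complete for every dataset column
# map_df, nstations = mappingfileToDF('mappingfile.csv', colvar=['dailymet_livneh2013'])   # rows complete for one dataset only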
def read_in_all_files(map_df, dataset, metadata, file_start_date, file_end_date, file_time_step, file_colnames, file_delimiter, subset_start_date, subset_end_date):
"""
Read in files based on dataset label
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
dataset: (str) the name of the dataset catalogged into map_df
metadata (str) the dictionary that contains the metadata explanations; default is None
file_colnames: (list) the list of shorthand variables; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
# extract metadata if the information are not provided
if pd.notnull(metadata):
if isinstance(file_start_date, type(None)):
file_start_date = metadata[dataset]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[dataset]['end_date']
if isinstance(file_time_step, type(None)):
file_time_step = metadata[dataset]['temporal_resolution']
if isinstance(file_colnames, type(None)):
file_colnames = metadata[dataset]['variable_list']
if isinstance(file_delimiter, type(None)):
file_delimiter = metadata[dataset]['delimiter']
#initialize dictionary and time sequence
df_dict=dict()
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step) # daily
# import data for all climate stations
for ind, row in map_df.iterrows():
tmp = pd.read_table(row[dataset], header=None, delimiter=file_delimiter, names=file_colnames)
tmp.set_index(met_daily_dates, inplace=True)
# subset to the date range of interest (default is file date range)
tmp = tmp.iloc[(met_daily_dates>=subset_start_date) & (met_daily_dates<=subset_end_date),:]
# set row indices
df_dict[tuple(row[['FID','LAT','LONG_']].tolist())] = tmp
return(df_dict)
def read_files_to_vardf(map_df, df_dict, gridclimname, dataset, metadata,
file_start_date, file_end_date, file_delimiter, file_time_step, file_colnames,
subset_start_date, subset_end_date, min_elev, max_elev):
"""
# reads in the files to generate variables dataframes
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
df_dict: (dict) an existing dictionary where new computations will be stored
gridclimname: (str) the suffix for the dataset to be named; if None is provided, default to the dataset name
dataset: (str) the name of the dataset catalogged into map_df
metadata: (str) the dictionary that contains the metadata explanations; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_delimiter: (str) a file parsing character to be used for file reading
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
"""
# start time
starttime = pd.datetime.now()
# date range from ogh_meta file
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step)
met_daily_subdates=pd.date_range(subset_start_date, subset_end_date, freq=file_time_step)
# omit null entries or missing data file
map_df = map_df.loc[pd.notnull(map_df[dataset]),:]
print('Number of data files within elevation range ('+str(min_elev)+':'+str(max_elev)+'): '+str(len(map_df)))
# iterate through each data file
for eachvar in metadata[dataset]['variable_list']:
# identify the variable column index
usecols = [metadata[dataset]['variable_list'].index(eachvar)]
# initiate df as a list
df_list=[]
# loop through each file
for ind, row in map_df.iterrows():
# consider rewriting the params to just select one column by index at a time
var_series = dask.delayed(pd.read_table)(filepath_or_buffer=row[dataset],
delimiter=file_delimiter,header=None,usecols=usecols,
names=[tuple(row[['FID','LAT','LONG_']])])
# append the series into the list of series
df_list.append(var_series)
# concatenate list of series (axis=1 is column-wise) into a dataframe
df1 = dask.delayed(pd.concat)(df_list, axis=1)
# set and subset date_range index
df2 = df1.set_index(met_daily_dates, inplace=False).loc[met_daily_subdates]
# end of variable table
print(eachvar+ ' dataframe reading to start: ' + str(pd.datetime.now()-starttime))
# assign dataframe to dictionary object
df_dict['_'.join([eachvar, gridclimname])] = dask.compute(df2)[0]
print(eachvar+ ' dataframe complete:' + str(pd.datetime.now()-starttime))
return(df_dict)
def read_daily_streamflow(file_name, drainage_area_m2, file_colnames=None, delimiter='\t', header='infer'):
# read in a daily streamflow data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, delimiter=delimiter, header=header)
# set columns, if header=None
if file_colnames is not None:
daily_data.columns=file_colnames
else:
file_colnames=list(daily_data.columns)
# calculate cfs to cms conversion, or vice versa
if 'flow_cfs' in daily_data.columns:
flow_cfs=daily_data['flow_cfs']
flow_cms=flow_cfs/(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
elif 'flow_cms' in daily_data.columns:
flow_cms=daily_data['flow_cms']
flow_cfs=flow_cms*(3.28084**3)
flow_mmday=flow_cms*1000*3600*24/drainage_area_m2
# determine the datetime
    row_dates=pd.to_datetime(daily_data[['year','month','day']])
# generate the daily_flow and set the datetime as row indices
daily_flow=pd.concat([flow_cfs, flow_cms, flow_mmday],axis=1)
daily_flow.set_index(row_dates, inplace=True)
daily_flow.columns=['flow_cfs', 'flow_cms', 'flow_mmday']
return(daily_flow)
def read_daily_precip(file_name, file_colnames=None, header='infer', delimiter='\s+'):
# read in a daily precipitation data set
# if file_colnames are supplied, use header=None
    if pd.notnull(file_colnames):
header=None
# read in the data
daily_data=pd.read_table(file_name, delimiter=delimiter, header=header)
# set columns, if header=None
if pd.notnull(file_colnames):
daily_data.columns=file_colnames
else:
file_colnames=list(daily_data.columns)
# calculate cfs to cms conversion, or vice versa
if 'precip_m' in daily_data.columns:
precip_m=daily_data['precip_m']
precip_mm=precip_m*1000
# determine the datetime
    row_dates=pd.to_datetime(daily_data[['year','month','day']])
# generate the daily_flow and set the datetime as row indices
daily_precip=pd.concat([precip_m, precip_mm],axis=1)
daily_precip.set_index(row_dates, inplace=True)
daily_precip.columns=['precip_m', 'precip_mm']
return(daily_precip)
def read_daily_snotel(file_name, file_colnames=None, usecols=None, delimiter=',', header='infer'):
# read in a daily SNOTEL observation data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, usecols=usecols, header=header, delimiter=delimiter)
# reset the colnames
daily_data.columns=['Date', 'Tmax_C', 'Tmin_C', 'Tavg_C', 'Precip_mm']
# transform the data
daily_data['Tmax_C']=(daily_data['Tmax_C'] -32)/1.8
daily_data['Tmin_C']=(daily_data['Tmin_C'] -32)/1.8
daily_data['Tavg_C']=(daily_data['Tavg_C'] -32)/1.8
daily_data['Precip_mm']=daily_data['Precip_mm'] *25.4
# determine the datetime
row_dates=pd.to_datetime(daily_data.Date)
# generate the daily_flow and set the datetime as row indices
daily_snotel=daily_data[['Tmax_C', 'Tmin_C', 'Tavg_C', 'Precip_mm']]
daily_snotel.set_index(row_dates, inplace=True)
return(daily_snotel)
def read_daily_coop(file_name, file_colnames=None, usecols=None, delimiter=',', header='infer'):
# read in a daily COOP observation data set
# if file_colnames are supplied, use header=None
if file_colnames is not None:
header=None
# read in the data
daily_data=pd.read_table(file_name, usecols=usecols, header=header, delimiter=delimiter,
date_parser=lambda x: pd.datetime.strptime(x, '%Y%m%d'),
parse_dates=[0],
na_values=-9999)
# reset the colnames
daily_data.columns=['Date', 'Precip_mm','Tmax_C', 'Tmin_C', 'Tavg_C']
# transform the data
daily_data['Tmax_C']=(daily_data['Tmax_C'] -32)/1.8
daily_data['Tmin_C']=(daily_data['Tmin_C'] -32)/1.8
daily_data['Tavg_C']=(daily_data['Tavg_C'] -32)/1.8
daily_data['Precip_mm']=daily_data['Precip_mm'] *25.4
# determine the datetime
row_dates=pd.to_datetime(daily_data.Date)
# generate the daily_flow and set the datetime as row indices
daily_coop=daily_data[['Precip_mm','Tmax_C', 'Tmin_C', 'Tavg_C']]
daily_coop.set_index(row_dates, inplace=True)
return(daily_coop)
# ### Data Processing functions
def generateVarTables(file_dict, gridclimname, dataset, metadata, df_dict=None):
"""
Slice the files by their common variable
all_files: (dict) a dictionary of dataframes for each tabular datafile
dataset: (str) the name of the dataset
metadata (dict) the dictionary that contains the metadata explanations; default is None
"""
# combine the files into a pandas panel
panel = pd.Panel.from_dict(file_dict)
# initiate output dictionary
if pd.isnull(df_dict):
df_dict = dict()
# slice the panel for each variable in list
for eachvar in metadata[dataset]['variable_list']:
df_dict['_'.join([eachvar, gridclimname])] = panel.xs(key=eachvar, axis=2)
return(df_dict)
# compare two date sets for the start and end of the overlapping dates
def overlappingDates(date_set1, date_set2):
# find recent date
if date_set1[0] > date_set2[0]:
start_date = date_set1[0]
else:
start_date = date_set2[0]
# find older date
if date_set1[-1] < date_set2[-1]:
end_date = date_set1[-1]
else:
end_date = date_set2[-1]
return(start_date, end_date)
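# Illustrative sketch for overlappingDates using two pandas date ranges:
#
# dates1 = pd.date_range('1915-01-01', '2011-12-31', freq='D')
# dates2 = pd.date_range('1950-01-01', '2013-12-31', freq='D')
# start, end = overlappingDates(dates1, dates2)   # -> 1950-01-01, 2011-12-31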
# Calculate means by 8 different methods
def multigroupMeans(VarTable, n_stations, start_date, end_date, analysis_elev_max_station, analysis_elev_min_station):
    # analysis_elev_max_station/analysis_elev_min_station are the station column labels at the maximum and minimum elevations
    Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# e.g., Mean monthly temperature at each station
month_daily=Var_daily.groupby(Var_daily.index.month).mean() # average monthly minimum temperature at each station
# e.g., Mean monthly temperature averaged for all stations in analysis
meanmonth_daily=month_daily.mean(axis=1)
# e.g., Mean monthly temperature for minimum and maximum elevation stations
meanmonth_min_maxelev_daily=Var_daily.loc[:,analysis_elev_max_station].groupby(Var_daily.index.month).mean()
meanmonth_min_minelev_daily=Var_daily.loc[:,analysis_elev_min_station].groupby(Var_daily.index.month).mean()
# e.g., Mean annual temperature
year_daily=Var_daily.groupby(Var_daily.index.year).mean()
# e.g., mean annual temperature each year for all stations
meanyear_daily=year_daily.mean(axis=1)
# e.g., mean annual min temperature for all years, for all stations
meanallyear_daily=np.nanmean(meanyear_daily)
    # e.g., anomaly per year compared to average
anom_year_daily=meanyear_daily-meanallyear_daily
return(month_daily,
meanmonth_daily,
meanmonth_min_maxelev_daily,
meanmonth_min_minelev_daily,
year_daily,
meanyear_daily,
meanallyear_daily,
anom_year_daily)
def specialTavgMeans(VarTable, n_stations, start_date, end_date):
    Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# Average temperature for each month at each station
permonth_daily=Var_daily.groupby(pd.TimeGrouper("M")).mean()
# Average temperature each month averaged at all stations
meanpermonth_daily=permonth_daily.mean(axis=1)
# Average monthly temperature for all stations
meanallpermonth_daily=meanpermonth_daily.mean(axis=0)
    # anomaly per year compared to average
anom_month_daily=(meanpermonth_daily-meanallpermonth_daily)/1000
return(permonth_daily,
meanpermonth_daily,
meanallpermonth_daily,
anom_month_daily)
def aggregate_space_time_average(VarTable, df_dict, suffix, start_date, end_date):
"""
VarTable: (dataframe) a dataframe with date ranges as the index
df_dict: (dict) a dictionary to which computed outputs will be stored
suffix: (str) a string representing the name of the original table
start_date: (date) the start of the date range within the original table
end_date: (date) the end of the date range within the original table
"""
starttime = pd.datetime.now()
# subset dataframe to the date range of interest
Var_daily = VarTable.loc[start_date:end_date,:]
# Mean monthly temperature at each station
df_dict['month_'+suffix] = Var_daily.groupby(Var_daily.index.month).mean()
# Mean monthly temperature averaged for all stations in analysis
df_dict['meanmonth_'+suffix] = Var_daily.groupby(Var_daily.index.month).mean().mean(axis=1)
# Mean annual temperature
df_dict['year_'+suffix] = Var_daily.groupby(Var_daily.index.year).mean()
# mean annual temperature each year for all stations
df_dict['meanyear_'+suffix] = Var_daily.groupby(Var_daily.index.year).mean().mean(axis=1)
# mean annual temperature for all years, for all stations
df_dict['meanallyear_'+suffix] = Var_daily.mean(axis=1).mean(axis=0)
# anomaly per year compared to average
df_dict['anom_year_'+suffix] = df_dict['meanyear_'+suffix] - df_dict['meanallyear_'+suffix]
print(suffix+ ' calculations completed in ' + str(pd.datetime.now()-starttime))
return(df_dict)
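# Illustrative sketch of aggregate_space_time_average for a variable table named
# 'temp_avg_dailymet_livneh2013' (the table name and dates are examples):
#
# df_dict = aggregate_space_time_average(VarTable=df_dict['temp_avg_dailymet_livneh2013'],
#                                        df_dict=df_dict,
#                                        suffix='temp_avg_dailymet_livneh2013',
#                                        start_date=pd.datetime(1950,1,1),
#                                        end_date=pd.datetime(2010,12,31))
# # adds keys: month_<suffix>, meanmonth_<suffix>, year_<suffix>, meanyear_<suffix>,
# #            meanallyear_<suffix>, anom_year_<suffix>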
def aggregate_space_time_sum(VarTable, n_stations, start_date, end_date):
Var_daily = VarTable.loc[start_date:end_date, range(0,n_stations)]
# Average precipitation per month at each station
permonth_daily=Var_daily.groupby(pd.TimeGrouper("M")).sum()
# Average precipitation per month averaged at all stations
meanpermonth_daily=permonth_daily.mean(axis=1)
# Average monthly precipitation averaged at all stations
meanmonth_daily= meanpermonth_daily.groupby(meanpermonth_daily.index.month).mean()
return(Var_daily,
permonth_daily,
meanpermonth_daily,
meanmonth_daily)
def plotTavg(dictionary, loc_name, start_date, end_date):
# Plot 1: Monthly temperature analysis of Livneh data
    # skip the plot if neither the Livneh nor the WRF monthly temperature summaries are available
    if ('meanmonth_temp_avg_liv2013_met_daily' not in dictionary.keys()) and ('meanmonth_temp_avg_wrf2014_met_daily' not in dictionary.keys()):
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_temp_avg_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_liv2013_met_daily'][wy_numbers],'r*--',linewidth=1, label='Liv Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_liv2013_met_daily'][wy_numbers],'rX--',linewidth=1, label='Liv Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
if 'meanmonth_temp_avg_wrf2014_met_daily' in dictionary.keys():
# WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_wrf2014_met_daily'][wy_numbers],'b^--',linewidth=1, label='WRF Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_wrf2014_met_daily'][wy_numbers],'bo--',linewidth=1, label='WRF Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
if 'meanmonth_temp_avg_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
        # WRF2014 bias-corrected
plt.plot(wy_index, dictionary['meanmonth_maxelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g^--',linewidth=1, label='WRFbc Tavg- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Tavg- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_temp_avg_livneh2013_wrf2014bc_met_daily'][wy_numbers],'go--',linewidth=1, label='WRFbc Tavg- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Temperature (deg C)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Temperature\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('avg_monthly_temp'+str(loc_name)+'.png')
plt.show()
def plotPavg(dictionary, loc_name, start_date, end_date):
# Plot 1: Monthly temperature analysis of Livneh data
    # skip the plot if neither the Livneh nor the WRF monthly precipitation summaries are available
    if ('meanmonth_precip_liv2013_met_daily' not in dictionary.keys()) and ('meanmonth_precip_wrf2014_met_daily' not in dictionary.keys()):
        return
# generate month indices
wy_index=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
wy_numbers=[10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9]
month_strings=[ 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept']
# initiate the plot object
fig, ax=plt.subplots(1,1,figsize=(10, 6))
if 'meanmonth_precip_liv2013_met_daily' in dictionary.keys():
# Liv2013
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_liv2013_met_daily'][wy_numbers],'r^--',linewidth=1, label='Liv Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_liv2013_met_daily'][wy_numbers],'r-', linewidth=1, label='Liv Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_liv2013_met_daily'][wy_numbers],'ro--',linewidth=1, label='Liv Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
    if 'meanmonth_precip_wrf2014_met_daily' in dictionary.keys():
        # WRF2014
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_wrf2014_met_daily'][wy_numbers],'b^--',linewidth=1, label='WRF Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_wrf2014_met_daily'][wy_numbers],'b-',linewidth=1, label='WRF Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_wrf2014_met_daily'][wy_numbers],'bo--',linewidth=1, label='WRF Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
    if 'meanmonth_precip_livneh2013_wrf2014bc_met_daily' in dictionary.keys():
        # WRF2014 bias-corrected
plt.plot(wy_index, dictionary['meanmonth_maxelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g^--',linewidth=1, label='WRFbc Precip- Max Elev='+str(dictionary['analysis_elev_max_cutoff'])+"-"+str(dictionary['analysis_elev_max'])+'m')
plt.plot(wy_index, dictionary['meanmonth_midelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'g-',linewidth=1, label='WRFbc Precip- Mid Elev='+str(dictionary['analysis_elev_min_cutoff'])+"-"+str(dictionary['analysis_elev_max_cutoff'])+'m')
plt.plot(wy_index, dictionary['meanmonth_minelev_precip_livneh2013_wrf2014bc_met_daily'][wy_numbers],'go--',linewidth=1, label='WRFbc Precip- Min Elev='+str(dictionary['analysis_elev_min'])+"-"+str(dictionary['analysis_elev_min_cutoff'])+'m')
# add reference line at y=0
plt.plot([1, 12],[0, 0], 'k-',linewidth=1)
plt.ylabel('Precip (mm)',fontsize=14)
plt.xlabel('Month',fontsize=14)
plt.xlim(1,12);
plt.xticks(wy_index, month_strings);
plt.tick_params(labelsize=12)
plt.legend(loc='best')
plt.grid(which='both')
plt.title(str(loc_name)+'\nAverage Precipitation\n Years: '+str(start_date.year)+'-'+str(end_date.year)+'; Elevation: '+str(dictionary['analysis_elev_min'])+'-'+str(dictionary['analysis_elev_max'])+'m', fontsize=16)
plt.savefig('avg_monthly_precip'+str(loc_name)+'.png')
plt.show()
def gridclim_dict(mappingfile, dataset, gridclimname=None, metadata=None, min_elev=None, max_elev=None,
file_start_date=None, file_end_date=None, file_time_step=None,
file_colnames=None, file_delimiter=None,
subset_start_date=None, subset_end_date=None, df_dict=None, colvar='all'):
"""
# pipelined operation for assimilating data, processing it, and standardizing the plotting
mappingfile: (dir) the path directory to the mappingfile
dataset: (str) the name of the dataset within mappingfile to use
gridclimname: (str) the suffix for the dataset to be named; if None is provided, default to the dataset name
metadata: (str) the dictionary that contains the metadata explanations; default is None
min_elev: (float) the minimum elevation criteria; default is None
max_elev: (float) the maximum elevation criteria; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_time_step: (str) the timedelta code that represents the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
file_delimiter: (str) a file parsing character to be used for file reading
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
df_dict: (dict) an existing dictionary where new computations will be stored
"""
# generate the climate locations and n_stations
locations_df, n_stations = mappingfileToDF(mappingfile, colvar=colvar)
# generate the climate station info
if pd.isnull(min_elev):
min_elev = locations_df.ELEV.min()
if pd.isnull(max_elev):
max_elev = locations_df.ELEV.max()
# extract metadata if the information are not provided
if not isinstance(metadata, type(None)):
if isinstance(file_start_date, type(None)):
file_start_date = metadata[dataset]['start_date']
if isinstance(file_end_date, type(None)):
file_end_date = metadata[dataset]['end_date']
if isinstance(file_time_step, type(None)):
file_time_step = metadata[dataset]['temporal_resolution']
if isinstance(file_colnames, type(None)):
file_colnames = metadata[dataset]['variable_list']
if isinstance(file_delimiter, type(None)):
file_delimiter = metadata[dataset]['delimiter']
# take all defaults if subset references are null
if pd.isnull(subset_start_date):
subset_start_date = file_start_date
if pd.isnull(subset_end_date):
subset_end_date = file_end_date
    # initiate the output dictionary if df_dict was not provided
if pd.isnull(df_dict):
df_dict = dict()
if pd.isnull(gridclimname):
if pd.notnull(dataset):
gridclimname=dataset
else:
print('no suffix name provided. Provide a gridclimname or dataset label.')
return
    # assemble the stations within the min and max elevation ranges
locations_df = locations_df[(locations_df.ELEV >= min_elev) & (locations_df.ELEV <= max_elev)]
# create dictionary of dataframe
df_dict = read_files_to_vardf(map_df=locations_df,
dataset=dataset,
metadata=metadata,
gridclimname=gridclimname,
file_start_date=file_start_date,
file_end_date=file_end_date,
file_delimiter=file_delimiter,
file_time_step=file_time_step,
file_colnames=file_colnames,
subset_start_date=subset_start_date,
subset_end_date=subset_end_date,
min_elev=min_elev,
max_elev=max_elev,
df_dict=df_dict)
#
vardf_list = [eachvardf for eachvardf in df_dict.keys() if eachvardf.endswith(gridclimname)]
# loop through the dictionary to compute each aggregate_space_time_average object
for eachvardf in vardf_list:
# update the dictionary with spatial and temporal average computations
df_dict.update(aggregate_space_time_average(VarTable=df_dict[eachvardf],
df_dict=df_dict,
suffix=eachvardf,
start_date=subset_start_date,
end_date=subset_end_date))
# if the number of stations exceeds 500, remove daily time-series dataframe
if len(locations_df)>500:
del df_dict[eachvardf]
return(df_dict)
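# Illustrative end-to-end sketch for gridclim_dict (the dataset label, mappingfile path and
# date range are examples; metadata comes from the ogh_meta catalog):
#
# meta_file = ogh_meta()
# ltm = gridclim_dict(mappingfile='mappingfile.csv',
#                     dataset='dailymet_livneh2013',
#                     metadata=meta_file,
#                     subset_start_date=pd.datetime(1950,1,1),
#                     subset_end_date=pd.datetime(2010,12,31))
# sorted(ltm.keys())   # variable tables plus their spatial-temporal summaries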
def compute_diffs(df_dict, df_str, gridclimname1, gridclimname2, prefix1, prefix2=['meanmonth'], comp_dict=None):
#Compute difference between monthly means for some data (e.g,. Temp) for two different gridded datasets (e.g., Liv, WRF)
if isinstance(comp_dict, type(None)):
comp_dict=dict()
for each1 in prefix1:
for each2 in prefix2:
comp_dict['_'.join([str(each1),df_str])] = df_dict['_'.join([each2,each1,gridclimname1])]-df_dict['_'.join([each2,each1,gridclimname2])]
return(comp_dict)
def compute_ratios(df_dict, df_str, gridclimname1, gridclimname2, prefix1, prefix2=['meanmonth'], comp_dict=None):
    #Compute the ratio between monthly means for some data (e.g., Precip) for two different gridded datasets (e.g., Liv, WRF)
if isinstance(comp_dict, type(None)):
comp_dict=dict()
for each1 in prefix1:
for each2 in prefix2:
comp_dict['_'.join([str(each1),df_str])] = df_dict['_'.join([each2,each1,gridclimname1])]/df_dict['_'.join([each2,each1,gridclimname2])]
return(comp_dict)
def compute_elev_diffs(df_dict, df_str, gridclimname1, prefix1, prefix2a='meanmonth_minelev_', prefix2b='meanmonth_maxelev_'):
comp_dict=dict()
for each1 in prefix1:
comp_dict[str(each1)+df_str] = df_dict[prefix2a+each1+gridclimname1]-df_dict[prefix2b+each1+gridclimname1]
return(comp_dict)
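# Illustrative sketch for the comparison helpers (the suffixes and prefixes below are
# examples and must match the gridclimname values and variable names used when building
# df_dict):
#
# diffs = compute_diffs(df_dict=ltm, df_str='lw_diff',
#                       gridclimname1='dailymet_livneh2013', gridclimname2='dailywrf_salathe2014',
#                       prefix1=['temp_avg', 'precip'], prefix2=['meanmonth'])
# ratios = compute_ratios(df_dict=ltm, df_str='lw_ratio',
#                         gridclimname1='dailymet_livneh2013', gridclimname2='dailywrf_salathe2014',
#                         prefix1=['precip'], prefix2=['meanmonth'])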
def monthlyBiasCorrection_deltaTratioP_Livneh_METinput(homedir, mappingfile, BiasCorr,
lowrange='0to1000m', LowElev=range(0,1000),
midrange='1000to1500m', MidElev=range(1001,1501),
highrange='1500to3000m', HighElev=range(1501,3000),
data_dir=None, file_start_date=None, file_end_date=None):
np.set_printoptions(precision=3)
# take liv2013 date set date range as default if file reference dates are not given
    if isinstance(file_start_date, type(None)):
        file_start_date = pd.Timestamp(1915, 1, 1)
    if isinstance(file_end_date, type(None)):
        file_end_date = pd.Timestamp(2011, 12, 31)
# generate the month vector
month = pd.date_range(start=file_start_date, end=file_end_date).month
month = pd.DataFrame({'month':month})
# create NEW directory
dest_dir = os.path.join(homedir, 'biascorrWRF_liv')
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
print('destdir created')
# read in the Elevation table
zdiff = pd.read_table(mappingfile, sep=',', header='infer')
zdiff = zdiff.rename(columns={'RASTERVALU':'Elev','ELEV':'Elev'})
zdiff = zdiff[['LAT','LONG_', 'Elev']]
zdiff['filename'] = zdiff[['LAT','LONG_']].apply(lambda x: '_'.join(['Meteorology_Livneh_CONUSExt_v.1.2_2013',str(x[0]), str(x[1])]), axis=1)
#print(zdiff[0:10])
# lapse rate vector by month
# temperature adjustment vector by month
# identify the files to read
print('reading in data_long_lat files')
data_files = [os.path.join(data_dir,dat) for dat in os.listdir(data_dir) if os.path.basename(dat).startswith('Meteorology_Livneh_CONUSExt_v.1.2_2013')]
print('done reading data_long_lat files')
# loop through each file
for eachfile in data_files:
# subset the zdiff table using the eachfile's filename, then assign Elevation to equal the Elev value
Elevation = zdiff[zdiff['filename']==os.path.basename(eachfile)]['Elev'].reset_index(drop=True)
print(Elevation)
# decide on the elevation-based Tcorr
#print('Convert BiasCorr to a df')
if Elevation.iloc[0] in LowElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+lowrange)}
#BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
#BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in MidElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+midrange)}
#BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
#BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in HighElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+highrange)}
#BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
#BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
#print('reading in eachfile')
        read_dat = pd.read_table(eachfile, delimiter=r'\s+', header=None)
read_dat.columns = ['precip', 'temp_max','temp_min','wind']
# print('done reading eachfile')
# extrapolate monthly values for each variable
for eachvar in ['precip', 'temp_max', 'temp_min']:
            BiasCorr_sub_df = [BiasCorr_sub[eachkey] for eachkey in BiasCorr_sub.keys() if eachkey.startswith(eachvar)][0]
# subset the column for the eachfile station number
            BiasCorr_sub_df = BiasCorr_sub_df.loc[:, zdiff[zdiff['filename']==os.path.basename(eachfile)].index]
BiasCorr_sub_df.columns = ['var']
# regenerate the month
BiasCorr_sub_df = BiasCorr_sub_df.reset_index().rename(columns={'index':'month'})
# generate s-vectors
month_obj = month.merge(BiasCorr_sub_df, how='left', on='month')
# generate the s-vector
            s = pd.Series(month_obj['var'])
#
if eachvar=='precip':
read_dat[eachvar] = np.array(read_dat[eachvar])*np.array(s)
else:
read_dat[eachvar] = np.array(read_dat[eachvar])+np.array(s)
#print('grabbing the S vector of monthlapse after the merge between month and Tcorr_df')
#print('number of corrections to apply: '+str(len(month)))
# write it out to the new destination location
read_dat.to_csv(os.path.join(dest_dir, os.path.basename(eachfile)), sep='\t', header=None, index=False)
print(os.path.join(dest_dir, os.path.basename(eachfile)))
print('mission complete.')
print('this device will now self-destruct.')
print('just kidding.')
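    # Hedged call sketch (added for illustration; paths are placeholders and the BiasCorr
    # structure is an assumption inferred from the loop above, where each value keyed as
    # '<variable>..._<elevation range>' is a month-indexed table with one column per
    # mapped station):
    #
    #   monthlyBiasCorrection_deltaTratioP_Livneh_METinput(
    #       homedir='/path/to/project',
    #       mappingfile='/path/to/project/mappingfile.csv',
    #       BiasCorr=BiasCorr,   # keys such as 'precip_..._0to1000m', 'temp_max_..._1000to1500m'
    #       data_dir='/path/to/project/livneh2013_met_daily')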
def monthlyBiasCorrection_WRFlongtermmean_elevationbins_METinput(homedir, mappingfile, BiasCorr,
lowrange='0to1000m', LowElev=range(0,1000),
midrange='1000to1500m', MidElev=range(1001,1501),
highrange='1500to3000m', HighElev=range(1501,3000),
data_dir=None,
file_start_date=None,
file_end_date=None):
np.set_printoptions(precision=3)
# take liv2013 date set date range as default if file reference dates are not given
    if isinstance(file_start_date, type(None)):
        file_start_date = pd.Timestamp(1950, 1, 1)
    if isinstance(file_end_date, type(None)):
        file_end_date = pd.Timestamp(2010, 12, 31)
# generate the month vector
month = pd.date_range(start=file_start_date, end=file_end_date).month
month = pd.DataFrame({'month':month})
# create NEW directory
dest_dir = os.path.join(homedir, 'biascorr_WRF_ltm')
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
print('destdir created')
# read in the Elevation table
zdiff = pd.read_table(mappingfile, sep=',', header='infer')
zdiff = zdiff.rename(columns={'RASTERVALU':'Elev','ELEV':'Elev'})
zdiff = zdiff[['LAT','LONG_', 'Elev']]
zdiff['filename'] = zdiff[['LAT','LONG_']].apply(lambda x: '_'.join(['data',str(x[0]), str(x[1])]), axis=1)
#print(zdiff[0:10])
# lapse rate vector by month
# temperature adjustment vector by month
# identify the files to read
print('reading in data_long_lat files')
data_files = [os.path.join(data_dir,dat) for dat in os.listdir(data_dir) if os.path.basename(dat).startswith('data')]
#print('done reading data_long_lat files')
# loop through each file
for eachfile in data_files:
# subset the zdiff table using the eachfile's filename, then assign Elevation to equal the Elev value
Elevation = zdiff[zdiff['filename']==os.path.basename(eachfile)]['Elev'].reset_index(drop=True)
print(Elevation)
# decide on the elevation-based Tcorr
#print('Convert BiasCorr to a df')
if Elevation.iloc[0] in LowElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+lowrange)}
BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in MidElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+midrange)}
BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
elif Elevation.iloc[0] in HighElev:
BiasCorr_sub = {k: v for k, v in BiasCorr.items() if k.endswith('_'+highrange)}
BiasCorr_sub_df = pd.DataFrame.from_dict(BiasCorr_sub, orient='columns').reset_index()
BiasCorr_sub_df.columns = ['month', 'precip', 'Tmax', 'Tmin']
print('reading in eachfile')
        read_dat = pd.read_table(eachfile, delimiter=r'\s+', header=None)
read_dat.columns = ['precip', 'Tmax','Tmin','wind']
print('done reading eachfile')
# extrapolate monthly values
month_obj = month.merge(BiasCorr_sub_df, how='left', on='month')
#print('merged month with Tcorr_df')
#print(month_obj.head(35))
# generate s-vectors
s1 = pd.Series(month_obj.Tmin)
        s2 = pd.Series(month_obj.Tmax)
# This file is part of NEORL.
# Copyright (c) 2021 Exelon Corporation and MIT Nuclear Science and Engineering
# NEORL is free software: you can redistribute it and/or modify
# it under the terms of the MIT LICENSE
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#"""
#Created on Tue Feb 25 14:42:24 2020
#
#@author: <NAME>
#"""
#Implementation of RL-informed ES (PPO-ES)
#Based on papers:
#<NAME>., & <NAME>. (2021). Rule-based reinforcement learning
#methodology to inform evolutionary algorithms for constrained optimization
#of engineering applications. Knowledge-Based Systems, 217, 106836.
#<NAME>., <NAME>., & <NAME>. (2021). Large-scale design
#optimisation of boiling water reactor bundles with neuroevolution.
#Annals of Nuclear Energy, 160, 108355.
import warnings
warnings.filterwarnings("ignore")
import random
import pandas as pd
import numpy as np
from neorl.evolu.discrete import mutate_discrete, encode_grid_to_discrete, decode_discrete_to_grid
from neorl import ES
from neorl import PPO2, MlpPolicy, RLLogger
from neorl.utils.seeding import set_neorl_seed
def encode_grid_individual_to_discrete(individual, bounds):
new_indv=[]
for i, key in enumerate(bounds):
if bounds[key][0]=='grid':
int_val=bounds[key][1].index(individual[i])
new_indv.append(int_val)
else:
new_indv.append(individual[i])
return new_indv
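# Hedged example (added for illustration; not part of the original NEORL source): encode a
# mixed individual against a bounds dict that contains one 'grid' variable. The variable
# names and grid categories below are assumptions made up for this demo.
def _demo_encode_grid_individual():
    demo_bounds = {'x1': ['int', 1, 4],
                   'x2': ['grid', ('low', 'mid', 'high')],
                   'x3': ['float', 0.1, 0.9]}
    # 'mid' is replaced by its index (1) in the grid tuple; non-grid entries pass through
    return encode_grid_individual_to_discrete([2, 'mid', 0.5], demo_bounds)   # -> [2, 1, 0.5]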
class PPOES(object):
"""
A PPO-informed ES Neuroevolution module
:param mode: (str) problem type, either ``min`` for minimization problem or ``max`` for maximization
:param fit: (function) the fitness function to be used with ES
:param env: (NEORL environment or Gym environment) The environment to learn with PPO, either use NEORL method ``CreateEnvironment`` (see **below**) or construct your custom Gym environment.
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param npop: (int): population size of ES
:param npop_rl: (int): number of RL/PPO individuals to use in ES population (``npop_rl < npop``)
:param init_pop_rl: (bool) flag to initialize ES population with PPO individuals
:param hyperparam: (dict) dictionary of ES hyperparameters (``cxpb``, ``cxmode``, ``mutpb``, ``alpha``, ``mu``, ``smin``, ``smax``)
        and PPO hyperparameters (``n_steps``, ``gamma``, ``learning_rate``, ``ent_coef``, ``vf_coef``, ``lam``, ``cliprange``, ``max_grad_norm``, ``nminibatches``, ``noptepochs``)
:param seed: (int) random seed for sampling
"""
def __init__ (self, mode, fit, env, bounds, npop=60, npop_rl=6,
init_pop_rl=True, hyperparam={},
seed=None):
self.seed = seed
set_neorl_seed(self.seed)
assert npop_rl < npop, '--error: the size of RL individuals `npop_rl` MUST be less than `npop`'
self.mode=mode
self.bounds=bounds
self.fit=fit
self.env=env
self.npop=npop
self.npop_rl=npop_rl
self.init_pop_rl=init_pop_rl
#--mir
self.mode=mode
#infer variable types
self.var_type = np.array([bounds[item][0] for item in bounds])
self.bounds=bounds
#mir-grid
if "grid" in self.var_type:
self.grid_flag=True
else:
self.grid_flag=False
self.dim = len(bounds)
self.var_names=[item for item in self.bounds]
self.hyperparam = hyperparam
#PPO hyperparameters
self.n_steps = hyperparam['n_steps'] if 'n_steps' in hyperparam else 128
self.gamma = hyperparam['gamma'] if 'gamma' in hyperparam else 0.99
self.ent_coef = hyperparam['ent_coef'] if 'ent_coef' in hyperparam else 0.01
self.learning_rate = hyperparam['learning_rate'] if 'learning_rate' in hyperparam else 0.00025
self.vf_coef = hyperparam['vf_coef'] if 'vf_coef' in hyperparam else 0.5
self.lam = hyperparam['lam'] if 'lam' in hyperparam else 0.95
self.max_grad_norm = hyperparam['max_grad_norm'] if 'max_grad_norm' in hyperparam else 0.5
self.cliprange = hyperparam['cliprange'] if 'cliprange' in hyperparam else 0.2
self.nminibatches = hyperparam['nminibatches'] if 'nminibatches' in hyperparam else 4
self.noptepochs = hyperparam['noptepochs'] if 'noptepochs' in hyperparam else 10
#ES hyperparameters
self.cxpb = hyperparam['cxpb'] if 'cxpb' in hyperparam else 0.6
self.cxmode = hyperparam['cxmode'] if 'cxmode' in hyperparam else 'blend'
self.alpha = hyperparam['alpha'] if 'alpha' in hyperparam else 0.5
self.mutpb = hyperparam['mutpb'] if 'mutpb' in hyperparam else 0.3
self.mu = hyperparam['mu'] if 'mu' in hyperparam else int(npop/2)
self.smin = hyperparam['smin'] if 'smin' in hyperparam else 0.01
self.smax = hyperparam['smax'] if 'smax' in hyperparam else 0.5
#will be activated after using `learn` method
self.PPO_RUN_FLAG=False
def learn(self, total_timesteps, rl_filter=100, verbose=False):
"""
        This function starts the learning of the PPO algorithm for a number of timesteps to create individuals for the evolutionary search
:param total_timesteps: (int) number of timesteps to run
:param rl_filter: (int) number of top individuals to keep from the full RL search
:param verbose: (bool) print statistics to screen
:return: (dataframe) dataframe of individuals/fitness sorted from best to worst
"""
self.PPO_RUN_FLAG=True
print('---------------------------------------------------------------------------------')
print('------------------------------- PPO-ES is Running -------------------------------')
print('---------------------------------------------------------------------------------')
try:
ncores=len(self.env.get_attr('mode'))
            print('Parallel RL is running with {} cores'.format(ncores))
self.env.mode=self.env.get_attr('mode')[0]
except:
try:
                self.env.mode  # check that the environment exposes a mode attribute
ncores=1
print('Serial RL is running with {} core'.format(ncores))
except:
self.env.mode = 'max' # or some other default value.
        print('--warning: Problem mode defined in the RL environment is', self.env.mode)
print('--warning: Problem mode defined in the PPOES class is', self.mode)
if self.env.mode == self.mode:
print('--warning: Both problem modes match')
else:
raise ValueError('The two problem modes do not match, alg terminates')
print('------------------------------- Part I: PPO is collecting data -------------------------------')
cb=RLLogger(check_freq=1)
ppo = PPO2(MlpPolicy, env=self.env,
n_steps=self.n_steps,
gamma=self.gamma,
ent_coef=self.ent_coef,
vf_coef=self.vf_coef,
cliprange=self.cliprange,
max_grad_norm=self.max_grad_norm,
lam=self.lam,
nminibatches=self.nminibatches,
noptepochs=self.noptepochs,
seed=self.seed,
verbose=verbose) #run PPO
ppo.learn(total_timesteps=total_timesteps, callback=cb)
        rl_data = pd.DataFrame(cb.x_hist, columns=self.var_names)
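# --------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the class continues in the full NEORL
# source with the evolutionary phase). A typical call pattern, assuming a NEORL
# environment built with CreateEnvironment and a fitness function `fit`, might look like
# the following -- the exact CreateEnvironment arguments are assumptions, not verified
# against a specific NEORL release:
#
#   bounds = {'x1': ['float', -5.0, 5.0], 'x2': ['float', -5.0, 5.0]}
#   env = CreateEnvironment(method='ppo', fit=fit, bounds=bounds, mode='max')
#   ppoes = PPOES(mode='max', fit=fit, env=env, bounds=bounds, npop=60, npop_rl=6, seed=1)
#   rl_individuals = ppoes.learn(total_timesteps=5000, rl_filter=100, verbose=True)
# --------------------------------------------------------------------------------------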
"""
Base Model Module
-----------------
This is the base class for all model modules. This class does not contain a particular model, but it does include all of the functions to run a model, capture model statistics, and visualize model data.
"""
__author__ = 'krishnab'
__version__ = '0.1.0'
import abc
import numpy as np
import pandas as pd
import datetime
from operator import neg
from .DataManagement import DataManagement
import xarray as xr
from tqdm import tqdm
from .ColumnSpecs import EXCLUDED_COLUMNS
np.seterr(divide='ignore', invalid='ignore')
class Base_model(metaclass=abc.ABCMeta):
def __init__(self, argsdict ={}):
self.name = 'base model m'
self.label = 'base model m'
self.nf1 = argsdict.get('number_of_females_1', 0)
self.nf2 = argsdict.get('number_of_females_2', 0)
self.nf3 = argsdict.get('number_of_females_3', 0)
self.nm1 = argsdict.get('number_of_males_1', 0)
self.nm2 = argsdict.get('number_of_males_2', 0)
self.nm3 = argsdict.get('number_of_males_3',0)
self.bf1 = argsdict.get('hiring_rate_women_1', 0)
self.bf2 = argsdict.get('hiring_rate_women_2',0)
self.bf3 = argsdict.get('hiring_rate_women_3',0)
self.df1 = argsdict.get('attrition_rate_women_1',0)
self.df2 = argsdict.get('attrition_rate_women_2',0)
self.df3 = argsdict.get('attrition_rate_women_3',0)
self.dm1 = argsdict.get('attrition_rate_men_1',0)
self.dm2 = argsdict.get('attrition_rate_men_2',0)
self.dm3 = argsdict.get('attrition_rate_men_3',0)
self.target_female_percentage = argsdict.get('t_fpct', 0.25)
self.duration = argsdict.get('duration',0)
self.female_promotion_probability_1 = argsdict.get('female_promotion_probability_1',0)
self.female_promotion_probability_2 = argsdict.get('female_promotion_probability_2',0)
self.male_promotion_probability_1 = argsdict.get('male_promotion_probability_1',0)
self.male_promotion_probability_2 = argsdict.get('male_promotion_probability_2',0)
self.upperbound = argsdict.get('upperbound',0)
self.lowerbound = argsdict.get('lowerbound',0)
self.variation_range = argsdict.get('variation_range',0)
self.mgmt_data = DataManagement().load_data()
self.model_run_date_time = get_date_time_of_today()
self.model_common_name = argsdict.get('model_name', '')
self.number_of_sim_columns = 0
self.itercount = 0
self.annotation = argsdict.get('annotation_text', '')
self.init_default_rates()
@staticmethod
def get_mgmt_data():
return DataManagement().load_data()
def load_baseline_data_mgmt(self):
'''
This function will load the parameter values for the baseline
scenario of the Business School into the model
:return: This function does not return anything. It changes the
current model in place.
:rtype: void
'''
self.nf1 = 2
self.nf2 = 3
self.nf3 = 3
self.nm1 = 11
self.nm2 = 13
self.nm3 = 42
self.vac3 = 0
self.vac2 = 0
self.vac1 = 0
self.bf1 = 14/68
self.bf2 = 4/68
self.bf3 = 0/68
self.df1 = 0.0615
self.df2 = 0.00
self.df3 = 0.0600
self.dm1 = 0.0859
self.dm2 = 0.0473
self.dm3 = 0.0414
self.phire2 = 0.
self.phire3 = 0.
self.female_promotion_probability_1 = 0.0769
self.female_promotion_probability_2 = 0.1111
self.male_promotion_probability_1 = 0.0707
self.male_promotion_probability_2 = 0.0946
self.upperbound = 84
self.lowerbound = 64
self.variation_range = 3
self.name = "Promote-Hire baseline"
self.label = "Promote-Hire baseline"
def init_default_rates(self):
self.default_rates = {'default_hiring_rate_f1': round(14/68, 4),
'default_hiring_rate_f2': round(4/68, 4),
'default_hiring_rate_f3': round(0/68, 4),
'default_hiring_rate_m1': round(36/68, 4),
'default_hiring_rate_m2': round(8/68, 4),
'default_hiring_rate_m3': round(6/68, 4)}
@abc.abstractmethod
def run_model(self):
pass
@abc.abstractmethod
def get_number_of_model_data_columns(self):
pass
def run_multiple(self,num_iterations):
# first get the sizing of the storage array
# set up the array to hold the data array from each model run.
self.run_model_iterations(num_iterations)
# calculate summaries
self.calculate_simulation_summaries()
def run_model_iterations(self, num_iterations):
if self.number_of_sim_columns == 0:
raise ValueError('number of simulation columns should not be 0. '
'Fix the __init__ function in the model class.')
results_matrix = np.zeros([num_iterations, self.duration, self.number_of_sim_columns])
simulation_matrix = xr.DataArray(results_matrix,
coords={'simdata': self.sim_column_list,
'run': range(num_iterations),
'year': range(self.duration)},
dims=('run', 'year', 'simdata'))
simulation_matrix = simulation_matrix.astype('object')
self.itercount = 0
for i in tqdm(range(num_iterations)):
simulation_matrix[i, :, :] = self.run_model()
self.itercount += 1
self.simulation_matrix = simulation_matrix
self.itercount = 0
def calculate_simulation_summaries(self):
# allocate column names
summary_matrix_columns,\
sim_results_cols, \
sim_setting_cols, \
mgmt_data_cols = self.create_summary_column_names_list()
# create new dataframe to hold summary info
temp = np.zeros([self.duration, len(summary_matrix_columns)])
        summary_matrix = pd.DataFrame(temp)
# -*- coding: utf-8 -*-
"""
dwx_analytics.py - Pythonic access to raw DARWIN analytics data via FTP
--
@author: <NAME> (www.darwinex.com)
Last Updated: October 17, 2019
Copyright (c) 2017-2019, Darwinex. All rights reserved.
Licensed under the BSD 3-Clause License, you may not use this file except
in compliance with the License.
You may obtain a copy of the License at:
https://opensource.org/licenses/BSD-3-Clause
"""
import gzip
import json, os
import pandas as pd
from tqdm import tqdm
from ftplib import FTP
from io import BytesIO
from matplotlib import pyplot as plt
import logging
logger = logging.getLogger()
class DWX_Darwin_Data_Analytics_API():
'''This API has the ability to download DARWIN data and analyze it.'''
def __init__(self, dwx_ftp_user, dwx_ftp_pass, dwx_ftp_hostname, dwx_ftp_port):
"""Initialize variables, setup byte buffer and FTP connection.
Parameters
----------
ftp_server : str
FTP server that houses raw DARWIN data
ftp_username : str
Your Darwinex username
ftp_password : str
Your FTP password (NOT your Darwinex password)
ftp_port : int
Port to connect to FTP server on.
--
"""
# Analytics Headers
self.analytics_headers = {'AVG_LEVERAGE': ['timestamp','periods','darwin_vs_eurusd_volatility'],
'ORDER_DIVERGENCE': ['timestamp','instrument','usd_volume','latency','divergence'],
'RETURN_DIVERGENCE': ['timestamp','quote','quote_after_avg_divergence'],
'MONTHLY_DIVERGENCE': ['timestamp','average_divergence','monthly_divergence'],
'DAILY_FIXED_DIVERGENCE': ['timestamp','profit_difference'],
'DAILY_REAL_DIVERGENCE': ['timestamp','profit_difference'],
'POSITIONS': ['timestamp','periods','array','total_pos_number','max_open_trades'],
'TRADES': ['timestamp','periods','array'],
'TRADE_CONSISTENCY': ['timestamp','periods','array'],
'ROTATION': ['timestamp','periods','daily_rotation']}
# Setup data container
self.retbuf = BytesIO()
# Setup data access mode (file or FTP)
self.mode = 0 # Default is file.
try:
self.server = FTP(dwx_ftp_hostname)
self.server.login(dwx_ftp_user, dwx_ftp_pass)
# 200+ codes signify success.
if str(self.server.lastresp).startswith('2'):
logger.warning('[KERNEL] FTP Connection Successful. Data will now be pulled from Darwinex FTP Server.')
                self.mode = 1  # 1 = FTP, 0 = file
logger.warning(f'[KERNEL] Last FTP Status Code: {self.server.lastresp} | Please consult https://en.wikipedia.org/wiki/List_of_FTP_server_return_codes for code definitions.')
except Exception as ex:
logger.warning(f'Exception: {ex}')
exit(-1)
##########################################################################
"""Parse a line containing a list. Only works for max one list or one list of lists."""
def parse_line(self, line):
for start, end in [['[[', ']]'], ['[', ']']]:
if start in line:
ls = line.split(start)
ls1 = ls[1].split(end)
return ls[0].split(',')[:-1] + [json.loads(start+ls1[0].replace("'", '"')+end)] + ls1[1].split(',')[1:]
return line.split(',')
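    # Hedged worked example (added comment, values invented for the demo): parse_line()
    # splits a comma-separated record while keeping an embedded JSON-style list intact.
    # A made-up POSITIONS-like record
    #   "1568851200000,30,[0.5, 1.2, 0.8],12,4"
    # would be returned as
    #   ['1568851200000', '30', [0.5, 1.2, 0.8], '12', '4']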
def get_data_from_ftp(self, darwin, data_type):
"""Connect to FTP server and download requested data for DARWIN.
For example, darwin='PLF' and data_type='AVG_LEVERAGE' results in this
code retrieving the file 'PLF/AVG_LEVERAGE' from the FTP server.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
data_type : str
Must be a key in self.analytics_headers dictionary.
Returns
-------
df
Pandas DataFrame
--
"""
# Clear / reinitialize buffer
self.retbuf = BytesIO()
self.server.retrbinary(f"RETR {darwin}/{data_type}", self.retbuf.write)
self.retbuf.seek(0)
# Extract data from BytesIO object
ret = []
while True:
line = self.retbuf.readline()
if len(line) > 1:
ret.append(self.parse_line(line.strip().decode()))
else:
break
# Return as Dataframe
return pd.DataFrame(ret)
def get_data_from_file(self, darwin, data_type):
"""Read data from local file stored in path darwin/filename
For example, darwin='PLF' and data_type='AVG_LEVERAGE' results in this
code retrieving the file 'PLF/AVG_LEVERAGE' from the current directory.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
data_type : str
Must be a key in self.analytics_headers dictionary.
Returns
-------
df
Pandas DataFrame
--
"""
if self.mode == 0:
logger.warning(f'Retrieving data from file for DARWIN {darwin}...')
return pd.read_csv(f'{str(darwin).upper()}/{str(data_type).upper()}', header=None)
else:
logger.warning(f'Retrieving data from FTP Server for DARWIN {darwin}...')
return self.get_data_from_ftp(str(darwin).upper(), str(data_type).upper())
def save_data_to_csv(self, dataframe_to_save, which_path, filename):
# Save:
if which_path:
# It will save the data to the specified path:
dataframe_to_save.to_csv(which_path + filename + '.csv')
else:
# It will save the data in the working directory:
dataframe_to_save.to_csv(filename + '.csv')
##########################################################################
def get_analytics(self, darwin, data_type):
"""Get, index and prepare requested data.
For example, darwin='PLF' and data_type='AVG_LEVERAGE' results in:
- the code retrieving the file 'PLF/AVG_LEVERAGE'
- converting millisecond timestamps column to Pandas datetime
- Setting the above converted timestamps as the index
- Dropping the timestamp column itself.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
data_type : str
Must be a key in self.analytics_headers dictionary.
Returns
-------
df
Pandas DataFrame
--
"""
df = self.get_data_from_file(darwin, data_type)
df.columns = self.analytics_headers[data_type]
df.set_index(pd.to_datetime(df['timestamp'], unit='ms'), inplace=True)
df.drop(['timestamp'], axis=1, inplace=True)
return df
##########################################################################
def get_darwin_vs_eurusd_volatility(self, darwin, plot=True):
"""Get the evolution of the given DARWIN's volatility vs that of the EUR/USD.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'AVG_LEVERAGE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type)
# DARWIN vs EURUSD volatility is a list. We need the last value
df.loc[:,self.analytics_headers[data_type][-1]] = \
df.loc[:,self.analytics_headers[data_type][-1]].apply(eval).apply(lambda x: x[-1])
if plot:
df['darwin_vs_eurusd_volatility'].plot(title=f'${darwin}: DARWIN vs EUR/USD Volatility',
figsize=(10,8))
# Return processed data
return df
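    # Hedged usage sketch (added for illustration): the credentials below are placeholders,
    # not real values.
    #
    #   analytics = DWX_Darwin_Data_Analytics_API(dwx_ftp_user='<user>',
    #                                             dwx_ftp_pass='<ftp-password>',
    #                                             dwx_ftp_hostname='<ftp-host>',
    #                                             dwx_ftp_port=21)
    #   vol_df = analytics.get_darwin_vs_eurusd_volatility('PLF', plot=False)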
##############################################################################
def get_order_divergence(self, darwin,
plot=True):
"""Get the evolution of the given DARWIN's replication latency and investor
divergence, per order executed by the trader.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'ORDER_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type)
# Convert values to numeric
df[['latency','usd_volume','divergence']] = df[['latency','usd_volume','divergence']].apply(pd.to_numeric, errors='coerce')
# Plot
if plot:
fig = plt.figure(figsize=(10,12))
# 2x1 grid, first plot
ax1 = fig.add_subplot(211)
ax1.xaxis.set_label_text('Replication Latency (ms)')
# 2x1 grid, second plot
ax2 = fig.add_subplot(212)
ax2.xaxis.set_label_text('Investor Divergence')
# Plot Median Replication Latency by Instrument
df.groupby('instrument').latency.median()\
.sort_values(ascending=True).plot(kind='barh',\
title=f'${darwin} | Median Order Replication Latency (ms)',\
ax=ax1)
# Plot Median Investor Divergence by Instrument
df.groupby('instrument').divergence.median()\
.sort_values(ascending=True).plot(kind='barh',\
title=f'${darwin} | Median Investor Divergence per Order',\
ax=ax2)
fig.subplots_adjust(hspace=0.2)
# Return processed data
return df.dropna()
##########################################################################
def get_return_divergence(self, darwin, plot=True):
"""Get the evolution of the given DARWIN's Quote and Quote after applying
average investors' divergence.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'RETURN_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
if plot:
df.plot(title=f'${darwin} | Quote vs Quote with Average Divergence',
figsize=(10,8))
return df
##########################################################################
def get_monthly_divergence(self, darwin):
"""Get the evolution of the given DARWIN's average and monthly divergence.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'MONTHLY_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
return df
##########################################################################
def get_daily_fixed_divergence(self, darwin, plot=True):
"""Analyses the effect of applying a fixed divergence (10e-5) on the profit.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'DAILY_FIXED_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
if plot:
df.plot(title=f'${darwin} | Effect of 10e-5 Fixed Divergence on profit',
figsize=(10,8))
return df
##########################################################################
def get_daily_real_divergence(self, darwin, plot=True):
"""Analyse the effect of applying the investors' divergence on the profit.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
plot : bool
If true, produce a chart as defined in the method.
Returns
-------
df
Pandas DataFrame
--
"""
# Set required data type
data_type = 'DAILY_REAL_DIVERGENCE'
# Get raw data into pandas dataframe
df = self.get_analytics(darwin, data_type).apply(pd.to_numeric, errors='coerce')
if plot:
df.plot(title=f'${darwin} | Effect of Investor Divergence on profit',
figsize=(10,8))
return df
##########################################################################
def get_quotes_from_ftp(self,
darwin='PLF',
suffix='4.1',
monthly=True, # If set to False, month/year used.
month='01',
year='2019',
former_or_new='former'):
"""Download Quote data for any DARWIN directly via FTP.
Parameters
----------
darwin : str
DARWIN ticker symbol, e.g. $PLF
suffix : str
Reflects risk, '4.1' being for 10% VaR DARWIN assets.
monthly : bool
If set to True, month and year arguments are ignored, and ALL
data available for the DARWIN is downloaded.
month : str
Data on the FTP server has the following directory structure:
DARWIN_Symbol -> {year}-{month} -> *.csv.gz files
e.g. PLF/2019-01/PLF....csv.gz
Specifies month for {year}-{month} tuple as above.
year : str
Data on the FTP server has the following directory structure:
DARWIN_Symbol -> {year}-{month} -> *.csv.gz files
e.g. PLF/2019-01/PLF....csv.gz
Specifies year for {year}-{month} tuple as above.
former_or_new : str
Access the former var10 DARWIN data or new var6.5 DARWIN data.
Returns
-------
df
Pandas DataFrame containing Quotes, indexed by timeframe.
--
"""
if former_or_new == 'former':
quote_files = []
roots = []
if monthly:
tqdm.write(f'\n[KERNEL] Searching for Quote data for DARWIN (FORMER VAR_10) {darwin}, please wait..', end='')
self.server.retrlines(f'NLST {darwin}/_{darwin}_former_var10/quotes/', roots.append)
roots_pbar = tqdm(roots, position=0, leave=True)
for root in roots_pbar:
try:
roots_pbar.set_description("Getting filenames for month: %s" % root)
root_files = []
self.server.retrlines(f'NLST {darwin}/_{darwin}_former_var10/quotes/{root}', root_files.append)
# Finalize filenames
quote_files += [f'{darwin}/_{darwin}_former_var10/quotes/{root}/{root_file}'\
for root_file in root_files if '{}.{}'.format(darwin, suffix) in root_file]
except Exception as ex:
logger.warning(ex)
return
elif pd.to_numeric(month) > 0 and pd.to_numeric(year) > 2010:
tqdm.write(f'\n[KERNEL] Asserting data on DARWIN (FORMER VAR_10) for month {year}-{month}, please wait..', end='')
quote_files = []
try:
self.server.retrlines(f'NLST {darwin}/_{darwin}_former_var10/quotes/{year}-{month}/', quote_files.append)
quote_files = [f'{darwin}/_{darwin}_former_var10/quotes/{year}-{month}/{quote_file}'\
for quote_file in quote_files if '{}.{}'.format(darwin, suffix) in quote_file]
except Exception as ex:
logger.warning(ex)
return
else:
logger.warning('\n[ERROR] Please either set monthly=True or ensure both month and year have integer values')
return
# Process tick data files
tqdm.write(f'\n[KERNEL] {len(quote_files)} files retrieved.. post-processing now, please wait..', end='')
ticks_df = pd.DataFrame()
ticks_pbar = tqdm(quote_files, position=0, leave=True)
for tick_file in ticks_pbar:
try:
ticks_pbar.set_description("Processing %s" % tick_file)
# Clear / reinitialize buffer
self.retbuf = BytesIO()
self.server.retrbinary(f"RETR {tick_file}", self.retbuf.write)
self.retbuf.seek(0)
# Extract data from BytesIO object
ret = [line.strip().decode().split(',') for line in gzip.open(self.retbuf)]
ticks_df = pd.concat([ticks_df, pd.DataFrame(ret[1:])], axis=0)
except Exception as ex:
logger.warning(ex)
# Clean up
ticks_df.columns = ['timestamp','quote']
ticks_df.timestamp = ticks_df.timestamp.apply(pd.to_numeric)
ticks_df.set_index('timestamp', drop=True, inplace=True)
ticks_df.index = pd.to_datetime(ticks_df.index, unit='ms')
ticks_df.quote = ticks_df.quote.apply(pd.to_numeric)
# Return DataFrame
return ticks_df.dropna()
elif former_or_new == 'new':
quote_files = []
roots = []
if monthly:
tqdm.write(f'\n[KERNEL] Searching for Quote data for DARWIN (NEW) {darwin}, please wait..', end='')
self.server.retrlines(f'NLST {darwin}/quotes/', roots.append)
roots_pbar = tqdm(roots, position=0, leave=True)
for root in roots_pbar:
try:
roots_pbar.set_description("Getting filenames for month: %s" % root)
root_files = []
self.server.retrlines(f'NLST {darwin}/quotes/{root}', root_files.append)
# Finalize filenames
quote_files += [f'{darwin}/quotes/{root}/{root_file}'\
for root_file in root_files if '{}.{}'.format(darwin, suffix) in root_file]
except Exception as ex:
logger.warning(ex)
return
            elif pd.to_numeric(month) > 0 and pd.to_numeric(year) > 2010:
from datetime import datetime
import numpy as np
import pandas as pd
from fetcher.extras.common import MaRawData
from fetcher.utils import Fields, extract_arcgis_attributes
NULL_DATE = datetime(2020, 1, 1)
DATE = Fields.DATE.name
TS = Fields.TIMESTAMP.name
def make_cumsum_df(data, timestamp_field=Fields.TIMESTAMP.name):
df = pd.DataFrame(data)
df.set_index(timestamp_field, inplace=True)
df.sort_index(inplace=True)
df = df.select_dtypes(exclude=['string', 'object'])
# .groupby(level=0).last() # can do it here, but not mandatory
cumsum_df = df.cumsum()
cumsum_df[Fields.TIMESTAMP.name] = cumsum_df.index
return cumsum_df
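# Hedged example (added for illustration): make_cumsum_df() turns per-day new counts into
# running totals keyed by timestamp. The numbers below are invented for the demo.
def _demo_make_cumsum_df():
    demo_rows = [{TS: datetime(2020, 3, 1), 'positive': 5, 'negative': 20},
                 {TS: datetime(2020, 3, 2), 'positive': 3, 'negative': 10}]
    # after cumsum: positive -> [5, 8], negative -> [20, 30]; the timestamp is restored as a column
    return make_cumsum_df(demo_rows)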
def handle_ak(res, mapping):
tests = res[0]
collected = [x['attributes'] for x in tests['features']]
    df = pd.DataFrame(collected)
import math
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.stats as ss
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt
from collections import Counter
from .data_utils import identify_columns_by_type
from ._private import (
convert, remove_incomplete_samples, replace_nan_with_value
)
__all__ = [
'associations',
'cluster_correlations',
'compute_associations',
'conditional_entropy',
'correlation_ratio',
'cramers_v',
'identify_nominal_columns',
'identify_numeric_columns',
'numerical_encoding',
'theils_u'
]
_REPLACE = 'replace'
_DROP = 'drop'
_DROP_SAMPLES = 'drop_samples'
_DROP_FEATURES = 'drop_features'
_SKIP = 'skip'
_DEFAULT_REPLACE_VALUE = 0.0
def _inf_nan_str(x):
if np.isnan(x):
return 'NaN'
elif abs(x) == np.inf:
return 'inf'
else:
return ''
def conditional_entropy(x,
y,
nan_strategy=_REPLACE,
nan_replace_value=_DEFAULT_REPLACE_VALUE,
log_base: float = math.e):
"""
Calculates the conditional entropy of x given y: S(x|y)
Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy
Parameters:
-----------
x : list / NumPy ndarray / Pandas Series
A sequence of measurements
y : list / NumPy ndarray / Pandas Series
A sequence of measurements
nan_strategy : string, default = 'replace'
How to handle missing values: can be either 'drop' to remove samples
with missing values, or 'replace' to replace all missing values with
the nan_replace_value. Missing values are None and np.nan.
nan_replace_value : any, default = 0.0
The value used to replace missing values with. Only applicable when
nan_strategy is set to 'replace'.
log_base: float, default = e
specifying base for calculating entropy. Default is base e.
Returns:
--------
float
"""
if nan_strategy == _REPLACE:
x, y = replace_nan_with_value(x, y, nan_replace_value)
elif nan_strategy == _DROP:
x, y = remove_incomplete_samples(x, y)
y_counter = Counter(y)
xy_counter = Counter(list(zip(x, y)))
total_occurrences = sum(y_counter.values())
entropy = 0.0
for xy in xy_counter.keys():
p_xy = xy_counter[xy] / total_occurrences
p_y = y_counter[xy[1]] / total_occurrences
entropy += p_xy * math.log(p_y / p_xy, log_base)
return entropy
def cramers_v(x,
y,
bias_correction=True,
nan_strategy=_REPLACE,
nan_replace_value=_DEFAULT_REPLACE_VALUE):
"""
Calculates Cramer's V statistic for categorical-categorical association.
This is a symmetric coefficient: V(x,y) = V(y,x)
Original function taken from: https://stackoverflow.com/a/46498792/5863503
Wikipedia: https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
Parameters:
-----------
x : list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
y : list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
bias_correction : Boolean, default = True
Use bias correction from Bergsma and Wicher,
Journal of the Korean Statistical Society 42 (2013): 323-328.
nan_strategy : string, default = 'replace'
How to handle missing values: can be either 'drop' to remove samples
with missing values, or 'replace' to replace all missing values with
the nan_replace_value. Missing values are None and np.nan.
nan_replace_value : any, default = 0.0
The value used to replace missing values with. Only applicable when
nan_strategy is set to 'replace'.
Returns:
--------
float in the range of [0,1]
"""
if nan_strategy == _REPLACE:
x, y = replace_nan_with_value(x, y, nan_replace_value)
elif nan_strategy == _DROP:
x, y = remove_incomplete_samples(x, y)
confusion_matrix = pd.crosstab(x, y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
if bias_correction:
phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
rcorr = r - ((r - 1) ** 2) / (n - 1)
kcorr = k - ((k - 1) ** 2) / (n - 1)
if min((kcorr - 1), (rcorr - 1)) == 0:
warnings.warn(
"Unable to calculate Cramer's V using bias correction. Consider using bias_correction=False",
RuntimeWarning)
return np.nan
else:
return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))
else:
return np.sqrt(phi2 / min(k - 1, r - 1))
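# Hedged example (added for illustration): Cramer's V on two made-up categorical columns.
# A perfectly associated pair scores close to 1, while unrelated columns drift toward 0.
def _demo_cramers_v():
    color = ['red', 'red', 'blue', 'blue', 'green', 'green'] * 10
    size = ['S', 'S', 'M', 'M', 'L', 'L'] * 10
    return cramers_v(color, size)   # ~1.0 for this perfectly associated pair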
def theils_u(x,
y,
nan_strategy=_REPLACE,
nan_replace_value=_DEFAULT_REPLACE_VALUE):
"""
Calculates Theil's U statistic (Uncertainty coefficient) for categorical-
categorical association. This is the uncertainty of x given y: value is
on the range of [0,1] - where 0 means y provides no information about
x, and 1 means y provides full information about x.
This is an asymmetric coefficient: U(x,y) != U(y,x)
Wikipedia: https://en.wikipedia.org/wiki/Uncertainty_coefficient
Parameters:
-----------
x : list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
y : list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
nan_strategy : string, default = 'replace'
How to handle missing values: can be either 'drop' to remove samples
with missing values, or 'replace' to replace all missing values with
the nan_replace_value. Missing values are None and np.nan.
nan_replace_value : any, default = 0.0
The value used to replace missing values with. Only applicable when
nan_strategy is set to 'replace'.
Returns:
--------
float in the range of [0,1]
"""
if nan_strategy == _REPLACE:
x, y = replace_nan_with_value(x, y, nan_replace_value)
elif nan_strategy == _DROP:
x, y = remove_incomplete_samples(x, y)
s_xy = conditional_entropy(x, y)
x_counter = Counter(x)
total_occurrences = sum(x_counter.values())
p_x = list(map(lambda n: n / total_occurrences, x_counter.values()))
s_x = ss.entropy(p_x)
if s_x == 0:
return 1
else:
return (s_x - s_xy) / s_x
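# Hedged example (added for illustration): Theil's U is asymmetric, so U(x|y) and U(y|x)
# generally differ. The toy data below is invented for the demo.
def _demo_theils_u():
    animal = ['cat', 'cat', 'dog', 'dog', 'dog', 'fish'] * 5
    has_fur = ['yes', 'yes', 'yes', 'yes', 'yes', 'no'] * 5
    # knowing the animal fully determines has_fur (U=1), but not the other way around
    return theils_u(has_fur, animal), theils_u(animal, has_fur)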
def correlation_ratio(categories,
measurements,
nan_strategy=_REPLACE,
nan_replace_value=_DEFAULT_REPLACE_VALUE):
"""
Calculates the Correlation Ratio (sometimes marked by the greek letter Eta)
for categorical-continuous association.
Answers the question - given a continuous value of a measurement, is it
possible to know which category is it associated with?
Value is in the range [0,1], where 0 means a category cannot be determined
by a continuous measurement, and 1 means a category can be determined with
absolute certainty.
Wikipedia: https://en.wikipedia.org/wiki/Correlation_ratio
Parameters:
-----------
categories : list / NumPy ndarray / Pandas Series
A sequence of categorical measurements
measurements : list / NumPy ndarray / Pandas Series
A sequence of continuous measurements
nan_strategy : string, default = 'replace'
How to handle missing values: can be either 'drop' to remove samples
with missing values, or 'replace' to replace all missing values with
the nan_replace_value. Missing values are None and np.nan.
nan_replace_value : any, default = 0.0
The value used to replace missing values with. Only applicable when
nan_strategy is set to 'replace'.
Returns:
--------
float in the range of [0,1]
"""
if nan_strategy == _REPLACE:
categories, measurements = replace_nan_with_value(
categories, measurements, nan_replace_value)
elif nan_strategy == _DROP:
categories, measurements = remove_incomplete_samples(
categories, measurements)
categories = convert(categories, 'array')
measurements = convert(measurements, 'array')
    fcat, _ = pd.factorize(categories)
# load packages
import pandas as pd
import statsmodels.tsa.stattools as stats
import statsmodels.graphics.tsaplots as sg
import matplotlib.pyplot as plt
import matplotlib
import itertools as it
import sys
from datetime import datetime
import numpy as np
import warnings
import json
import time
warnings.filterwarnings("ignore")
import networkx as nx
from nxpd import draw
from nxpd import nxpdParams
nxpdParams['show'] = 'ipynb'
sys.path.append("../../pipelines")
import Pipelines as tdw
data_folder = "../../data/invitro/"
output_path = "../../data/invitro/"
current_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
file_path = "../../data/invitro/gardner_timeseries.tsv"
n_trials = 50
run_params = {'data_folder': data_folder,
'file_path': file_path,
'td_window': 7,
'min_lag': 0,
'max_lag': 1,
'n_trees': 1000,
'permutation_n': 0,
'lag_method': 'mean_mean',
'calc_mse': False,
'bootstrap_n': 1000,
              'n_trials': n_trials,
'run_time': current_time,
'sort_by': 'rank',
'window_type': 'RandomForest'
}
overall_df = pd.DataFrame()
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
        df_small = DataFrame("hello", index=[0], columns=[0])
def Moder_merger(params : dict):
def Solo_M1mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 72.081324
mz_Cl = 34.968853 + mz - 72.081324
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
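    # Hedged worked illustration (added comment, numbers invented): for a precursor detected
    # as [M-H+C4H11N]- at m/z 300.0000, the neutral mass is M = 300.0000 - 72.081324, so the
    # check above looks for an in-source [M-H]- fragment near
    # 300.0000 - 72.081324 - 1.007825 = 226.910851 and an [M+Cl]- fragment near
    # 300.0000 - 72.081324 + 34.968853 = 262.887529, each within prec_mass_error.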
def Solo_M1mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 44.997654
mz_Cl = 34.968853 + mz - 44.997654
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
def Solo_M1m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
mz_Cl = 34.968853 + mz - 66.979600
mz_m2HpNa = 20.97412 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M1m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M1m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 36.948058
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M2mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 72.081324)/2
mz_Cl = 34.968853 + (mz - 72.081324)/2
mz_m2HpNa = 20.97412 + (mz - 72.081324)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 44.997654)/2
mz_Cl = 34.968853 + (mz - 44.997654)/2
mz_m2HpNa = 20.97412 + (mz - 44.997654)/2
mz_mHpHCOOH = 44.997654 + (mz - 44.997654)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_mHpHCOOH = peaks.between(mz_mHpHCOOH - prec_mass_error, mz_mHpHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_mHpHCOOH])
def Solo_M2mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/2
mz_Cl = 34.968853 + (mz + 1.007825)/2
mz_m2HpNa = 20.97412 + (mz + 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/2
mz_Cl = 34.968853 + (mz - 34.968853)/2
mz_m2HpNa = 20.97412 + (mz - 34.968853)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/2
mz_Cl = 34.968853 + (mz - 66.979600)/2
mz_m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/2
mz_Cl = 34.968853 + (mz - 20.97412)/2
mz_m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 36.948058)/2
mz_Cl = 34.968853 + (mz - 36.948058)/2
mz_m2HpNa = 20.97412 + (mz - 36.948058)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 36.948058)/2
mz_m2HpK = 36.948058 + (mz - 36.948058)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK])
def Solo_M3mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/3
mz_Cl = 34.968853 + (mz + 1.007825)/3
mz_m2HpNa = 20.97412 + (mz + 1.007825)/3
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/3
mz_m2HpK = 36.948058 + (mz + 1.007825)/3
mz_M2mH = -1.007825 + (mz + 1.007825)*(2/3)
mz_M2pCl = 34.968853 + (mz + 1.007825)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(2/3)
mz_M2m2HpK = 36.948058 + (mz + 1.007825)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/3
mz_Cl = 34.968853 + (mz - 34.968853)/3
mz_m2HpNa = 20.97412 + (mz - 34.968853)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/3
mz_m2HpK = 36.948058 + (mz - 34.968853)/3
mz_M2mH = -1.007825 + (mz - 34.968853)*(2/3)
mz_M2pCl = 34.968853 + (mz - 34.968853)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 34.968853)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/3
mz_Cl = 34.968853 + (mz - 66.979600)/3
mz_m2HpNa = 20.97412 + (mz - 66.979600)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/3
mz_m2HpK = 36.948058 + (mz - 66.979600)/3
mz_M2mH = -1.007825 + (mz - 66.979600)*(2/3)
mz_M2pCl = 34.968853 + (mz - 66.979600)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 66.979600)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/3
mz_Cl = 34.968853 + (mz - 20.97412)/3
mz_m2HpNa = 20.97412 + (mz - 20.97412)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/3
mz_m2HpK = 36.948058 + (mz - 20.97412)/3
mz_M2mH = -1.007825 + (mz - 20.97412)*(2/3)
mz_M2pCl = 34.968853 + (mz - 20.97412)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 20.97412)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M4mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/4
mz_Cl = 34.968853 + (mz + 1.007825)/4
mz_m2HpNa = 20.97412 + (mz + 1.007825)/4
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/4
mz_m2HpK = 36.948058 + (mz + 1.007825)/4
mz_M2mH = -1.007825 + (mz + 1.007825)/2
mz_M2pCl = 34.968853 + (mz + 1.007825)/2
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/2
mz_M2m2HpK = 36.948058 + (mz + 1.007825)/2
mz_M3mH = -1.007825 + (mz + 1.007825)*(3/4)
mz_M3pCl = 34.968853 + (mz + 1.007825)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz + 1.007825)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(3/4)
mz_M3m2HpK = 36.948058 + (mz + 1.007825)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/4
mz_Cl = 34.968853 + (mz - 34.968853)/4
mz_m2HpNa = 20.97412 + (mz - 34.968853)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/4
mz_m2HpK = 36.948058 + (mz - 34.968853)/4
mz_M2mH = -1.007825 + (mz - 34.968853)/2
mz_M2pCl = 34.968853 + (mz - 34.968853)/2
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/2
mz_M2m2HpK = 36.948058 + (mz - 34.968853)/2
mz_M3mH = -1.007825 + (mz - 34.968853)*(3/4)
mz_M3pCl = 34.968853 + (mz - 34.968853)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 34.968853)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 34.968853)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/4
mz_Cl = 34.968853 + (mz - 66.979600)/4
mz_m2HpNa = 20.97412 + (mz - 66.979600)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/4
mz_m2HpK = 36.948058 + (mz - 66.979600)/4
mz_M2mH = -1.007825 + (mz - 66.979600)/2
mz_M2pCl = 34.968853 + (mz - 66.979600)/2
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
mz_M2m2HpK = 36.948058 + (mz - 66.979600)/2
mz_M3mH = -1.007825 + (mz - 66.979600)*(3/4)
mz_M3pCl = 34.968853 + (mz - 66.979600)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 66.979600)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 66.979600)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/4
mz_Cl = 34.968853 + (mz - 20.97412)/4
mz_m2HpNa = 20.97412 + (mz - 20.97412)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/4
mz_m2HpK = 36.948058 + (mz - 20.97412)/4
mz_M2mH = -1.007825 + (mz - 20.97412)/2
mz_M2pCl = 34.968853 + (mz - 20.97412)/2
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
mz_M2m2HpK = 36.948058 + (mz - 20.97412)/2
mz_M3mH = -1.007825 + (mz - 20.97412)*(3/4)
mz_M3pCl = 34.968853 + (mz - 20.97412)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 20.97412)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 20.97412)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M2pH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M2pHpCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 42.034374)/2
mz_Na = 22.98977 + (mz - 42.034374)/2
mz_K = 38.963708 + (mz - 42.034374)/2
mz_HpCH3CN = 42.034374 + (mz - 42.034374)/2
mz_HpCH3OH = 33.034040 + (mz - 42.034374)/2
mz_NapCH3CN = 64.016319 + (mz - 42.034374)/2
mz_NapCH3OH = 55.015985 + (mz - 42.034374)/2
mz_KpCH3CN = 79.990257 + (mz - 42.034374)/2
mz_KpCH3OH = 70.989923 + (mz - 42.034374)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 33.034040)/2
mz_Na = 22.98977 + (mz - 33.034040)/2
mz_K = 38.963708 + (mz - 33.034040)/2
mz_HpCH3CN = 42.034374 + (mz - 33.034040)/2
mz_HpCH3OH = 33.034040 + (mz - 33.034040)/2
mz_NapCH3CN = 64.016319 + (mz - 33.034040)/2
mz_NapCH3OH = 55.015985 + (mz - 33.034040)/2
mz_KpCH3CN = 79.990257 + (mz - 33.034040)/2
mz_KpCH3OH = 70.989923 + (mz - 33.034040)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 47.013304)/2
mz_Na = 22.98977 + (mz - 47.013304)/2
mz_K = 38.963708 + (mz - 47.013304)/2
    mz_HpCH3CN = 42.034374 + (mz - 47.013304)/2
mz_HpCH3OH = 33.034040 + (mz - 47.013304)/2
mz_NapCH3CN = 64.016319 + (mz - 47.013304)/2
mz_NapCH3OH = 55.015985 + (mz - 47.013304)/2
mz_KpCH3CN = 79.990257 + (mz - 47.013304)/2
mz_KpCH3OH = 70.989923 + (mz - 47.013304)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pNH4(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 18.034374)/2
mz_NH4 = 18.034374 + (mz - 18.034374)/2
mz_Na = 22.98977 + (mz - 18.034374)/2
mz_K = 38.963708 + (mz - 18.034374)/2
mz_HpCH3CN = 42.034374 + (mz - 18.034374)/2
mz_HpCH3OH = 33.034040 + (mz - 18.034374)/2
mz_NapCH3CN = 64.016319 + (mz - 18.034374)/2
mz_NapCH3OH = 55.015985 + (mz - 18.034374)/2
mz_KpCH3CN = 79.990257 + (mz - 18.034374)/2
mz_KpCH3OH = 70.989923 + (mz - 18.034374)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_NH4 = peaks.between(mz_NH4 - prec_mass_error, mz_NH4 + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_NH4, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 22.98977)/2
mz_Na = 22.98977 + (mz - 22.98977)/2
mz_NapCH3CN = 64.016319 + (mz - 22.98977)/2
mz_NapCH3OH = 55.015985 + (mz - 22.98977)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pNapCH3OH(ion_idx, mgf_file) :
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 55.015985)/2
mz_Na = 22.98977 + (mz - 55.015985)/2
mz_NapCH3CN = 64.016319 + (mz - 55.015985)/2
mz_NapCH3OH = 55.015985 + (mz - 55.015985)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pNapCH3CN(ion_idx, mgf_file) :
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 64.016319)/2
mz_Na = 22.98977 + (mz - 64.016319)/2
mz_NapCH3CN = 64.016319 + (mz - 64.016319)/2
mz_NapCH3OH = 55.015985 + (mz - 64.016319)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 38.963708)/2
mz_Na = 22.98977 + (mz - 38.963708)/2
mz_K = 38.963708 + (mz - 38.963708)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K])
def Solo_M1pHpCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 42.034374
mz_Na = 22.98977 + mz - 42.034374
mz_HpCH3OH = 33.034040 + mz - 42.034374
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_HpCH3OH])
def Solo_M1pHpCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 33.034040
mz_Na = 22.98977 + mz - 33.034040
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na])
def Solo_M1pHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 47.013304
mz_Na = 22.98977 + mz - 47.013304
mz_HpCH3OH = 33.034040 + mz - 47.013304
mz_HpCH3CN = 42.034374 + mz - 47.013304
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_HpCH3OH, valid_HpCH3CN])
def Solo_M1pNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 22.989770
    valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
    return sum([valid_H])
def Solo_M1pNapCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = mz - 64.016319 + 1.007825
mz_Na = mz - 64.016319 + 22.98977
mz_NapCH3OH = mz - 64.016319 + 55.015985
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3OH])
def Solo_M1pNapCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 55.015985
mz_Na = 22.98977 + mz - 55.015985
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na])
def Solo_M1pNH4(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 18.034374
    valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
    return sum([valid_H])
def Solo_M1pNH4pCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 59.060923
mz_NH4 = 18.034374 + mz - 59.060923
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_NH4 = peaks.between(mz_NH4 - prec_mass_error, mz_NH4 + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_NH4])
def Solo_M1pK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 38.963708
mz_NH4 = 18.034374 + mz - 38.963708
mz_Na = 22.98977 + mz - 38.963708
    valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
    valid_NH4 = peaks.between(mz_NH4 - prec_mass_error, mz_NH4 + prec_mass_error, inclusive = "both").sum() > 0
    valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_NH4, valid_Na])
def Solo_M1pKpCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 70.989923
mz_NH4 = 18.034374 + mz - 70.989923
mz_Na = 22.98977 + mz - 70.989923
mz_K = 38.963708 + mz - 70.989923
    valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
    valid_NH4 = peaks.between(mz_NH4 - prec_mass_error, mz_NH4 + prec_mass_error, inclusive = "both").sum() > 0
    valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
    valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_NH4, valid_Na, valid_K])
def Solo_M3pH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 1.007825)/3
mz_M2pH = 1.007825 + (mz - 1.007825)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pH = peaks.between(mz_M2pH - prec_mass_error, mz_M2pH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_M2pH])
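# Illustrative sketch, not part of the original module: assuming `mgf_file` is an already
# loaded, list-like collection of spectra exposing .get('pepmass') and .peaks.mz (as the
# helpers above expect), the per-spectrum adduct evidence can be tabulated like this.
# The helper below is only an example and is never called by the module.
def _example_adduct_evidence_table(mgf_file):
    rows = []
    for ion_idx in range(len(mgf_file)):
        rows.append({"ion_idx": ion_idx,
                     "M2mH": Solo_M2mH(ion_idx, mgf_file),
                     "M2pCl": Solo_M2pCl(ion_idx, mgf_file),
                     "M3mH": Solo_M3mH(ion_idx, mgf_file)})
    return pd.DataFrame(rows)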
def Solo_M3pNH4(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
    peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
import pandas as pd
import numpy as np
import datetime
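# Overview sketch (assumption, not original code): the classes below are feature builders
# for session logs. X is the frame of clickout rows, `dataset` is a dict whose "all_df"
# entry holds the full action log, and `extract_cols` accumulates the generated feature
# names. A typical chaining could look like:
#   extract_cols = []
#   X, extract_cols = Durations.set(X, extract_cols, dataset)
#   X, extract_cols = JustClickout.set(X, extract_cols)
#   X = JustBeforeClickout.set(X, dataset)
#   X, extract_cols = Record2Impression.expand(X, extract_cols, dataset)
#   X = DecisionMakingProcess.detect(X, dataset)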
class Durations(object):
@classmethod
def set(cls, X, extract_cols, dataset):
print("... ... Durations")
all_df = dataset["all_df"]
# duration from first action to clickout
dffac_df = all_df[["session_id", "timestamp", "timestamp_dt"]].groupby(
"session_id").first().reset_index()
dffac_df = dffac_df[["session_id", "timestamp_dt"]]
dffac_df.columns = ["session_id", "first_timestamp_dt"]
X = pd.merge(X, dffac_df, on="session_id", how="left")
X["session_duration"] = X.apply(lambda x: (x.timestamp_dt - x.first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["session_duration"]
del dffac_df
        # duration from last destination to clickout
dflsc_df = all_df[["session_id", "_session_id", "timestamp", "timestamp_dt"]].groupby(
"_session_id").first().reset_index()
dflsc_df = dflsc_df[dflsc_df._session_id.isin(X._session_id)]
dflsc_df = dflsc_df[["session_id", "timestamp_dt"]]
dflsc_df.columns = ["session_id", "step_first_timestamp_dt"]
X = pd.merge(X, dflsc_df, on="session_id", how="left")
X["step_duration"] = X.apply(lambda x: (x.timestamp_dt - x.step_first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["step_duration"]
del dflsc_df
return (X, extract_cols)
class JustClickout(object):
@classmethod
def set(cls, X, extract_cols):
print("... ... JustClickout")
        # append current filters
def get_cf_features(x):
sbp = 1 if "Sort by Price" in x.current_filters else 0
sbd = 1 if "Sort By Distance" in x.current_filters else 0
sbr = 1 if "Sort By Rating" in x.current_filters else 0
fod = 1 if "Focus on Distance" in x.current_filters else 0
fsr = 1 if "Focus on Rating" in x.current_filters else 0
bev = 1 if "Best Value" in x.current_filters else 0
return pd.Series({'cf_sbp': sbp
, 'cf_sbd': sbd
, 'cf_sbr': sbr
, 'cf_fod': fod
, 'cf_fsr': fsr
, 'cf_bev': bev})
X["current_filters"] = X["current_filters"].fillna("")
curf_df = X[["current_filters"]].apply(lambda x: get_cf_features(x), axis=1)
X = pd.concat([X, curf_df], axis=1)
extract_cols = extract_cols + list(curf_df.columns)
del curf_df
return (X, extract_cols)
class JustBeforeClickout(object):
@classmethod
def set(cls, X, dataset):
print("... ... JustBeforeClickout")
all_df = dataset["all_df"]
# last action_type
lasttype_df = all_df[["session_id", "action_type", "is_y"]].copy()
lasttype_df["lat"] = lasttype_df["action_type"].shift(1)
lasttype_df["last_session_id"] = lasttype_df["session_id"].shift(1)
lasttype_df = lasttype_df[lasttype_df.is_y == 1]
lasttype_df = lasttype_df[lasttype_df.session_id == lasttype_df.last_session_id]
lasttype_df = lasttype_df[["session_id", "lat"]]
onehot_lat = pd.get_dummies(lasttype_df, columns=['lat'])
X = pd.merge(X, onehot_lat, on="session_id", how="left")
lat_cols = list(onehot_lat.columns)
lat_cols.remove("session_id")
for lat_col in lat_cols:
X[lat_col] = X[lat_col].fillna(0)
del lasttype_df
del onehot_lat
return X
class Record2Impression(object):
@classmethod
def expand(cls, X, extract_cols, dataset):
print("... ... Record2Impression")
# create expanded
X = X.reset_index()
X["gid"] = X.index
X["n_imps"] = X[["impressions"]].apply(lambda x: len(str(x.impressions).split("|")), axis=1)
X["price_mean"] = X[["prices"]].apply(lambda x: np.mean(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["price_std"] = X[["prices"]].apply(lambda x: np.std(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["impression"] = X[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
X["price"] = X[["prices"]].apply(lambda x: str(x.prices).split("|"), axis=1)
X_impression = X[["gid", "impression"]].set_index('gid').impression.apply(pd.Series).stack().reset_index(
level=0).rename(columns={0: 'impression'})
X_price = X[["gid", "price"]].set_index('gid').price.apply(pd.Series).stack().reset_index(level=0).rename(
columns={0: 'price'})
X_position = X[["gid", "impression"]].set_index('gid').impression.apply(
lambda x: pd.Series(range(len(x)))).stack().reset_index(level=0).rename(columns={0: 'position'})
X_expanded = pd.concat([X_impression, X_price], axis=1)
X_expanded = pd.concat([X_expanded, X_position], axis=1)
X_expanded.columns = ["gid", "impression", "gid2", "price", "gid3", "position"]
X_expanded = X_expanded[["gid", "impression", "price", "position"]]
        # join expanded
X = pd.merge(X_expanded, X[["gid", "n_imps", "price_mean", "price_std"] + extract_cols], on="gid", how="left")
# to normalize position and price
X["pos_rate"] = X["position"] / X["n_imps"]
X["pos"] = X["position"] + 1
X["price_norm"] = (X["price"].astype(float) - X["price_mean"].astype(float)) / X["price_std"].astype(float)
# join price_norm rank
pnorm_rank_df = X[["session_id", "price_norm"]].copy()
pnorm_rank_df = pnorm_rank_df[["session_id", "price_norm"]].groupby("session_id").rank(ascending=False)
pnorm_rank_df.columns = ["price_norm_rank"]
X = pd.concat([X, pnorm_rank_df], axis=1)
del pnorm_rank_df
# calc discount rate
X["price"] = X["price"].astype(float)
prices_df = X[["impression", "price"]].groupby("impression").agg({'price': np.mean}).reset_index()
prices_df.columns = ["impression", "item_price_mean"]
X = pd.merge(X, prices_df, on="impression", how="left")
X["discount_rate"] = X["price"] / X["item_price_mean"]
del prices_df
# append some important props and other props with over 0.2 coverage
sum_item_props_df = dataset["sum_item_props_df"]
item_props = dataset["item_props"]
prop_cols = ["pGood Rating"
, "pVery Good Rating"
, "pExcellent Rating"
, "pSatisfactory Rating"
, "p1 Star"
, "p2 Star"
, "p3 Star"
, "p4 Star"
, "p5 Star"
, "pBusiness Centre"
, "pBusiness Hotel"
, "pConference Rooms"]
c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
prop_cols = prop_cols + c02over_prop_cols
prop_cols = list(set(prop_cols))
X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
X[prop_cols] = X[prop_cols].fillna(0)
return (X, extract_cols)
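# Worked illustration (assumption, not original data): Record2Impression.expand turns one
# clickout record with impressions "82371|91823|55112" and prices "120|95|150" into three
# rows, one per impression, each carrying position 0/1/2, pos_rate, its own price,
# price_norm (z-score of the price within that clickout) and discount_rate relative to the
# item's mean price across all clickouts.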
class DecisionMakingProcess(object):
@classmethod
def detect(cls, X, dataset):
print("... ... Decision Making Process")
print("... ... ... Attention and Perceptual Encoding")
print("... ... ... Information Acquisition and Evaluation")
all_df = dataset["all_df"]
        # join pos stats
copos_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "impressions", "is_y"]].copy()
copos_df = copos_df[copos_df.is_y == 0]
copos_df["impression"] = copos_df[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
copos_df["co_pos"] = copos_df[["impression", "reference"]].apply(
lambda x: x.impression.index(x.reference) + 1 if x.reference in x.impression else 1, axis=1)
copos_df_stats = copos_df[["session_id", "co_pos"]].groupby("session_id").agg(
{'co_pos': [np.min, np.max, np.mean]}).reset_index()
copos_df_stats.columns = ["session_id", "co_pos_min", "co_pos_max", "co_pos_mean"]
X = pd.merge(X, copos_df_stats, on="session_id", how="left")
X["co_pos_min"] = X["co_pos_min"].fillna(1)
X["co_pos_mean"] = X["co_pos_mean"].fillna(1)
X["co_pos_max"] = X["co_pos_max"].fillna(1)
X["co_pos_min_diff"] = X["pos"] - X["co_pos_min"]
X["co_pos_mean_diff"] = X["pos"] - X["co_pos_mean"]
X["clickouted_pos_max_diff"] = X["co_pos_max"] - X["pos"]
del copos_df
del copos_df_stats
# is_last and is_last_elapsed_time
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
lastref_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
lastref_df["is_target"] = 0
lastref_df.loc[lastref_df.is_y == 1, "is_target"] = 1
lastref_df = lastref_df[lastref_df.action_type.isin(action_types)]
lastref_df["last_session_id"] = lastref_df["session_id"].shift(1)
lastref_df["last_reference"] = lastref_df["reference"].shift(1)
lastref_df["last_timestamp"] = lastref_df["timestamp"].shift(1)
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_session_id]
lastref_df = lastref_df[lastref_df.is_target == 1][["session_id", "last_reference", "last_timestamp"]]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_reference"]] = X[["last_reference"]].fillna("-1")
X[["last_timestamp"]] = X[["last_timestamp"]].fillna(-1)
X["is_last"] = X[["impression", "last_reference"]].apply(lambda x: 1 if x.impression == x.last_reference else 0,
axis=1)
X["elapsed_time_between_is_last"] = X[["impression", "last_reference", "timestamp", "last_timestamp"]].apply(
lambda x: int(x.timestamp) - int(x.last_timestamp) if x.impression == x.last_reference else np.nan, axis=1)
lastdur_df = X[["session_id", "elapsed_time_between_is_last"]].copy()
lastdur_df = lastdur_df.dropna(axis=0, how='any')
X.drop("elapsed_time_between_is_last", axis=1, inplace=True)
X = pd.merge(X, lastdur_df, on="session_id", how="left")
del lastref_df
del lastdur_df
# join is_last_last
lastref_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
lastref_df["last_last_session_id"] = lastref_df["session_id"].shift(2)
lastref_df["last_last_reference"] = lastref_df["reference"].shift(2)
lastref_df = lastref_df[lastref_df.is_y == 1]
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_last_session_id]
lastref_df = lastref_df[["session_id", "last_last_reference"]]
lastref_df = lastref_df[~lastref_df.duplicated()]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_last_reference"]] = X[["last_last_reference"]].fillna("-1")
X["is_last_last"] = X[["impression", "last_last_reference"]].apply(
lambda x: 1 if x.impression == x.last_last_reference else 0, axis=1)
del lastref_df
        # mean elapsed time until the next action, aggregated per item (note: this uses future information)
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
isnext_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
isnext_df["next_session_id"] = isnext_df["session_id"].shift(-1)
isnext_df["next_timestamp"] = isnext_df["timestamp"].shift(-1)
isnext_df = isnext_df[isnext_df.session_id == isnext_df.next_session_id]
isnext_df["elapsed_next"] = isnext_df["next_timestamp"] - isnext_df["timestamp"]
isnext_df = isnext_df[isnext_df.action_type.isin(action_types)]
isnext_df = isnext_df[isnext_df.is_y == 0]
isnext_gp_df = isnext_df[["reference", "elapsed_next"]].groupby("reference").agg(
{"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_gp_df
isnext_gp_df = isnext_df[isnext_df.action_type == "clickout item"][["reference", "elapsed_next"]].groupby(
"reference").agg({"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time_byco"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_df
del isnext_gp_df
# clickouted item during session
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
X = pd.merge(X, couted_df, on=["session_id", "impression"], how="left")
X["clickouted"] = X["clickouted"].fillna(0)
X["clickouted"] = X["clickouted"].astype(int)
# diff between clickouted price mean
co_price_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "prices", "impressions", "is_y"]].copy()
co_price_df = co_price_df[co_price_df.is_y == 0] # to prevent leakage
def get_price(reference, impressions, prices):
imps = str(impressions).split("|")
prs = str(prices).split("|")
if reference in imps:
return prs[imps.index(reference)]
else:
return 0
co_price_df["price"] = co_price_df.apply(lambda x: get_price(x.reference, x.impressions, x.prices), axis=1)
co_price_df["price"] = co_price_df["price"].astype(float)
co_price_df = co_price_df.groupby("session_id").agg({'price': np.mean}).reset_index()
co_price_df.columns = ["session_id", "couted_price_mean"]
        X = pd.merge(X, co_price_df, on="session_id", how="left")
#
from scripts.features import features
import os
import pandas as pd
from numpy import asarray
import numpy as np
def paths():
INPUTS_DIR = os.getenv('VH_INPUTS_DIR', './inputs')
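    # VH_INPUTS_DIR appears to follow the Valohai inputs-directory convention; when it is
    # not set (e.g. when running locally) the loader falls back to ./inputs.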
folders = [
't1/BattlesStaging_01012021_WL_tagged.csv',
't2/BattlesStaging_01032021_WL_tagged.csv']
dr=[]
for folder in folders:
dr.append(os.path.join(INPUTS_DIR, folder))
return dr
def main():
print('Initializing main...')
dr = paths()
chunk_size = 500000
dfList = []
for file in dr:
filename = file
text_file_reader = pd.read_csv(filename, engine='python',encoding='utf-8-sig', chunksize = chunk_size, index_col=0)
for df in text_file_reader:
dfList.append(df)
if len(dfList)>1:
df = pd.concat(dfList,sort=False)
else:
df = dfList[0]
print('CSV as dataframes. Calculating features and labels...')
win_columns,loose_columns = features()
# Features
X1 = df[win_columns]
X1.columns = range(X1.shape[1])
X2 = df[loose_columns]
X2.columns = range(X2.shape[1])
    X = pd.concat([X1,X2],axis=0)
# -*- coding: utf-8 -*-
"""
AIDeveloper
---------
@author: maikherbig
"""
import os,sys,gc
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'#suppress warnings/info from tensorflow
if not sys.platform.startswith("win"):
from multiprocessing import freeze_support
freeze_support()
# Make sure to get the right icon file on win,linux and mac
if sys.platform=="darwin":
icon_suff = ".icns"
else:
icon_suff = ".ico"
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtWidgets, QtGui
from pyqtgraph import Qt
import aid_start
dir_root = os.path.dirname(aid_start.__file__)#ask the module for its origin
dir_settings = os.path.join(dir_root,"aid_settings.json")#dir to settings
Default_dict = aid_start.get_default_dict(dir_settings)
#try:
# splashapp = QtWidgets.QApplication(sys.argv)
# #splashapp.setWindowIcon(QtGui.QIcon("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256.ico"))
# # Create and display the splash screen
# splash_pix = os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff)
# splash_pix = QtGui.QPixmap(splash_pix)
# #splash_pix = QtGui.QPixmap("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256"+icon_suff)
# splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
# splash.setMask(splash_pix.mask())
# splash.show()
#except:
# pass
#BEFORE importing tensorflow or anything from keras: make sure the keras.json has
#certain properties
keras_json_path = os.path.expanduser('~')+os.sep+'.keras'+os.sep+'keras.json'
if not os.path.isdir(os.path.expanduser('~')+os.sep+'.keras'):
os.mkdir(os.path.expanduser('~')+os.sep+'.keras')
aid_start.banner() #show a fancy banner in console
aid_start.keras_json_check(keras_json_path)
import traceback,shutil,re,ast,io,platform
import h5py,json,time,copy,urllib,datetime
from stat import S_IREAD,S_IRGRP,S_IROTH,S_IWRITE,S_IWGRP,S_IWOTH
import tensorflow as tf
from tensorboard import program
from tensorboard import default
from tensorflow.python.client import device_lib
devices = device_lib.list_local_devices()
device_types = [devices[i].device_type for i in range(len(devices))]
#Get the number of CPU cores and GPUs
cpu_nr = os.cpu_count()
gpu_nr = device_types.count("GPU")
print("Nr. of GPUs detected: "+str(gpu_nr))
print("Found "+str(len(devices))+" device(s):")
print("------------------------")
for i in range(len(devices)):
print("Device "+str(i)+": "+devices[i].name)
print("Device type: "+devices[i].device_type)
print("Device description: "+devices[i].physical_device_desc)
print("------------------------")
#Split CPU and GPU into two lists of devices
devices_cpu = []
devices_gpu = []
for dev in devices:
if dev.device_type=="CPU":
devices_cpu.append(dev)
elif dev.device_type=="GPU":
devices_gpu.append(dev)
else:
print("Unknown device type:"+str(dev)+"\n")
import numpy as np
rand_state = np.random.RandomState(117) #to get the same random number on diff. PCs
from scipy import ndimage,misc
from sklearn import metrics,preprocessing
import PIL
import dclab
import cv2
import pandas as pd
import openpyxl,xlrd
import psutil
from keras.models import model_from_json,model_from_config,load_model,clone_model
from keras import backend as K
if 'GPU' in device_types:
keras_gpu_avail = K.tensorflow_backend._get_available_gpus()
if len(keras_gpu_avail)>0:
print("Following GPU is used:")
print(keras_gpu_avail)
print("------------------------")
else:
print("TensorFlow detected GPU, but Keras didn't")
print("------------------------")
from keras.preprocessing.image import load_img
from keras.utils import np_utils,multi_gpu_model
from keras.utils.conv_utils import convert_kernel
import keras_metrics #side package for precision, recall etc during training
global keras_metrics
import model_zoo
from keras2onnx import convert_keras
from onnx import save_model as save_onnx
import aid_img, aid_dl, aid_bin
import aid_frontend
from partial_trainability import partial_trainability
import aid_imports
VERSION = "0.2.3" #Python 3.5.6 Version
model_zoo_version = model_zoo.__version__()
print("AIDeveloper Version: "+VERSION)
print("model_zoo.py Version: "+model_zoo.__version__())
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
tooltips = aid_start.get_tooltips()
class MyPopup(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
class WorkerSignals(QtCore.QObject):
'''
Code inspired from here: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Defines the signals available from a running worker thread.
Supported signals are:
finished
No data
error
`tuple` (exctype, value, traceback.format_exc() )
result
`object` data returned from processing, anything
progress
`int` indicating % progress
history
`dict` containing keras model history.history resulting from .fit
'''
finished = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(tuple)
result = QtCore.pyqtSignal(object)
progress = QtCore.pyqtSignal(int)
history = QtCore.pyqtSignal(dict)
class Worker(QtCore.QRunnable):
'''
Code inspired/copied from: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Worker thread
    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
'''
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progress_callback'] = self.signals.progress
self.kwargs['history_callback'] = self.signals.history
@QtCore.pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result of the processing
finally:
self.signals.finished.emit() # Done
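# Illustrative sketch, not part of the original code: a Worker is meant to be submitted to a
# Qt thread pool with its signals connected to GUI-side slots. Assuming `fit_model` is a
# long-running callable that accepts the injected progress_callback/history_callback keyword
# arguments, usage could look like the helper below (defined only as an example, never called).
def _example_submit_worker(fit_model):
    worker = Worker(fit_model)
    worker.signals.progress.connect(lambda p: print("progress:", p))
    worker.signals.history.connect(lambda h: print("history keys:", list(h.keys())))
    worker.signals.finished.connect(lambda: print("worker finished"))
    QtCore.QThreadPool.globalInstance().start(worker)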
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi()
def setupUi(self):
aid_frontend.setup_main_ui(self,gpu_nr)
def retranslateUi(self):
aid_frontend.retranslate_main_ui(self,gpu_nr,VERSION)
def dataDropped(self, l):
#If there is data stored on ram tell user that RAM needs to be refreshed!
if len(self.ram)>0:
self.statusbar.showMessage("Newly added data is not yet in RAM. Only RAM data will be used. Use ->'File'->'Data to RAM now' to update RAM",5000)
#l is a list of some filenames (.rtdc) or folders (containing .jpg, jpeg, .png)
#Iterate over l and check if it is a folder or a file (directory)
isfile = [os.path.isfile(str(url)) for url in l]
isfolder = [os.path.isdir(str(url)) for url in l]
#####################For folders with images:##########################
#where are folders?
ind_true = np.where(np.array(isfolder)==True)[0]
foldernames = list(np.array(l)[ind_true]) #select the indices that are valid
#On mac, there is a trailing / in case of folders; remove them
foldernames = [os.path.normpath(url) for url in foldernames]
basename = [os.path.basename(f) for f in foldernames]
#Look quickly inside the folders and ask the user if he wants to convert
#to .rtdc (might take a while!)
if len(foldernames)>0: #User dropped (also) folders (which may contain images)
# filecounts = []
# for i in range(len(foldernames)):
# url = foldernames[i]
# files = os.listdir(url)
# files_full = [os.path.join(url,files[i]) for i in range(len(files))]
# filecounts.append(len([f for f in files_full if os.path.isfile(f)]))
# Text = []
# for b,n in zip(basename,filecounts):
# Text.append(b+": "+str(n)+" images")
# Text = "\n".join(Text)
Text = "Images from single folders are read and saved to individual \
.rtdc files with the same name like the corresponding folder.<b>If \
you have RGB images you can either save the full RGB information, \
or do a conversion to Grayscale (saves some diskspace but information \
about color is lost). RGB is recommended since AID will automatically\
do the conversion to grayscale later if required.<b>If you have \
Grayscale images, a conversion to RGB will just copy the info to all \
channels, which allows you to use RGB-mode and Grayscale-mode lateron."
Text = Text+"\nImages from following folders will be converted:\n"+"\n".join(basename)
#Show the user a summary with all the found folders and how many files are
#contained. Ask if he want to convert
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the images of the chosen folder(s)\
be converted to .rtdc using <b>RGB</b> or <b>Grayscale</b> format? <b>\
(RGB is recommended!)</b> Either option might take some time. You can \
reuse the .rtdc file next time.</p></body></html>"
msg.setText(text)
msg.setDetailedText(Text)
msg.setWindowTitle("Format for conversion to .rtdc (RGB/Grayscale)")
msg.addButton(QtGui.QPushButton('Convert to Grayscale'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('Convert to RGB'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
#Conversion of images in folders is (almost) independent from what
#is going to be fitted (So I leave the option menu still!)
#In options: Color Mode one can still use RGB mode and export here as
#Grayscale (but this would actually not work since RGB information is lost).
#The other way around works. Therefore it is recommended to export RGB!
if retval==0:
color_mode = "Grayscale"
channels = 1
elif retval==1:
color_mode = "RGB"
channels = 3
else:
return
self.statusbar.showMessage("Color mode' "+color_mode+"' is used",5000)
url_converted = []
for i in range(len(foldernames)):
url = foldernames[i]
print("Start converting images in\n"+url)
#try:
#get a list of files inside this directory:
images,pos_x,pos_y = [],[],[]
for root, dirs, files in os.walk(url):
for file in files:
try:
path = os.path.join(root, file)
img = load_img(path,color_mode=color_mode.lower()) #This uses PIL and supports many many formats!
images.append(np.array(img)) #append nice numpy array to list
#create pos_x and pos_y
pos_x.append( int(np.round(img.width/2.0,0)) )
pos_y.append( int(np.round(img.height/2.0,0)) )
except:
pass
#Thanks to andko76 for pointing that unequal image sizes cause an error:
#https://github.com/maikherbig/AIDeveloper/issues/1
#Check that all images have the same size
# img_shape_errors = 0
# text_error = "Images have unequal dimensions:"
# img_h = [a.shape[0] for a in images]
# img_h_uni = len(np.unique(img_h))
# if img_h_uni!=1:
# text_error += "\n- found unequal heights"
# img_shape_errors=1
# img_w = [a.shape[1] for a in images]
# img_w_uni = len(np.unique(img_w))
# if img_w_uni!=1:
# text_error += "\n- found unequal widths"
# img_shape_errors=1
# img_c = [len(a.shape) for a in images]
# img_c_uni = len(np.unique(img_c))
# if img_c_uni!=1:
# text_error += "\n- found unequal numbers of channels"
# img_shape_errors=1
# #If there were issues detected, show error message
# if img_shape_errors==1:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Warning)
# msg.setText(str(text_error))
# msg.setWindowTitle("Error: Unequal image shapes")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
#Get a list of occurring image dimensions (width and height)
img_shape = [a.shape[0] for a in images] + [a.shape[1] for a in images]
dims = np.unique(img_shape)
#Get a list of occurrences of each image shape
img_shape = [str(a.shape[0])+" x "+str(a.shape[1]) for a in images]
occurences = np.unique(img_shape,return_counts=True)
#inform user if there is more than one img shape
if len(occurences[0])>1 or len(dims)>1:
text_detail = "Path: "+url
text_detail += "\nFollowing image shapes are present"
for i in range(len(occurences[0])):
text_detail+="\n- "+str(occurences[1][i])+" times: "+str(occurences[0][i])
self.popup_imgRes = QtGui.QDialog()
self.popup_imgRes_ui = aid_frontend.popup_imageLoadResize()
self.popup_imgRes_ui.setupUi(self.popup_imgRes) #open a popup to show options for image resizing (make image equally sized)
#self.popup_imgRes.setWindowModality(QtCore.Qt.WindowModal)
self.popup_imgRes.setWindowModality(QtCore.Qt.ApplicationModal)
#Insert information into textBrowser
self.popup_imgRes_ui.textBrowser_imgResize_occurences.setText(text_detail)
Image_import_dimension = Default_dict["Image_import_dimension"]
self.popup_imgRes_ui.spinBox_ingResize_h_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_h_2.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_2.setValue(Image_import_dimension)
Image_import_interpol_method = Default_dict["Image_import_interpol_method"]
index = self.popup_imgRes_ui.comboBox_resizeMethod.findText(Image_import_interpol_method, QtCore.Qt.MatchFixedString)
if index >= 0:
self.popup_imgRes_ui.comboBox_resizeMethod.setCurrentIndex(index)
#Define function for the OK button:
def popup_imgRes_ok(images,channels,pos_x,pos_y):
print("Start resizing operation")
#Get info from GUI
final_h = int(self.popup_imgRes_ui.spinBox_ingResize_h_1.value())
print("Height:"+str(final_h))
final_w = int(self.popup_imgRes_ui.spinBox_ingResize_w_1.value())
print("Width:"+str(final_w))
Default_dict["Image_import_dimension"] = final_h
pix = 1
if self.popup_imgRes_ui.radioButton_imgResize_cropPad.isChecked():#cropping and padding method
images = aid_img.image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode="cv2.BORDER_CONSTANT")
elif self.popup_imgRes_ui.radioButton_imgResize_interpolate.isChecked():
interpolation_method = str(self.popup_imgRes_ui.comboBox_resizeMethod.currentText())
Default_dict["Image_import_interpol_method"] = interpolation_method
images = aid_img.image_resize_scale(images,pos_x,pos_y,final_h,final_w,channels,interpolation_method,verbose=False)
else:
print("Invalid image resize method!")
#Save the Default_dict
aid_bin.save_aid_settings(Default_dict)
self.popup_imgRes.accept()
return images
#Define function for the Cancel button:
def popup_imgRes_cancel():
self.popup_imgRes.close()
return
self.popup_imgRes_ui.pushButton_imgResize_ok.clicked.connect(lambda: popup_imgRes_ok(images,channels,pos_x,pos_y))
self.popup_imgRes_ui.pushButton_imgResize_cancel.clicked.connect(popup_imgRes_cancel)
retval = self.popup_imgRes.exec_()
#retval is 0 if the user clicked cancel or just closed the window; in this case just exit the function
if retval==0:
return
#get new pos_x, pos_y (after cropping, the pixel value for the middle of the image is different!)
pos_x = [int(np.round(img.shape[1]/2.0,0)) for img in images]
pos_y = [int(np.round(img.shape[0]/2.0,0)) for img in images]
#Now, all images are of identical shape and can be converted to a numpy array
images = np.array((images), dtype="uint8")
pos_x = np.array((pos_x), dtype="uint8")
pos_y = np.array((pos_y), dtype="uint8")
#Save as foldername.rtdc
fname = url+".rtdc"
if os.path.isfile(fname):
#ask user if file can be overwritten
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>File:"+fname+" already exists. Should it be overwritten?</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Overwrite file?")
msg.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
if retval==0:
try:
os.remove(fname)
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
retval = msg.exec_()
elif retval==1:
pass
else:
pass
else:#file does not yet exist. Create it
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
print("Finished converting! Final dimension of image tensor is:"+str(images.shape))
#Now load the created files directly to drag/drop-region!
self.dataDropped(url_converted)
#####################For .rtdc files:##################################
#where are files?
ind_true = np.where(np.array(isfile)==True)[0]
filenames = list(np.array(l)[ind_true]) #select the indices that are valid
#check if the file can be opened and get some information
fileinfo = []
for i in range(len(filenames)):
rtdc_path = filenames[i]
try:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
features = list(rtdc_ds["events"].keys())
#Make sure that there is "images", "pos_x" and "pos_y" available
if "image" in features and "pos_x" in features and "pos_y" in features:
nr_images = rtdc_ds["events"]["image"].len()
pix = rtdc_ds.attrs["imaging:pixel size"]
xtra_in_available = len(rtdc_ds.keys())>2 #Is True, only if there are more than 2 elements.
fileinfo.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"pix":pix,"xtra_in":xtra_in_available})
else:
missing = []
for feat in ["image","pos_x","pos_y"]:
if feat not in features:
missing.append(feat)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Essential feature(s) are missing in data-set")
msg.setDetailedText("Data-set: "+rtdc_path+"\nis missing "+str(missing))
msg.setWindowTitle("Missing essential features")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
except Exception as e:
print(e)
#Add the stuff to the combobox on Plot/Peak Tab
url_list = [fileinfo[iterator]["rtdc_path"] for iterator in range(len(fileinfo))]
self.comboBox_chooseRtdcFile.addItems(url_list)
self.comboBox_selectData.addItems(url_list)
if len(url_list)==0: #This fixes the issue that the prog. crashes if accidentally a tableitem is dragged and "dropped" on the table
return
width=self.comboBox_selectData.fontMetrics().boundingRect(max(url_list, key=len)).width()
self.comboBox_selectData.view().setFixedWidth(width+10)
for rowNumber in range(len(fileinfo)):#for url in l:
url = fileinfo[rowNumber]["rtdc_path"]
#add to table
rowPosition = self.table_dragdrop.rowCount()
self.table_dragdrop.insertRow(rowPosition)
columnPosition = 0
line = QtWidgets.QLabel(self.table_dragdrop)
line.setText(url)
line.setDisabled(True)
line.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, line)
# item = QtWidgets.QTableWidgetItem(url)
# item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
# print(item.textAlignment())
# item.setTextAlignment(QtCore.Qt.AlignRight) # change the alignment
# #item.setTextAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AnchorRight) # change the alignment
# self.table_dragdrop.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
spinb = QtWidgets.QSpinBox(self.table_dragdrop)
spinb.valueChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, spinb)
for columnPosition in range(2,4):
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#Place a button which allows to show a plot (scatter, histo...lets see)
btn = QtWidgets.QPushButton(self.table_dragdrop)
btn.setMinimumSize(QtCore.QSize(50, 30))
btn.setMaximumSize(QtCore.QSize(50, 30))
btn.clicked.connect(self.button_hist)
btn.setText('Plot')
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, btn)
self.table_dragdrop.resizeRowsToContents()
# columnPosition = 5
# #Place a combobox with the available features
# cb = QtWidgets.QComboBox(self.table_dragdrop)
# cb.addItems(fileinfo[rowNumber]["features"])
# cb.setMinimumSize(QtCore.QSize(70, 30))
# cb.setMaximumSize(QtCore.QSize(70, 30))
# width=cb.fontMetrics().boundingRect(max(fileinfo[rowNumber]["features"], key=len)).width()
# cb.view().setFixedWidth(width+30)
# self.table_dragdrop.setCellWidget(rowPosition, columnPosition, cb)
columnPosition = 5
#Place a combobox with the available features
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, fileinfo[rowNumber]["nr_images"])
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 6
#Field to user-define nr. of cells/epoch
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole,100)
#item.cellChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 7
#Pixel size
item = QtWidgets.QTableWidgetItem()
pix = float(fileinfo[rowNumber]["pix"])
#print(pix)
item.setData(QtCore.Qt.EditRole,pix)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 8
#Should data be shuffled (random?)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Checked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 9
#Zooming factor
item = QtWidgets.QTableWidgetItem()
zoom = 1.0
item.setData(QtCore.Qt.EditRole,zoom)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 10
#Should xtra_data be used?
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
xtra_in_available = fileinfo[rowNumber]["xtra_in"]
if xtra_in_available:
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
else:
item.setFlags( QtCore.Qt.ItemIsUserCheckable )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
#Functions for Keras augmentation checkboxes
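#Note: each keras_changed_* handler below receives the Qt check state
#(0 = unchecked, 2 = Qt.Checked) and enables/disables the corresponding
#augmentation field, restoring its default value from Default_dict when switched on.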
def keras_changed_rotation(self,on_or_off):
if on_or_off==0:
self.lineEdit_Rotation.setText(str(0))
self.lineEdit_Rotation.setEnabled(False)
elif on_or_off==2:
self.lineEdit_Rotation.setText(str(Default_dict ["rotation"]))
self.lineEdit_Rotation.setEnabled(True)
else:
return
def keras_changed_width_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_widthShift.setText(str(0))
self.lineEdit_widthShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_widthShift.setText(str(Default_dict ["width_shift"]))
self.lineEdit_widthShift.setEnabled(True)
else:
return
def keras_changed_height_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_heightShift.setText(str(0))
self.lineEdit_heightShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_heightShift.setText(str(Default_dict ["height_shift"]))
self.lineEdit_heightShift.setEnabled(True)
else:
return
def keras_changed_zoom(self,on_or_off):
if on_or_off==0:
self.lineEdit_zoomRange.setText(str(0))
self.lineEdit_zoomRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_zoomRange.setText(str(Default_dict ["zoom"]))
self.lineEdit_zoomRange.setEnabled(True)
else:
return
def keras_changed_shear(self,on_or_off):
if on_or_off==0:
self.lineEdit_shearRange.setText(str(0))
self.lineEdit_shearRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_shearRange.setText(str(Default_dict ["shear"]))
self.lineEdit_shearRange.setEnabled(True)
else:
return
def keras_changed_brightplus(self,on_or_off):
if on_or_off==0:
self.spinBox_PlusLower.setValue(0)
self.spinBox_PlusLower.setEnabled(False)
self.spinBox_PlusUpper.setValue(0)
self.spinBox_PlusUpper.setEnabled(False)
elif on_or_off==2:
self.spinBox_PlusLower.setValue(Default_dict ["Brightness add. lower"])
self.spinBox_PlusLower.setEnabled(True)
self.spinBox_PlusUpper.setValue(Default_dict ["Brightness add. upper"])
self.spinBox_PlusUpper.setEnabled(True)
else:
return
def keras_changed_brightmult(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_MultLower.setValue(1.0)
self.doubleSpinBox_MultLower.setEnabled(False)
self.doubleSpinBox_MultUpper.setValue(1.0)
self.doubleSpinBox_MultUpper.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_MultLower.setValue(Default_dict ["Brightness mult. lower"])
self.doubleSpinBox_MultLower.setEnabled(True)
self.doubleSpinBox_MultUpper.setValue(Default_dict ["Brightness mult. upper"])
self.doubleSpinBox_MultUpper.setEnabled(True)
else:
return
def keras_changed_noiseMean(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseMean.setValue(0.0)
self.doubleSpinBox_GaussianNoiseMean.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseMean.setValue(Default_dict ["Gaussnoise Mean"])
self.doubleSpinBox_GaussianNoiseMean.setEnabled(True)
else:
return
def keras_changed_noiseScale(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseScale.setValue(0.0)
self.doubleSpinBox_GaussianNoiseScale.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseScale.setValue(Default_dict ["Gaussnoise Scale"])
self.doubleSpinBox_GaussianNoiseScale.setEnabled(True)
else:
return
def keras_changed_contrast(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_contrastLower.setEnabled(False)
self.doubleSpinBox_contrastHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
else:
return
def keras_changed_saturation(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
else:
return
def keras_changed_hue(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_hueDelta.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
return
def expert_mode_off(self,on_or_off):
"""
Reset all values on the expert tab to the default values, excluding the metrics.
Metrics are defined only once when fitting starts and should not be changed.
"""
if on_or_off==0: #switch off
self.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.spinBox_epochs.setValue(1)
self.checkBox_expt_loss.setChecked(False)
self.expert_loss_off(0)
self.groupBox_learningRate.setChecked(False)
self.expert_learningrate_off(0)
self.checkBox_optimizer.setChecked(False)
self.expert_optimizer_off(0)
def expert_loss_off(self,on_or_off):
if on_or_off==0: #switch off
#switch back to categorical_crossentropy
index = self.comboBox_expt_loss.findText("categorical_crossentropy", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_expt_loss.setCurrentIndex(index)
def expert_learningrate_off(self,on_or_off):
if on_or_off==0: #switch off
#which optimizer is used? (there are different default learning-rates
#for each optimizer!)
optimizer = str(self.comboBox_optimizer.currentText())
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
self.radioButton_LrCycl.setChecked(False)
self.radioButton_LrExpo.setChecked(False)
self.radioButton_LrConst.setChecked(True)
def expert_optimizer_off(self,on_or_off):
if on_or_off==0: #switch off, set the optimizer back to Adam
optimizer = "Adam"
index = self.comboBox_optimizer.findText(optimizer, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_optimizer.setCurrentIndex(index)
#also reset the learning rate to the default
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
def expert_optimizer_changed(self,optimizer_text,listindex):
# print("optimizer_text: "+str(optimizer_text))
# print("listindex: "+str(listindex))
if optimizer_text=="":
return
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
#set the learning rate to the default for this optimizer
value_current = float(item_ui.doubleSpinBox_learningRate.value())
value_wanted = Default_dict["doubleSpinBox_learningRate_"+optimizer_text]
#insert the current value in the optimizer_settings:
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value_current
item_ui.optimizer_settings["comboBox_optimizer"] = optimizer_text
try: #only works on the fitting-popup
text = str(item_ui.textBrowser_FittingInfo.toPlainText())
except:
text = "Epoch"
# print("text: "+str(text))
if value_current!=value_wanted and "Epoch" in text:#avoid that the message pops up when window is created
item_ui.doubleSpinBox_learningRate.setValue(value_wanted)
item_ui.doubleSpinBox_expDecInitLr.setValue(value_wanted)
#Inform user
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setWindowTitle("Learning rate to default")
msg.setText("Learning rate was set to the default for "+optimizer_text)
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def expert_lr_changed(self,value,optimizer_text,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value
def update_hist1(self):
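#Plot a histogram of the feature selected in comboBox_feat1 for the currently
#loaded dataset (self.rtdc_ds), drawn as a step-mode curve into self.plt1.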
feature = str(self.comboBox_feat1.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
# self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_hist2(self):
feature = str(self.comboBox_feat2.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_scatter(self):
feature_x = str(self.comboBox_feat1.currentText())
feature_x_values = self.rtdc_ds["events"][feature_x]
feature_y = str(self.comboBox_feat2.currentText())
feature_y_values = self.rtdc_ds["events"][feature_y]
if len(feature_x_values)==len(feature_y_values):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
#y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(feature_x_values, feature_y_values,pen=None,symbol='o',clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def button_hist(self,item):
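#Triggered by the 'Plot' button of a row in the file table: load the corresponding
#.rtdc file and open a popup with two feature comboboxes plus buttons for
#histograms and a scatter plot for quick data inspection.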
buttonClicked = self.sender()
index = self.table_dragdrop.indexAt(buttonClicked.pos())
rowPosition = index.row()
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
# feature_values = rtdc_ds[feature]
#Init a popup window
self.w = MyPopup()
self.w.setWindowTitle(rtdc_path)
self.w.setObjectName(_fromUtf8("w"))
self.gridLayout_w2 = QtWidgets.QGridLayout(self.w)
self.gridLayout_w2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_w2.setObjectName(_fromUtf8("gridLayout_w2"))
self.widget = QtWidgets.QWidget(self.w)
self.widget.setMinimumSize(QtCore.QSize(0, 65))
self.widget.setMaximumSize(QtCore.QSize(16777215, 65))
self.widget.setObjectName(_fromUtf8("widget"))
self.horizontalLayout_w3 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_w3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_w3.setObjectName(_fromUtf8("horizontalLayout_w3"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout_w"))
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout_w"))
self.comboBox_feat1 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat1.setObjectName(_fromUtf8("comboBox_feat1"))
features = list(self.rtdc_ds["events"].keys())
self.comboBox_feat1.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat1)
self.comboBox_feat2 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat2.setObjectName(_fromUtf8("comboBox_feat2"))
self.comboBox_feat2.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w)
self.horizontalLayout_w2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_w2.setObjectName(_fromUtf8("horizontalLayout_w2"))
self.pushButton_Hist1 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist1.setObjectName(_fromUtf8("pushButton_Hist1"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist1)
self.pushButton_Hist2 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist2.setObjectName(_fromUtf8("pushButton_Hist2"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w2)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w)
self.verticalLayout_w2 = QtWidgets.QVBoxLayout()
self.verticalLayout_w2.setObjectName(_fromUtf8("verticalLayout_w2"))
self.pushButton_Scatter = QtWidgets.QPushButton(self.widget)
self.pushButton_Scatter.setObjectName(_fromUtf8("pushButton_Scatter"))
self.verticalLayout_w2.addWidget(self.pushButton_Scatter)
self.checkBox_ScalePix = QtWidgets.QCheckBox(self.widget)
self.checkBox_ScalePix.setObjectName(_fromUtf8("checkBox_ScalePix"))
self.verticalLayout_w2.addWidget(self.checkBox_ScalePix)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w2)
self.gridLayout_w2.addWidget(self.widget, 0, 0, 1, 1)
self.pushButton_Hist1.setText("Hist")
self.pushButton_Hist1.clicked.connect(self.update_hist1)
self.pushButton_Hist2.setText("Hist")
self.pushButton_Hist2.clicked.connect(self.update_hist2)
self.pushButton_Scatter.setText("Scatter")
self.pushButton_Scatter.clicked.connect(self.update_scatter)
self.checkBox_ScalePix.setText("Scale by pix")
self.histogram = pg.GraphicsWindow()
self.plt1 = self.histogram.addPlot()
# y,x = np.histogram(feature_values, bins='auto')
# plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150))
self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
self.w.show()
def update_historyplot_pop(self,listindex):
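#Redraw the training-history plot of a fitting popup: collect the metrics whose
#checkboxes are ticked in tableWidget_HistoryInfo_pop and create one scatter item
#per selected metric (stored in historyscatters for real-time updating).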
#listindex = self.popupcounter-1 #len(self.fittingpopups_ui)-1
#After the first epoch there are checkboxes available. Check, if user checked some:
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is it checked for train?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
self.Colors = Colors
Histories = self.fittingpopups_ui[listindex].Histories
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only keep the last history item, because that corresponds to the model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
# if len(DF1)>0:
# DF1 = pd.concat(DF1)
# else:
# return
self.fittingpopups_ui[listindex].widget_pop.clear()
#Create fresh plot
plt1 = self.fittingpopups_ui[listindex].widget_pop.addPlot()
plt1.showGrid(x=True,y=True)
plt1.addLegend()
plt1.setLabel('bottom', 'Epoch', units='')
#Create a dict that stores plots for each metric (for real time plotting)
self.fittingpopups_ui[listindex].historyscatters = dict()
for i in range(len(selected_items)):
key = selected_items[i]
df = DF1[key]
color = self.Colors[i]
pen_rollmedi = list(color.color().getRgb())
pen_rollmedi = pg.mkColor(pen_rollmedi)
pen_rollmedi = pg.mkPen(color=pen_rollmedi,width=6)
color = list(color.color().getRgb())
color[-1] = int(0.6*color[-1])
color = tuple(color)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
#print(df)
historyscatter = plt1.plot(range(len(df)), df.values, pen=None,symbol='o',symbolPen=None,symbolBrush=brush,name=key,clear=False)
#self.fittingpopups_ui[listindex].historyscatters.append(historyscatter)
self.fittingpopups_ui[listindex].historyscatters[key]=historyscatter
def stop_fitting_pop(self,listindex):
#listindex = len(self.fittingpopups_ui)-1
epochs = self.fittingpopups_ui[listindex].epoch_counter
#Stop button on the fitting popup
#Should stop the fitting process and save the metafile
#1. Change the nr. requested epochs to a smaller number
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(epochs-1)
#2. Check the box which will cause that the new parameters are applied at next epoch
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(True)
def pause_fitting_pop(self,listindex):
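#Note: the pause state is encoded in the button text (" " = fitting runs and the
#button offers 'pause'; "" = paused and the button offers 'continue'); only the
#text, icon and stylesheet are swapped here, the actual pausing is presumably
#handled by the fitting loop checking this text.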
#Just change the text on the button
if str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())==" ":
#If the text on the button was Pause, change it to Continue
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText("")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("background-color: green")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"continue.png")))
elif str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
#If the text on the button was Continue, change it to Pause
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText(" ")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"pause.png")))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("")
def saveTextWindow_pop(self,listindex):
#Get the entire content of textBrowser_FittingInfo
text = str(self.fittingpopups_ui[listindex].textBrowser_FittingInfo.toPlainText())
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Fitting info', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename
if len(filename)>0:
f = open(filename,'w')
f.write(text)
f.close()
def clearTextWindow_pop(self,listindex):
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.clear()
def showModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
def saveModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Model summary', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename
f = open(filename,'w')
f.write(text)
f.close()
#class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
def get_class_weight(self,SelectedFiles,lossW_expert,custom_check_classes=False):
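#Build the class_weight dictionary passed to model.fit(). 'lossW_expert' can be
#"None"/"" (no weighting), "Balanced" (weights inversely proportional to the number
#of events per class in the training files), or a string holding a custom dict,
#optionally checked against the occurring classes. Note: the timing lines at the
#end of this function are unreachable because every branch returns earlier.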
t1 = time.time()
print("Getting dictionary for class_weight")
if lossW_expert=="None":
return None
elif lossW_expert=="":
return None
elif lossW_expert=="Balanced":
#Which are training files?
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
counter = {}
for class_ in classes_uni:
ind = np.where(np.array(classes)==class_)[0]
nr_events_epoch_class = np.array(nr_events_epoch)[ind]
counter[class_] = np.sum(nr_events_epoch_class)
max_val = float(max(counter.values()))
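#Balanced weighting: weight_c = max(counts) / counts_c, so the largest class gets
#weight 1.0. Illustrative example: counts {0: 100, 1: 25} -> weights {0: 1.0, 1: 4.0}.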
return {class_id : max_val/num_images for class_id, num_images in counter.items()}
elif lossW_expert.startswith("{"):#Custom loss weights
class_weights = eval(lossW_expert)
if custom_check_classes:#Check that each element in classes_uni is contained in class_weights.keys()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
classes_uni = np.sort(classes_uni)
class_weights_keys = np.sort([int(a) for a in class_weights.keys()])
#each element in classes_uni has to be equal to class_weights_keys
equal = np.array_equal(classes_uni,class_weights_keys)
if equal == True:
return class_weights
else:
#If the equal is false I'm really in trouble...
#run the function again, but request 'Balanced' weights. I'm not sure if this should be the default...
class_weights = self.get_class_weight(SelectedFiles,"Balanced")
return ["Balanced",class_weights]
else:
return class_weights
t2 = time.time()
dt = np.round(t2-t1,2)
print("Comp. time = "+str(dt))
def accept_lr_range(self):
lr_start = str(self.popup_lrfinder_ui.lineEdit_LrMin.text())
lr_stop = str(self.popup_lrfinder_ui.lineEdit_LrMax.text())
if len(lr_start)>0 and len(lr_stop)>0:
self.lineEdit_cycLrMin.setText(lr_start)
self.lineEdit_cycLrMax.setText(lr_stop)
else:
print("Found no values for LR range")
def accept_lr_value(self):
single_lr = self.popup_lrfinder_ui.lineEdit_singleLr.text()
if len(single_lr)>0:
lr_value = float(single_lr)
self.doubleSpinBox_learningRate.setValue(lr_value)
self.doubleSpinBox_expDecInitLr.setValue(lr_value)
else:
print("Found no value for single LR!")
def reset_lr_settings(self):
self.popup_lrfinder_ui.lineEdit_startLr.setText(_translate("Form_LrFinder", "1e-10", None))
self.popup_lrfinder_ui.lineEdit_stopLr.setText(_translate("Form_LrFinder", "0.1", None))
self.popup_lrfinder_ui.doubleSpinBox_percDataT.setProperty("value", 100.0)
self.popup_lrfinder_ui.doubleSpinBox_percDataV.setProperty("value", 100.0)
self.popup_lrfinder_ui.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.popup_lrfinder_ui.spinBox_lineWidth.setProperty("value", 6)
self.popup_lrfinder_ui.spinBox_epochs.setProperty("value", 5)
def reset_lr_value(self):
self.popup_lrfinder_ui.lineEdit_singleLr.setText("")
#Uncheck and Check the groupbox to refresh the line
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(False)
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(True)
def reset_lr_range(self):
self.popup_lrfinder_ui.lineEdit_LrMin.setText("")
self.popup_lrfinder_ui.lineEdit_LrMax.setText("")
#Uncheck and Check the groupbox to refresh the range
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(False)
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(True)
def popup_lr_finder(self):
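#Open the learning-rate finder popup: copy the current model/loss/optimizer/batch
#settings into it, wire its buttons to the accept/reset handlers for an LR range or
#a single LR value, and keep steps-per-epoch in sync with batch size and data percentage.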
SelectedFiles = self.items_clicked()
self.popup_lrfinder = MyPopup()
self.popup_lrfinder_ui = aid_frontend.popup_lrfinder()
self.popup_lrfinder_ui.setupUi(self.popup_lrfinder) #open a popup for lr finder
#Get information about the model
#check, which radiobutton is clicked and just copy paste the text from there
if self.radioButton_NewModel.isChecked():
modelname = str(self.comboBox_ModelSelection.currentText())
if modelname==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
elif self.radioButton_LoadContinueModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadRestartModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please specify a model using the radiobuttons on the 'Define Model' -tab")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
#Put information onto UI
self.popup_lrfinder_ui.lineEdit_loadModel.setText(modelname)
self.popup_lrfinder_ui.spinBox_Crop_inpImgSize.setValue(in_dim)
color_mode = self.get_color_mode()
self.popup_lrfinder_ui.comboBox_colorMode.addItem(color_mode)
loss_str = str(self.comboBox_expt_loss.currentText())
self.popup_lrfinder_ui.comboBox_expt_loss.addItem(loss_str)
optimizer_str = str(self.comboBox_optimizer.currentText())
self.popup_lrfinder_ui.comboBox_optimizer.addItem(optimizer_str)
batch_size = self.spinBox_batchSize.value()
self.popup_lrfinder_ui.spinBox_batchSize.setValue(batch_size)
#Connect action_lr_finder function to button
self.popup_lrfinder_ui.pushButton_LrFindRun.clicked.connect(lambda: self.action_initialize_model(duties="initialize_lrfind"))
self.popup_lrfinder_ui.pushButton_rangeAccept.clicked.connect(self.accept_lr_range)
self.popup_lrfinder_ui.pushButton_singleAccept.clicked.connect(self.accept_lr_value)
self.popup_lrfinder_ui.pushButton_LrReset.clicked.connect(self.reset_lr_settings)
self.popup_lrfinder_ui.pushButton_singleReset.clicked.connect(self.reset_lr_value)
self.popup_lrfinder_ui.pushButton_rangeReset.clicked.connect(self.reset_lr_range)
#Update the plot when any plotting option is changed
self.popup_lrfinder_ui.comboBox_metric.currentIndexChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.spinBox_lineWidth.valueChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.checkBox_smooth.toggled.connect(self.update_lrfind_plot)
#LR single value when groupbox is toggled
self.popup_lrfinder_ui.groupBox_singleLr.toggled.connect(self.get_lr_single)
#LR range when groupbox is toggled
self.popup_lrfinder_ui.groupBox_LrRange.toggled.connect(self.get_lr_range)
#compute the number of steps/epoch
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
def update_stepsPerEpoch():
batch_size = self.popup_lrfinder_ui.spinBox_batchSize.value()
perc_data = self.popup_lrfinder_ui.doubleSpinBox_percDataT.value()
nr_events = (perc_data/100)*nr_events_train_total
stepsPerEpoch = int(np.ceil(nr_events / float(batch_size)))
self.popup_lrfinder_ui.spinBox_stepsPerEpoch.setValue(stepsPerEpoch)
update_stepsPerEpoch()
self.popup_lrfinder_ui.spinBox_batchSize.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder_ui.doubleSpinBox_percDataT.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder.show()
def popup_clr_settings(self,listindex):
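#Popup for the cyclical-learning-rate settings: 'step_size' (epochs per half cycle)
#and 'gamma' (decay factor for the exp_range mode) are stored in item_ui.clr_settings.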
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_clrsettings = MyPopup()
item_ui.popup_clrsettings_ui = aid_frontend.Ui_Clr_settings()
item_ui.popup_clrsettings_ui.setupUi(item_ui.popup_clrsettings) #open a popup for lr plotting
##Manual insertion##
item_ui.popup_clrsettings_ui.spinBox_stepSize.setProperty("value", item_ui.clr_settings["step_size"])
item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.setProperty("value", item_ui.clr_settings["gamma"])
def clr_settings_ok():
step_size = int(item_ui.popup_clrsettings_ui.spinBox_stepSize.value())
gamma = float(item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.value())
item_ui.clr_settings["step_size"] = step_size #Number of epochs to fulfill half a cycle
item_ui.clr_settings["gamma"] = gamma #gamma factor for Exponential decrease method (exp_range)
print("Settings for cyclical learning rates were changed.")
#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
def clr_settings_cancel():#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
item_ui.popup_clrsettings_ui.pushButton_ok.clicked.connect(clr_settings_ok)
item_ui.popup_clrsettings_ui.pushButton_cancel.clicked.connect(clr_settings_cancel)
item_ui.popup_clrsettings.show()
def popup_lr_plot(self,listindex):
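#Preview the learning-rate schedule over all requested epochs for the currently
#selected mode (constant, cyclical via aid_dl.cyclicLR, or exponential decay via
#aid_dl.exponentialDecay) and show the settings used as text next to the plot.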
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_lrplot = MyPopup()
item_ui.popup_lrplot_ui = aid_frontend.popup_lrplot()
item_ui.popup_lrplot_ui.setupUi(item_ui.popup_lrplot) #open a popup for lr plotting
#compute total number of epochs that will be fitted
spinBox_NrEpochs = item_ui.spinBox_NrEpochs.value() #my own loop
spinBox_epochs = item_ui.spinBox_epochs.value() #inside model.fit()
nr_epochs = spinBox_NrEpochs*spinBox_epochs
item_ui.popup_lrplot_ui.spinBox_totalEpochs.setValue(nr_epochs)
#Get the number of training examples
SelectedFiles = self.items_clicked()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
if nr_events_train_total==0 and item_ui.radioButton_LrConst.isChecked()==False:
#for Cyclical learning rates and Exponential learning rates, the
#number of training images is needed
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no training data. Nr. of training images is required for this plot.")
msg.setWindowTitle("Nr. of training images = 0")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text_info = ""
if item_ui.radioButton_LrConst.isChecked():
text_info+="Constant learning rate\n"
epochs_plot = np.array(range(nr_epochs))
const_lr = float(self.doubleSpinBox_learningRate.value())
learningrates = np.repeat(const_lr,nr_epochs)
elif item_ui.radioButton_LrCycl.isChecked():
text_info+="Cyclical learning rates\n"
base_lr = float(item_ui.lineEdit_cycLrMin.text())
max_lr = float(item_ui.lineEdit_cycLrMax.text())
batch_size = int(item_ui.spinBox_batchSize.value())
step_size = item_ui.clr_settings["step_size"] #batch updates in a half cycle
step_size_ = step_size*int(np.round(nr_events_train_total / batch_size))#step size in batch iterations (updates per half cycle)
mode = str(item_ui.comboBox_cycLrMethod.currentText())
clr_iterations = nr_epochs*int(np.round(nr_events_train_total / batch_size))#total number of batch iterations over all epochs
nr_cycles = (clr_iterations/step_size_)/2.0#number of cycles
gamma = item_ui.clr_settings["gamma"] #gamma factor for the exp_range
#Generate text to display the settings used
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="base_lr: "+str(base_lr)+"\n"
text_info+="max_lr: "+str(max_lr)+"\n"
text_info+="batch_size: "+str(batch_size)+"\n"
text_info+="mode: "+str(mode)+"\n"
text_info+="gamma: "+str(gamma)+"\n"
text_info+="Nr. of epochs to fulfill one cycle: "+str(2*step_size)+"\n"
#text_info+="Total nr. of lr adjustmend: "+str(step_size_)+"\n"
text_info+="Total nr. of lr adjustments: "+str(clr_iterations)+"\n"
text_info+="Total nr. of cycles: "+str(nr_cycles)+"\n"
#Request the learning rates from the class cyclicLR
clr_iterations = np.arange(clr_iterations)
clr_1 = aid_dl.cyclicLR(base_lr=base_lr,max_lr=max_lr,step_size=step_size_,mode=mode,gamma=gamma)
clr_1.clr_iterations=clr_iterations#pass the number of clr iterations to the class
learningrates = clr_1.clr() #compute the learning rates for each iteration
#convert clr_iterations back to "epochs"
epochs_plot = clr_iterations/int(np.round(nr_events_train_total / batch_size))
elif item_ui.radioButton_LrExpo.isChecked():
text_info+="Exponentially decreased learning rates\n"
initial_lr = float(item_ui.doubleSpinBox_expDecInitLr.value())
decay_steps = int(item_ui.spinBox_expDecSteps.value())
decay_rate = float(item_ui.doubleSpinBox_expDecRate.value())
batch_size = int(item_ui.spinBox_batchSize.value())
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="initial_lr: "+str(initial_lr)+"\n"
text_info+="decay_steps: "+str(decay_steps)+"\n"
text_info+="decay_rate: "+str(decay_rate)+"\n"
#epochs_plot = np.array(range(nr_epochs))
epochs_plot = nr_epochs * int(np.round(nr_events_train_total / batch_size))
epochs_plot = np.arange(epochs_plot)
exp_decay = aid_dl.exponentialDecay(initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
exp_decay.iterations=epochs_plot#pass the number of clr iterations to the class
learningrates = exp_decay.exp_decay()
epochs_plot = epochs_plot/int(np.round(nr_events_train_total / batch_size))
#learningrates = aid_dl.exponentialDecay(epochs_plot,initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
def refreshPlot():
try: # try to empty the plot
item_ui.popup_lrplot_ui.lr_plot.removeItem(item_ui.lr_line2)
except:
pass
#Get design settings
color = item_ui.popup_lrplot_ui.pushButton_color.palette().button().color()
width = int(item_ui.popup_lrplot_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor=pg.mkPen(color, width=width)
#define curve and add to plot
item_ui.lr_line2 = pg.PlotCurveItem(x=epochs_plot, y=learningrates,pen=pencolor)
item_ui.popup_lrplot_ui.lr_plot.addItem(item_ui.lr_line2)
refreshPlot()
item_ui.popup_lrplot_ui.pushButton_refreshPlot.clicked.connect(refreshPlot)
item_ui.popup_lrplot_ui.textBrowser_lrSettings.setText(text_info)
item_ui.popup_lrplot.show()
def lossWeights_activated(self,on_or_off,listindex):
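#Checkbox handler for the 'loss weights' expert option: switching it off clears the
#line edit and disables the button; switching it on enables the button and
#immediately opens the loss-weight popup.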
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
if on_or_off==False:#0 means switched OFF
item_ui.lineEdit_lossW.setText("")
item_ui.pushButton_lossW.setEnabled(False)
#this happens when the user activated the expert option "loss weights"
elif on_or_off==True:#2 means switched ON
#Activate button
item_ui.pushButton_lossW.setEnabled(True)
self.lossWeights_popup(listindex)
def lossWeights_popup(self,listindex):
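#Open a popup table listing, per training class, the total number of events, the
#events per epoch, their fraction of all training events, and an (initially disabled)
#spinbox holding the loss weight; OK/Cancel and the combobox are wired to
#lossW_ok/lossW_cancel/lossW_comboB.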
if listindex==-1:
item_ui = self
SelectedFiles = self.items_clicked()
else:
item_ui = self.fittingpopups_ui[listindex]
SelectedFiles = item_ui.SelectedFiles
item_ui.popup_lossW = MyPopup()
item_ui.popup_lossW_ui = aid_frontend.popup_lossweights()
item_ui.popup_lossW_ui.setupUi(item_ui.popup_lossW) #open a popup to show the numbers of events in each class in a table
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 5 columns: Class, Events tot., Events/Epoch, Events/Epoch[%], Loss weight
item_ui.popup_lossW_ui.tableWidget_lossW.setColumnCount(5)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
item_ui.popup_lossW_ui.tableWidget_lossW.setRowCount(nr_rows)
#Define the table's column headers
header_labels = ["Class", "Events tot." ,"Events/Epoch", "Events/Epoch[%]", "Loss weight"]
item_ui.popup_lossW_ui.tableWidget_lossW.setHorizontalHeaderLabels(header_labels)
header = item_ui.popup_lossW_ui.tableWidget_lossW.horizontalHeader()
for i in range(len(header_labels)):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Fill the table
rowPosition = 0
#Training info
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
#Total nr of cells for each index
for index in np.unique(indices_train):
colPos = 0 #"Class" #put the index (class!) in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
colPos = 1 #"Events tot."
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 2 #"Events/Epoch"
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 3 #"Events/Epoch[%]"
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.round(np.sum(nr_events_epoch)/float(nr_events_train_total),2)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 4 #"Loss weights"
#for each class create a spinbox holding the loss weight
spinb = QtWidgets.QDoubleSpinBox(item_ui.popup_lossW_ui.tableWidget_lossW)
spinb.setEnabled(False)
spinb.setMinimum(-99999)
spinb.setMaximum(99999)
spinb.setSingleStep(0.1)
spinb.setValue(1.0) #Default in Keras is "None", which means class_weight=1.0
item_ui.popup_lossW_ui.tableWidget_lossW.setCellWidget(rowPosition, colPos, spinb)
rowPosition += 1
item_ui.popup_lossW_ui.tableWidget_lossW.resizeColumnsToContents()
item_ui.popup_lossW_ui.tableWidget_lossW.resizeRowsToContents()
item_ui.popup_lossW.show()
item_ui.popup_lossW_ui.pushButton_pop_lossW_cancel.clicked.connect(lambda: self.lossW_cancel(listindex))
item_ui.popup_lossW_ui.pushButton_pop_lossW_ok.clicked.connect(lambda: self.lossW_ok(listindex))
item_ui.popup_lossW_ui.comboBox_lossW.currentIndexChanged.connect(lambda on_or_off: self.lossW_comboB(on_or_off,listindex))
def optimizer_change_settings_popup(self,listindex):
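#Open the advanced optimizer popup, pre-fill it from item_ui.optimizer_settings,
#keep the main learning-rate fields in sync while values are edited, and write all
#values back to optimizer_settings on OK (Reset restores the defaults from
#aid_dl.get_optimizer_settings()).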
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_optim = MyPopup()
item_ui.popup_optim_ui = aid_frontend.Ui_Form_expt_optim()
item_ui.popup_optim_ui.setupUi(item_ui.popup_optim) #open a popup to show advances settings for optimizer
##Manual insertion##
optimizer_name = item_ui.optimizer_settings["comboBox_optimizer"].lower()
if optimizer_name=='sgd':
item_ui.popup_optim_ui.radioButton_sgd.setChecked(True)
elif optimizer_name=='rmsprop':
item_ui.popup_optim_ui.radioButton_rms.setChecked(True)
elif optimizer_name=='adagrad':
item_ui.popup_optim_ui.radioButton_adagrad.setChecked(True)
elif optimizer_name=='adadelta':
item_ui.popup_optim_ui.radioButton_adadelta.setChecked(True)
elif optimizer_name=='adam':
item_ui.popup_optim_ui.radioButton_adam.setChecked(True)
elif optimizer_name=='adamax':
item_ui.popup_optim_ui.radioButton_adamax.setChecked(True)
elif optimizer_name=='nadam':
item_ui.popup_optim_ui.radioButton_nadam.setChecked(True)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(item_ui.optimizer_settings["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(item_ui.optimizer_settings["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"])
def change_lr(lr):
item_ui.doubleSpinBox_learningRate.setValue(lr)
item_ui.doubleSpinBox_expDecInitLr.setValue(lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.valueChanged.connect(change_lr)
def change_optimizer(optimizer_name):
index = item_ui.comboBox_optimizer.findText(optimizer_name, QtCore.Qt.MatchFixedString)
if index >= 0:
item_ui.comboBox_optimizer.setCurrentIndex(index)
#get the learning rate for that optimizer
lr = item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_name.lower()]
change_lr(lr)
item_ui.popup_optim_ui.radioButton_adam.toggled.connect(lambda: change_optimizer("Adam"))
item_ui.popup_optim_ui.radioButton_sgd.toggled.connect(lambda: change_optimizer("SGD"))
item_ui.popup_optim_ui.radioButton_rms.toggled.connect(lambda: change_optimizer("RMSprop"))
item_ui.popup_optim_ui.radioButton_adagrad.toggled.connect(lambda: change_optimizer("Adagrad"))
item_ui.popup_optim_ui.radioButton_adadelta.toggled.connect(lambda: change_optimizer("Adadelta"))
item_ui.popup_optim_ui.radioButton_adamax.toggled.connect(lambda: change_optimizer("Adamax"))
item_ui.popup_optim_ui.radioButton_nadam.toggled.connect(lambda: change_optimizer("Nadam"))
def ok():
doubleSpinBox_lr_sgd = float(item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.value())
doubleSpinBox_sgd_momentum = float(item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.value())
checkBox_sgd_nesterov = bool(item_ui.popup_optim_ui.checkBox_sgd_nesterov.isChecked())
doubleSpinBox_lr_rmsprop = float(item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.value())
doubleSpinBox_rms_rho = float(item_ui.popup_optim_ui.doubleSpinBox_rms_rho.value())
doubleSpinBox_lr_adam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adam.value())
doubleSpinBox_adam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.value())
doubleSpinBox_adam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.value())
checkBox_adam_amsgrad = bool(item_ui.popup_optim_ui.checkBox_adam_amsgrad.isChecked())
doubleSpinBox_lr_adadelta = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.value())
doubleSpinBox_adadelta_rho = float(item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.value())
doubleSpinBox_lr_nadam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.value())
doubleSpinBox_nadam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.value())
doubleSpinBox_nadam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.value())
doubleSpinBox_lr_adagrad = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.value())
doubleSpinBox_lr_adamax = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.value())
doubleSpinBox_adamax_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.value())
doubleSpinBox_adamax_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.value())
item_ui.optimizer_settings["doubleSpinBox_lr_sgd"] = doubleSpinBox_lr_sgd
item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"] = doubleSpinBox_sgd_momentum
item_ui.optimizer_settings["checkBox_sgd_nesterov"] = checkBox_sgd_nesterov
item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"] = doubleSpinBox_lr_rmsprop
item_ui.optimizer_settings["doubleSpinBox_rms_rho"] = doubleSpinBox_rms_rho
item_ui.optimizer_settings["doubleSpinBox_lr_adam"] = doubleSpinBox_lr_adam
item_ui.optimizer_settings["doubleSpinBox_adam_beta1"] = doubleSpinBox_adam_beta1
item_ui.optimizer_settings["doubleSpinBox_adam_beta2"] = doubleSpinBox_adam_beta2
item_ui.optimizer_settings["checkBox_adam_amsgrad"] = checkBox_adam_amsgrad
item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"] = doubleSpinBox_lr_adadelta
item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"] = doubleSpinBox_adadelta_rho
item_ui.optimizer_settings["doubleSpinBox_lr_nadam"] = doubleSpinBox_lr_nadam
item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"] = doubleSpinBox_nadam_beta1
item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"] = doubleSpinBox_nadam_beta2
item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"] = doubleSpinBox_lr_adagrad
item_ui.optimizer_settings["doubleSpinBox_lr_adamax"] = doubleSpinBox_lr_adamax
item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"] = doubleSpinBox_adamax_beta1
item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"] = doubleSpinBox_adamax_beta2
#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
print("Advanced settings for optimizer were changed.")
def cancel():#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
def reset():
print("Reset optimizer settings (in UI). To accept, click OK")
optimizer_default = aid_dl.get_optimizer_settings()
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(optimizer_default["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(optimizer_default["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(optimizer_default["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(optimizer_default["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(optimizer_default["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(optimizer_default["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(optimizer_default["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(optimizer_default["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(optimizer_default["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(optimizer_default["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(optimizer_default["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(optimizer_default["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(optimizer_default["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(optimizer_default["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(optimizer_default["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(optimizer_default["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(optimizer_default["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(optimizer_default["doubleSpinBox_adamax_beta2"])
item_ui.popup_optim_ui.pushButton_ok.clicked.connect(ok)
item_ui.popup_optim_ui.pushButton_cancel.clicked.connect(cancel)
item_ui.popup_optim_ui.pushButton_reset.clicked.connect(reset)
item_ui.popup_optim.show()
def onLayoutChange(self,app):
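#Switch the application stylesheet (Normal, Dark, or DarkOrange) based on the triggered menu action
#and persist the choice in Default_dict so it is re-applied on the next start.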
#Get the text of the triggered layout
layout_trig = (self.sender().text()).split(" layout")[0]
layout_current = Default_dict["Layout"]
if layout_trig == layout_current:
self.statusbar.showMessage(layout_current+" layout is already in use",2000)
return
elif layout_trig == "Normal":
#Change Layout in Defaultdict to "Normal", such that next start will use Normal layout
Default_dict["Layout"] = "Normal"
app.setStyleSheet("")
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "Dark":
#Change Layout in Defaultdict to "Dark", such that next start will use Dark layout
Default_dict["Layout"] = "Dark"
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "DarkOrange":
#Change Layout in Defaultdict to "Dark", such that next start will use Dark layout
Default_dict["Layout"] = "DarkOrange"
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
#Save the layout to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def onTooltipOnOff(self,app):
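#Re-apply the stylesheet of the currently active layout, choosing the "_notooltip" variant
#(or a transparent-tooltip style for the Normal layout) when tooltips are switched off.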
#what is the current layout?
if bool(self.actionLayout_Normal.isChecked())==True: #use normal layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
app.setStyleSheet("")
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
app.setStyleSheet("""QToolTip {
opacity: 0
}""")
elif bool(self.actionLayout_Dark.isChecked())==True: #use dark layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_dark_notooltip.txt")#dir to settings
f = open(dir_layout, "r")#I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionLayout_DarkOrange.isChecked())==True: #use darkorange layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange_notooltip.txt")#dir to settings
f = open(dir_layout, "r")
f = f.read()
app.setStyleSheet(f)
def onIconThemeChange(self):
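#Store the selected icon theme in Default_dict; the new icons are only picked up after a restart.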
#Get the text of the triggered icon theme
icontheme_trig = self.sender().text()
icontheme_current = Default_dict["Icon theme"]
if icontheme_trig == icontheme_current:
self.statusbar.showMessage(icontheme_current+" is already in use",2000)
return
elif icontheme_trig == "Icon theme 1":
Default_dict["Icon theme"] = "Icon theme 1"
self.statusbar.showMessage("Icon theme 1 will be used after restart",2000)
elif icontheme_trig == "Icon theme 2":
Default_dict["Icon theme"] = "Icon theme 2"
self.statusbar.showMessage("Icon theme 2 will be used after restart",2000)
#Save the layout to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def items_clicked(self):
#This function checks which data has been checked on table_dragdrop and returns the necessary data
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
#How many Events contains dataset in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)#rtdc_ds.hash
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
return SelectedFiles
def items_available(self):
"""
Grab all information from table_dragdrop (both checked and unchecked rows).
Does not load rtdc_ds (saves time)
"""
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many Events contains dataset in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"NotSpecified","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def items_clicked_no_rtdc_ds(self):
#This function checks which data has been checked on table_dragdrop and returns the necessary data
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many Events contains dataset in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def uncheck_if_zero(self,item):
#If the Nr. of epochs is changed to zero:
#uncheck the dataset for train/valid
row = item.row()
col = item.column()
#if the user changed Nr. of cells per epoch to zero
if col==6 and int(item.text())==0:
#get the checkstate of the coresponding T/V
cb_t = self.table_dragdrop.item(row, 2)
if cb_t.checkState() == QtCore.Qt.Checked:
cb_t.setCheckState(QtCore.Qt.Unchecked)
cb_v = self.table_dragdrop.item(row, 3)
if cb_v.checkState() == QtCore.Qt.Checked:
cb_v.setCheckState(QtCore.Qt.Unchecked)
def item_click(self,item):
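#Handle single clicks on table_dragdrop: if "Shuffle" (col 8) is unchecked, the "Events/Epoch" cell
#is set to the total number of images and disabled; re-checking makes it editable again.
#Any click also clears the RAM cache and resets the validation set and metrics.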
colPosition = item.column()
rowPosition = item.row()
#if Shuffle was clicked (col=8), check if this checkbox is not deactivated
if colPosition==8:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#Inspect this table item. If shuffle was checked before, it will be grayed out. Invert normal cell then
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
if len(self.ram)>0:
self.statusbar.showMessage("Make sure to update RAM (->Edit->Data to RAM now) after changing Data-set",2000)
self.ram = dict() #clear the ram, since the data was changed
self.dataOverviewOn()
#When data is clicked, always reset the validation set (only important for 'Assess Model'-tab)
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
def dataOverviewOn(self):
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def dataOverviewOn_OnChange(self,item):
#When a value is entered in Events/Epoch and enter is hit
#there is no update of the table called
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
rowPosition = item.row()
colPosition = item.column()
if colPosition==6: #when using the spinbox (Class) or entering a new number in "Events/Epoch", the table is not updated automatically
#get the new value
nr_cells = self.table_dragdrop.cellWidget(rowPosition, colPosition)
if nr_cells==None:
return
else:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def update_data_overview(self,SelectedFiles):
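#Rebuild tableWidget_Info: for each class, show the total events and events/epoch of the
#training and validation data, keeping user-defined class names if they were entered before.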
#Check if there are custom class names (determined by user)
rows = self.tableWidget_Info.rowCount()
self.classes_custom = [] #by default assume there are no custom classes
classes_custom_bool = False
if rows>0:#if >0, then there is already a table existing
classes,self.classes_custom = [],[]
for row in range(rows):
try:
class_ = self.tableWidget_Info.item(row,0).text()
if class_.isdigit():
classes.append(class_)#get the classes
except:
pass
try:
self.classes_custom.append(self.tableWidget_Info.item(row,3).text())#get the classes
except:
pass
classes = np.unique(classes)
if len(classes)==len(self.classes_custom):#equal in length
same = [i for i, j in zip(classes, self.classes_custom) if i == j] #which items are identical?
if len(same)==0:
#apparently there are custom classes! Save them
classes_custom_bool = True
if len(SelectedFiles)==0:#reset the table
#Table1
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setSectionResizeMode(i, QtWidgets.QHeaderView.ResizeToContents) #PyQt5 API (setResizeMode was renamed)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = 2*nr_ind+2 #add two rows for intermediate headers (Train/Valid)
self.tableWidget_Info.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
#self.tableWidget_Info.resizeColumnsToContents()
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setSectionResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Training info
rowPosition = 0
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Train. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
classes = np.unique(indices_train)
if len(classes)==len(self.classes_custom):
classes_custom_bool = True
else:
classes_custom_bool = False
#display information for each individual class
for index_ in range(len(classes)):
#for index in np.unique(indices_train):
index = classes[index_]
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
#Total nr of cells for each class
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
if classes_custom_bool==False:
item.setData(QtCore.Qt.EditRole,str(index))
else:
item.setData(QtCore.Qt.EditRole,self.classes_custom[index_])
self.tableWidget_Info.setItem(rowPosition, 3, item)
rowPosition += 1
#Validation info
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Val. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
rowPosition += 1
self.tableWidget_Info.resizeColumnsToContents()
self.tableWidget_Info.resizeRowsToContents()
def update_data_overview_2(self,SelectedFiles):
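#Rebuild tableWidget_Info_2: one row per validation class with events/epoch, a color swatch and an
#editable name; ValidationSet and Metrics are reset, since this table presumably feeds the 'Assess Model' tab.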
if len(SelectedFiles)==0:
#Table2
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
#Initiate the table with 4 columns : this will be ["Index","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setSectionResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 4 columns : this will be ["Index","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
self.tableWidget_Info_2.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setSectionResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
rowPosition = 0
#Validation info
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info_2.setItem(rowPosition, 1, item)
#Column for color
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, "")
item.setBackground(QtGui.QColor(self.colorsQt[index]))
self.tableWidget_Info_2.setItem(rowPosition, 2, item)
#Column for User specified name
item = QtWidgets.QTableWidgetItem()
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 3, item)
rowPosition += 1
self.tableWidget_Info_2.resizeColumnsToContents()
self.tableWidget_Info_2.resizeRowsToContents()
def tableWidget_Info_2_click(self,item):
if item is not None:
if item.column()==2:
tableitem = self.tableWidget_Info_2.item(item.row(), item.column())
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
def tableWidget_HistoryItems_dclick(self,item):
if item is not None:
tableitem = self.tableWidget_HistoryItems.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
self.update_historyplot()
def select_all(self,col):
"""
Check/Uncheck items on table_dragdrop
"""
apply_at_col = [2,3,8,10]
if col not in apply_at_col:
return
#otherwiese continue
rows = range(self.table_dragdrop.rowCount()) #Number of rows of the table
tableitems = [self.table_dragdrop.item(row, col) for row in rows]
checkStates = [tableitem.checkState() for tableitem in tableitems]
#Checked?
checked = [state==QtCore.Qt.Checked for state in checkStates]
if set(checked)=={True}:#all are checked!
#Uncheck all!
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Unchecked)
else:#otherwise check all
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Checked)
#If shuffle column was clicked do some extra
if col==8:
for rowPosition in rows:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#Inspect this table item. If shuffle was checked before, it will be grayed out. Invert normal cell then
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
#Finally, update the Data-Overview-Box
self.dataOverviewOn()#update the overview box
def item_dclick(self, item):
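#Double-click handler for table_dragdrop: toggles the Train/Valid checkboxes (columns 2/3) and,
#for the filename column, opens a popup showing a random example image plus its cropped version.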
#Check/Uncheck if item is from column 2 or 3
tableitem = self.table_dragdrop.item(item.row(), item.column())
if item.column() in [2,3]:
#If the item is unchecked ->check it!
if tableitem.checkState() == QtCore.Qt.Unchecked:
tableitem.setCheckState(QtCore.Qt.Checked)
#else, the other way around
elif tableitem.checkState() == QtCore.Qt.Checked:
tableitem.setCheckState(QtCore.Qt.Unchecked)
#Show example image if item on column 0 was dclicked
if item.column() == 0:
#rtdc_path = str(item.text())
#rtdc_path = tableitem.text()
rtdc_path = self.table_dragdrop.cellWidget(item.row(), item.column()).text()
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
nr_images = rtdc_ds["events"]["image"].len()
ind = np.random.randint(0,nr_images)
img = rtdc_ds["events"]["image"][ind]
if len(img.shape)==2:
height, width = img.shape
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.label_image = QtWidgets.QLabel(self.w)
self.label_cropimage = QtWidgets.QLabel(self.w)
#zoom image such that longest side is 512
zoom_factor = np.round(float(512.0/np.max(img.shape)),0)
#Get the order, specified in Image processing->Zoom Order
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Convert to corresponding cv2 zooming method
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_zoomed = cv2.resize(img, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_zoomed.shape
if channels==3:
height, width, _ = img_zoomed.shape
if channels==1:
qi=QtGui.QImage(img_zoomed.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
qi = QtGui.QImage(img_zoomed.data,img_zoomed.shape[1], img_zoomed.shape[0], QtGui.QImage.Format_RGB888)
self.label_image.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_image, 1,1)
#get the location of the cell
rowPosition = item.row()
pix = float(self.table_dragdrop.item(rowPosition, 7).text())
#pix = rtdc_ds.config["imaging"]["pixel size"]
PIX = pix
pos_x,pos_y = rtdc_ds["events"]["pos_x"][ind]/PIX,rtdc_ds["events"]["pos_y"][ind]/PIX
cropsize = self.spinBox_imagecrop.value()
y1 = int(round(pos_y))-cropsize/2
x1 = int(round(pos_x))-cropsize/2
y2 = y1+cropsize
x2 = x1+cropsize
#Crop the image
img_crop = img[int(y1):int(y2),int(x1):int(x2)]
#zoom image such that the height gets the same as for non-cropped img
zoom_factor = float(img_zoomed.shape[0])/img_crop.shape[0]
if zoom_factor == np.inf: #guard against an invalid zoom factor
if self.actionVerbose.isChecked()==True:
print("Set resize factor to 1. Before, it was: "+str(zoom_factor))
zoom_factor = 1
#Get the order, specified in Image processing->Zoom Order
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #use the combobox-index as zoom order, consistent with the full-frame image above
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_crop = cv2.resize(img_crop, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_crop.shape
qi=QtGui.QImage(img_crop.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
height, width, _ = img_crop.shape
qi = QtGui.QImage(img_crop.data,width, height, QtGui.QImage.Format_RGB888)
self.label_cropimage.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_cropimage, 1,2)
self.w.show()
def get_norm_from_modelparafile(self):
#Get the normalization method from a modelparafile
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta file (*meta.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
norm = pd.read_excel(filename,sheet_name='Parameters')["Normalization"]
norm = str(norm[0])
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid normalization method was specified.\
Likely this version of AIDeveloper does not support that normalization method\
Please define a valid normalization method")
msg.setDetailedText("Supported normalization methods are: "+"\n".join(self.norm_methods))
msg.setWindowTitle("Invalid Normalization method")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("Invalid Normalization method")
def update_plottingTab(self):
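#Load the .rtdc file chosen in comboBox_chooseRtdcFile, sort its features into scalar (0d),
#trace-like (1d) and image-like (2d) keys, fill the scatterplot comboboxes and initialize
#the contour/centroid/1d/2d option popups.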
#Get current text of combobox (url to data set)
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
keys = list(rtdc_ds["events"].keys())
#find keys of image_channels
keys_0d,keys_1d,keys_2d = [],[],[]
for key in keys:
if type(rtdc_ds["events"][key])==h5py._hl.dataset.Dataset:
shape = rtdc_ds["events"][key].shape
if len(shape)==1: #zero-dimensional info (single number per cell)
keys_0d.append(key)
elif len(shape)==2: #one-dimensional info (multiple numbers per cell)
keys_1d.append(key)
elif len(shape)==3: #two-dimensional info (images)
keys_2d.append(key)
#add the traces to the 1d features
if "trace" in keys:
for key_trace in list(rtdc_ds["events"]["trace"].keys()):
keys_1d.append(key_trace+" (RTFDC)")
#Sort keys_2d: "image" first; "mask" last (guard against missing keys)
if "image" in keys_2d:
keys_2d.insert(0, keys_2d.pop(keys_2d.index("image")))
if "mask" in keys_2d:
keys_2d.insert(len(keys_2d), keys_2d.pop(keys_2d.index("mask")))
#Fill those features in the comboboxes at the scatterplot
self.comboBox_featurex.addItems(keys_0d)
self.comboBox_featurey.addItems(keys_0d)
#check if masks or contours are available
cont_available = "mask" in keys or "contour" in keys
self.checkBox_contour.setEnabled(cont_available)
self.checkBox_contour.setChecked(cont_available)
#Centroid is always available (prerequisite for AIDeveloper)
self.checkBox_centroid.setEnabled(True)
self.checkBox_centroid.setChecked(True)
#Intialize option menus
self.contour_options_nr = 0
self.centroid_options_nr = 0
self.show_1d_options_nr = 0
self.show_2d_options_nr = 0
self.init_contour_options(keys_2d)
self.init_centroid_options(keys_1d)
self.init_2d_options(keys_2d)
self.init_1d_options(keys_1d)
def init_contour_options(self,keys_2d):
print("Work in progress")
# self.popup_layercontrols = MyPopup()
# self.popup_layercontrols_ui = frontend.Ui_LayerControl()
# self.popup_layercontrols_ui.setupUi(self.popup_layercontrols,keys_2d) #open a popup
def init_centroid_options(self,keys_image):
print("Work in progress")
# self.popup_centroid_options = MyPopup()
# self.popup_centroid_options_ui = aid_frontend.Ui_centroid_options()
# self.popup_centroid_options_ui.setupUi(self.popup_centroid_options,keys_image) #open a popup
def init_2d_options(self,keys_2d):
#Initialize 2d Option Menu. Range values are saved and manipulated here
self.popup_2dOptions = MyPopup()
self.popup_2dOptions_ui = aid_frontend.Ui_2dOptions()
self.popup_2dOptions_ui.setupUi(self.popup_2dOptions,keys_2d) #open a popup
def init_1d_options(self,keys_1d):
self.popup_1dOptions = MyPopup()
self.popup_1dOptions_ui = aid_frontend.Ui_1dOptions()
self.popup_1dOptions_ui.setupUi(self.popup_1dOptions,keys_1d) #open a popup
def show_contour_options(self):
self.contour_options_nr += 1
print("Work in progress")
def show_centroid_options(self):
print("Work in progress")
self.centroid_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.centroid_options_nr==1:
for iterator in range(len(self.popup_layercontrols_ui.spinBox_minChX)):
print(1)
def show_2d_options(self):
self.show_2d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_2d_options_nr==1:
for iterator in range(len(self.popup_2dOptions_ui.spinBox_minChX)):
slider = self.popup_2dOptions_ui.horizontalSlider_chX[iterator]
slider.startValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
slider.endValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
comboBox = self.popup_2dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.currentIndexChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_auto_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
self.popup_2dOptions.show()
def show_1d_options(self):
self.show_1d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_1d_options_nr==1:
for iterator in range(len(self.popup_1dOptions_ui.checkBox_show_chX)):
checkBox = self.popup_1dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_line(index=b))
comboBox = self.popup_1dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.clicked.connect(lambda _, b=None: self.put_line(index=b))
self.popup_1dOptions.show()
def activate_deactivate_spinbox(self,newstate):
#get the checkstate of the Input model crop
if newstate==2:
#activate the spinbox
self.spinBox_imagecrop.setEnabled(True)
elif newstate==0:
self.spinBox_imagecrop.setEnabled(False)
def gray_or_rgb_augmentation(self,index):
#When Color-Mode is changed:
#Get the new colormode:
new_colormode = self.colorModes[index]
#when the new Color Mode is Grayscale, disable saturation and hue augmentation
if new_colormode=="Grayscale":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(False)
self.checkBox_saturation.setChecked(False)
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
self.checkBox_hue.setEnabled(False)
self.checkBox_hue.setChecked(False)
self.doubleSpinBox_hueDelta.setEnabled(False)
elif new_colormode=="RGB":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(True)
self.checkBox_saturation.setChecked(True)
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
self.checkBox_hue.setEnabled(True)
self.checkBox_hue.setChecked(True)
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
print("Invalid Color Mode")
def onClick(self,points,pointermethod):
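#Select a cell either by a click in the scatterplot (pointermethod=="point") or by an index coming
#from the slider/spinbox (pointermethod=="index"), mark it in red and update image and trace plots.
#(Illustrative call: self.onClick(42,"index") would select cell 42.)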
#delete the last item if the user selected already one:
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
if pointermethod=="point":
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.feature_x))
a2 = (clicked_y)/float(np.max(self.feature_y))
#Which is the closest scatter point?
dist = np.sqrt(( a1-self.scatter_x_norm )**2 + ( a2-self.scatter_y_norm )**2)
index = np.argmin(dist)
elif pointermethod=="index":
index = points
clicked_x = self.feature_x[index]
clicked_y = self.feature_y[index]
self.point_clicked = pg.ScatterPlotItem()
self.point_clicked.setData([clicked_x], [clicked_y],brush="r",symbol='o',symbolPen="w",size=15)
self.scatter_xy.addItem(self.point_clicked)
#self.scatter_xy.plot([clicked_x], [clicked_y],pen=None,symbol='o',symbolPen='w',clear=False)
self.point_was_selected_before = True
#Whether the user clicked or used the slider: always adjust spinbox and slider without running the onChange functions
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
self.put_image(index)
self.put_line(index)
def put_image(self,ind):
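#Display the image of cell 'ind' (or the spinBox_cellInd value if ind is None) in widget_showCell.
#In "Grayscale" mode the raw layer stack is shown; in "RGB" mode the layers are clipped to the
#ranges set in the 2d-options popup and averaged into red/green/blue channels before display.
#Optionally overlays the centroid (red dot) and the contour derived from the mask.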
#check that the user is looking at the plotting tab
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.widget_showCell.removeItem(self.plot_contour)
except:
pass
if ind==None:
index = int(self.spinBox_cellInd.value())
else:
index = ind
rtdc_ds = self.rtdc_ds
#which channel should be displayed
channels = len(self.popup_2dOptions_ui.spinBox_minChX)
keys_2d = [self.popup_2dOptions_ui.label_layername_chX[i].text() for i in range(channels)]
#Define variable on self that carries all image information
if channels==1:
img = np.expand_dims(rtdc_ds["events"]["image"][index],-1)
elif channels>1:
img = np.stack( [rtdc_ds["events"][key][index] for key in keys_2d] ,axis=-1)
if len(img.shape)==2:
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
color_mode = str(self.comboBox_GrayOrRGB_2.currentText())
if color_mode=="Grayscale": #Slider allows to show individual layers: each is shown as grayscale
img = img
elif color_mode == "RGB":#User can define, which layers are shown in R,G,and B
#Retrieve the settings from self.popup_2dOptions_ui
ui_item = self.popup_2dOptions_ui
layer_names = [obj.text() for obj in ui_item.label_layername_chX]
layer_active = [obj.isChecked() for obj in ui_item.checkBox_show_chX]
layer_range = [obj.getRange() for obj in ui_item.horizontalSlider_chX]
layer_auto = [obj.isChecked() for obj in ui_item.checkBox_auto_chX]
layer_cmap = [obj.currentText() for obj in ui_item.comboBox_cmap_chX]
#Assemble the image according to the settings in self.popup_2dOptions_ui
#Find activated layers for each color:
ind_active_r,ind_active_g,ind_active_b = [],[],[]
for ch in range(len(layer_cmap)):
#for color,active in zip(layer_cmap,layer_active):
if layer_cmap[ch]=="Red" and layer_active[ch]==True:
ind_active_r.append(ch)
if layer_cmap[ch]=="Green" and layer_active[ch]==True:
ind_active_g.append(ch)
if layer_cmap[ch]=="Blue" and layer_active[ch]==True:
ind_active_b.append(ch)
if len(ind_active_r)>0:
img_ch = img[:,:,np.array(ind_active_r)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_r)] #Range of all red channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_r)] #Automatic range
#Scale each red channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_r = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_r = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_g)>0:
img_ch = img[:,:,np.array(ind_active_g)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_g)] #Range of all green channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_g)] #Automatic range
#Scale each green channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_g = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_g = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_b)>0:
img_ch = img[:,:,np.array(ind_active_b)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_b)] #Range of all blue channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_b)] #Automatic range
#Scale each blue channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_b = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_b = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
#Assemble image by stacking all layers
img = np.stack([img_r,img_g,img_b],axis=-1)
#Get the levels of the previous frame
levels_init = self.widget_showCell.getLevels()
if levels_init==(0,1.0):
levels_init = (0,255)
#Get the layer index of the previous frame
index_ = self.widget_showCell.currentIndex
if color_mode=="Grayscale":
self.widget_showCell.setImage(img.T,autoRange=False,levels=levels_init,levelMode="mono")
self.widget_showCell.setCurrentIndex(index_)
elif color_mode=="RGB":
self.widget_showCell.setImage(np.swapaxes(img,0,1))
pix = rtdc_ds.attrs["imaging:pixel size"]
pos_x = rtdc_ds["events"]["pos_x"][index]/pix
pos_y = rtdc_ds["events"]["pos_y"][index]/pix
#Indicate the centroid of the cell
if self.checkBox_centroid.isChecked():
self.dot = pg.CircleROI(pos=(pos_x-2, pos_y-2), size=4, pen=QtGui.QPen(QtCore.Qt.red, 0.1), movable=False)
self.widget_showCell.getView().addItem(self.dot)
self.widget_showCell.show()
if self.checkBox_contour.isChecked():
#get the contour based on the mask
contour,_ = cv2.findContours(rtdc_ds["events"]["mask"][index], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contour = contour[0][:,0,:]
self.plot_contour = pg.PlotCurveItem(contour[:,0],contour[:,1],width=6,pen="r")
self.widget_showCell.getView().addItem(self.plot_contour)
def put_line(self,index):
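#Plot the 1d features (e.g. fluorescence traces) selected in the 1d-options popup for the given cell index.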
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
#Fluorescence traces: clear first
try:
self.plot_fl_trace_.clear() #clear the plot
self.plot_fl_trace.clear() #clear the plot
except:
pass
if index==None:
index = int(self.spinBox_cellInd.value())
rtdc_ds = self.rtdc_ds
feature_keys = list(rtdc_ds.keys())
#which features should be displayed
features_nr = len(self.popup_1dOptions_ui.checkBox_show_chX)
keys_1d = [self.popup_1dOptions_ui.checkBox_show_chX[i].text() for i in range(features_nr)]
keys_1d_on = [self.popup_1dOptions_ui.checkBox_show_chX[i].isChecked() for i in range(features_nr)]
colors = [self.popup_1dOptions_ui.comboBox_cmap_chX[i].palette().button().color() for i in range(features_nr)]
colors = [list(c.getRgb()) for c in colors]
colors = [tuple(c) for c in colors]
ind = np.where(np.array(keys_1d_on)==True)[0]
keys_1d = list(np.array(keys_1d)[ind])
colors = list(np.array(colors)[ind])
for key_1d,color in zip(keys_1d,colors):
if key_1d.endswith(" (RTFDC)"):
key_1d = key_1d.split(" (RTFDC)")[0]
trace_flx = rtdc_ds["events"]["trace"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(trace_flx)),trace_flx,width=6,pen=pencolor,clear=False)
# if "fl1_max" in feature_keys and "fl1_pos" in feature_keys: #if also the maxima and position of the max are available: use it to put the region accordingly
# fl1_max,fl1_pos = rtdc_ds["events"]["fl1_max"][index],rtdc_ds["events"]["fl1_pos"][index]
else:
values = rtdc_ds["events"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(values)),values,width=6,pen=pencolor,clear=False) #plot the selected 1d feature (not the previously loaded trace)
#get the maximum of [fl1_max,fl2_max,fl3_max] and put the region to the corresponding fl-position
# ind = np.argmax(np.array([fl1_max,fl2_max,fl3_max]))
# region_pos = np.array([fl1_pos,fl2_pos,fl3_pos])[ind] #this region is already given in us. translate this back to range
# peak_height = np.array([fl1_max,fl2_max,fl3_max])[ind]
# sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
# fl_pos_ind = float((sample_rate*region_pos))/1E6 #
# #Indicate the used flx_max and flx_pos by a scatter dot
# self.peak_dot = self.plot_fl_trace.plot([float(fl_pos_ind)], [float(peak_height)],pen=None,symbol='o',symbolPen='w',clear=False)
def onScatterClick(self,event, points):
pointermethod = 'point'
if self.changedbyuser:
self.onClick(points,pointermethod)
def onIndexChange(self,index):
pointermethod = 'index'
if self.changedbyuser:
self.onClick(index,pointermethod)
#Set self.changedbyuser to False and change the spinbox and slider. changedbyuser=False prevents onClick function
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
def updateScatterPlot(self):
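#Reload the chosen .rtdc file, scatter the selected x/y features (optionally colored by a 2d-histogram
#or Gaussian KDE), fill the marginal histograms and adjust the cell-index slider/spinbox limits.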
#If the Plot is updated, delete the dot in the cell-image
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
self.point_was_selected_before = False
#read url from current comboBox_chooseRtdcFile
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the 'Build' tab to load files first")
msg.setWindowTitle("No file selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
feature_x_name = str(self.comboBox_featurex.currentText())
feature_y_name = str(self.comboBox_featurey.currentText())
features = list(self.rtdc_ds["events"].keys())
if feature_x_name in features:
self.feature_x = self.rtdc_ds["events"][feature_x_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on x axis is not contained in data set")
msg.setWindowTitle("Invalid x feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if feature_y_name in features:
self.feature_y = self.rtdc_ds["events"][feature_y_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on y axis is not contained in data set")
msg.setWindowTitle("Invalid y feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.changedbyuser = True #variable used to prevent plotting if spinbox or slider is changed programmatically
#density estimation
kde = self.comboBox_kde.currentText()
if kde=="None":
brush = "b"
elif kde=="2d Histogram" or kde=="Gauss":
if kde=="2d Histogram":
density = aid_bin.kde_histogram(np.array(self.feature_x), np.array(self.feature_y))
elif kde=="Gauss":
density = aid_bin.kde_gauss(np.array(self.feature_x), np.array(self.feature_y))
density_min,density_max = np.min(density),np.max(density)
density = (density-density_min)/(density_max-density_min) #normalize density to [0,1] for the colormap
# define colormap
brush = []
from pyqtgraph.graphicsItems.GradientEditorItem import Gradients
cmap = pg.ColorMap(*zip(*Gradients["viridis"]["ticks"]))
for k in density:
brush.append(cmap.mapToQColor(k))
#Add plot
#self.scatter = self.scatter_xy.plot(np.array(self.feature_x), np.array(self.feature_y),symbolPen=None,pen=None,symbol='o',brush=brush[100],clear=True)
#try to remove existing scatterplot
try:
self.scatter_xy.removeItem(self.scatter)
except:
print("Not cleared")
self.scatter = pg.ScatterPlotItem()
self.scatter.setData(np.array(self.feature_x), np.array(self.feature_y),brush=brush,symbolPen=None,pen=None,symbol='o',size=10)
self.scatter_xy.addItem(self.scatter)
#pen=None,symbol='o',symbolPen=None,symbolBrush=density,clear=True)
self.scatter.sigClicked.connect(self.onScatterClick) #When scatterplot is clicked, show the desired cell
#Fill histogram for x-axis; widget_histx
y,x = np.histogram(self.feature_x, bins='auto')
self.hist_x.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
#Manually clear y hist first. Only clear=True did not do the job
self.hist_y.clear()
#Fill histogram for y-axis; widget_histy
y,x = np.histogram(self.feature_y, bins='auto')
curve = pg.PlotCurveItem(-1.*x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150),clear=True)
curve.rotate(-90)
self.hist_y.addItem(curve)
self.scatter_x_norm = (np.array(self.feature_x).astype(np.float32))/float(np.max(self.feature_x))
self.scatter_y_norm = (np.array(self.feature_y).astype(np.float32))/float(np.max(self.feature_y))
#Adjust the horizontalSlider_cellInd and spinBox_cellInd
self.horizontalSlider_cellInd.setSingleStep(1)
self.horizontalSlider_cellInd.setMinimum(0)
self.horizontalSlider_cellInd.setMaximum(len(self.feature_x)-1)
self.spinBox_cellInd.setMinimum(0)
self.spinBox_cellInd.setMaximum(len(self.feature_x)-1)
def selectPeakPos(self):
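#Append the currently picked peak (self.new_peak: fl_max, fl_pos, pos_x) as a new row of
#tableWidget_showSelectedPeaks, converting fl_pos to us and pos_x to um, then refresh the peak plot.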
#Check if self.region exists
#If not, show a message and return:
if not hasattr(self, 'region'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no region defined yet")
msg.setWindowTitle("No region defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Try to get the user defined peak position
if not hasattr(self, 'new_peak'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no peak defined yet")
msg.setWindowTitle("No peak defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#how much rows are already in table?
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+1)
rowPosition = rowcount
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_max"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
fl_pos_us = float(float(self.new_peak["fl_pos"])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"])
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
pos_x_um = float(self.new_peak["pos_x"])*float(self.rtdc_ds.attrs["imaging:pixel size"])
item.setData(QtCore.Qt.EditRole,pos_x_um)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_pos"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["pos_x"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
self.tableWidget_showSelectedPeaks.resizeColumnsToContents()
self.tableWidget_showSelectedPeaks.resizeRowsToContents()
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def selectPeakRange(self):
new_region = self.region.getRegion()
region_width = np.max(new_region) - np.min(new_region) #in [samples]
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
region_width = (float(region_width)/float(sample_rate))*1E6 #range[samples]/sample_rate[1/s] = range[s]; multiply by 1E6 to convert to us
self.region_width = region_width
#put this in the table
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
def onPeaksPlotClick(self,event, points):
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.Pos_x))
a2 = (clicked_y)/float(np.max(self.Fl_pos))
#Which is the closest scatter point?
pos_x_norm = self.Pos_x/np.max(self.Pos_x)#normalized pos_x
fl_pos_norm = self.Fl_pos/np.max(self.Fl_pos)#normalized fl_pos
dist = np.sqrt(( a1-pos_x_norm )**2 + ( a2-fl_pos_norm )**2)
index = np.argmin(dist)
#Highlight this row
self.tableWidget_showSelectedPeaks.selectRow(index)
#Delete the highlighted rows
# try:
# self.actionRemoveSelectedPeaks_function()
# except:
# pass
def update_peak_plot(self):
#This function reads tableWidget_showSelectedPeaks and
#fits a function and
#puts fitting parameters on tableWidget_peakModelParameters
#read the data on tableWidget_showSelectedPeaks
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
Fl_pos,Pos_x = [],[]
for row in range(rowcount):
line = [float(self.tableWidget_showSelectedPeaks.item(row, col).text()) for col in [1,2]] #use the values for [us] and [um]
Fl_pos.append(line[0])
Pos_x.append(line[1])
self.Fl_pos = np.array(Fl_pos)
self.Pos_x = np.array(Pos_x)
self.selectedPeaksPlotPlot = self.selectedPeaksPlot.plot(self.Pos_x, self.Fl_pos,pen=None,symbol='o',symbolPen=None,symbolBrush='b',clear=True)
#if user clicks in the plot, show him the corresponding row in the table
self.selectedPeaksPlotPlot.sigPointsClicked.connect(self.onPeaksPlotClick)
if not hasattr(self, 'region_width'): #if there was no region_width defined yet...
#to get a reasonable initial range, use 20% of the nr. of available samples
samples_per_event = self.rtdc_ds.attrs["fluorescence:samples per event"]
self.region_width = 0.2*samples_per_event #width of the region in samples
#Convert to SI unit:
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
self.region_width = (float(self.region_width)/float(sample_rate))*1E6 #range[samples]*(1/sample_rate[1/s]) = range[s]; multiply by 1E6 to convert to us
#which model should be used?
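#The peak-detection model handled here fits a straight line
#FL_pos[us] = intercept + slope * Pos_x[um] through the selected peaks
#(at least two points are required for np.polyfit with deg=1).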
if str(self.comboBox_peakDetModel.currentText()) == "Linear dependency and max in range" and len(Pos_x)>1:
slope,intercept = np.polyfit(Pos_x, Fl_pos,deg=1) #Linear FIT, y=mx+n; y=FL_pos[us] x=Pos_x[um]
xlin = np.round(np.linspace(np.min(Pos_x),np.max(Pos_x),25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
#Calculate velocity
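#The slope has units [us/um]; its reciprocal is [um/us], which is numerically
#identical to [m/s], so no further unit conversion is needed.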
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
velocity = float(1.0/float(slope))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def addHighestXPctPeaks(self):
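#For every enabled fluorescence channel (fl1/fl2/fl3) this finds the maximum of
#each event's trace, keeps only the brightest x% of events and appends their
#peak height, peak position and cell position to tableWidget_showSelectedPeaks.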
#how many x%?
x_pct = float(self.doubleSpinBox_highestXPercent.value())
#Get the fluorescence traces and maxima/positions of maxima
#->it could be that the user did not yet load the dataset:
if not hasattr(self,"rtdc_ds"):
#run the function updateScatterPlot()
self.updateScatterPlot()
trace = self.rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys())
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
for i in range(len(fl_keys)):
if "fl1_median" in fl_keys[i] and self.checkBox_fl1.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl1_max.append(trace_flx[ind])
fl1_pos.append(ind)
#Get the x% maxima
fl1_max = np.array(fl1_max)
fl1_pos = np.array(fl1_pos)
sorter = np.argsort(fl1_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl1_max))]
fl1_max = fl1_max[sorter]
fl1_pos = fl1_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl2_median" in fl_keys[i] and self.checkBox_fl2.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl2_max.append(trace_flx[ind])
fl2_pos.append(ind)
#Get the x% maxima
fl2_max = np.array(fl2_max)
fl2_pos = np.array(fl2_pos)
sorter = np.argsort(fl2_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl2_max))]
fl2_max = fl2_max[sorter]
fl2_pos = fl2_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl3_median" in fl_keys[i] and self.checkBox_fl3.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl3_max.append(trace_flx[ind])
fl3_pos.append(ind)
#Get the x% maxima
fl3_max = np.array(fl3_max)
fl3_pos = np.array(fl3_pos)
sorter = np.argsort(fl3_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl3_max))]
fl3_max = fl3_max[sorter]
fl3_pos = fl3_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
#Add fl1 fl2 and fl3 information
flx_max = np.array(list(fl1_max)+list(fl2_max)+list(fl3_max))
flx_pos = np.array(list(fl1_pos)+list(fl2_pos)+list(fl3_pos))
pos_x_um = np.concatenate(np.atleast_2d(np.array(pos_x)))
pix = self.rtdc_ds.attrs["imaging:pixel size"]
pos_x = pos_x_um/pix #convert from um to pix
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+len(flx_max))
for i in range(len(flx_max)):
rowPosition = rowcount+i
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_max[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
fl_pos_us = float(float(flx_pos[i])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"] )
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
#pos_x_um = float(pos_x[i])*float(self.rtdc_ds.config["imaging"]["pixel size"])
item.setData(QtCore.Qt.EditRole,float(pos_x_um[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_pos[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(pos_x[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def savePeakDetModel(self):
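#Exports the fitted peak model and the selected peaks to an .xlsx file with two
#sheets ('Model' and 'Peaks') using pandas' ExcelWriter (openpyxl engine).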
#Get tableWidget_peakModelParameters and write it to excel file
#Get filename from user:
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(filename)==0:
return
#add the suffix .xlsx
if not filename.endswith(".xlsx"):
filename = filename +".xlsx"
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
table = self.tableWidget_showSelectedPeaks
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
peaks_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
peaks_df.iloc[i, j] = table.item(i, j).text()
except:
peaks_df.iloc[i, j] = np.nan
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Write the model parameters and the selected peaks to separate sheets
pd.DataFrame().to_excel(writer,sheet_name='Model') #initialize empty sheet
model_df.to_excel(writer,sheet_name='Model')
pd.DataFrame().to_excel(writer,sheet_name='Peaks') #initialize empty sheet
peaks_df.to_excel(writer,sheet_name='Peaks')
writer.save()
writer.close()
def loadPeakDetModel(self):
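#Reads the 'Model' sheet of a previously saved peak model, restores the model
#choice in comboBox_peakDetModel and writes range, intercept, slope and velocity
#back into tableWidget_peakModelParameters; the fitted line is re-plotted.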
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
peak_model_df = pd.read_excel(filename,sheet_name='Model')
model = peak_model_df.iloc[0,1]
if model=="Linear dependency and max in range":
#set the combobox accordingly
index = self.comboBox_peakDetModel.findText(model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_peakDetModel.setCurrentIndex(index)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not find a valid model in the chosen file. Did you accidentially load a session or history file?!")
msg.setWindowTitle("No valid model found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
range_ = float(peak_model_df.iloc[1,1])
intercept = float(peak_model_df.iloc[2,1])
slope = float(peak_model_df.iloc[3,1])
velocity = float(peak_model_df.iloc[4,1])
#put the information in the table
xlin = np.round(np.linspace(np.min(0),np.max(100),25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(range_))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def applyPeakModel_and_export(self):
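#Applies the fitted linear model to the chosen .rtdc files: for each cell the
#expected peak position is estimated from pos_x, a window of width 'Range [us]'
#around that position is searched for the trace maximum, and the resulting
#fl*_max/fl*_pos values are written either into the original feature names or
#into userdef0..userdef5 of a new .rtdc copy in the chosen output folder.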
#On which files should the action be performed?
Files = []
if self.radioButton_exportAll.isChecked():
#Grab all items of comboBox_chooseRtdcFile
Files = [self.comboBox_chooseRtdcFile.itemText(i) for i in range(self.comboBox_chooseRtdcFile.count())]
else:
file = self.comboBox_chooseRtdcFile.currentText()
Files.append(str(file))
#Get the model from tableWidget_peakModelParameters
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
model = model_df.iloc[0,1]
if model == "Linear dependency and max in range":
range_us = float(model_df.iloc[1,1]) #[us]
intercept_us = float(model_df.iloc[2,1])
slope_us_um = float(model_df.iloc[3,1])
#velocity_m_s = float(model_df.iloc[4,1])
#Get a directory from the user!
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select directory', Default_dict["Path of last model"])
if len(folder)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid directory")
msg.setWindowTitle("Invalid directory")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
for rtdc_path in Files:
path, rtdc_file = os.path.split(rtdc_path)
savename = os.path.join(folder,rtdc_file)
#Avoid to save to an existing file:
addon = 1
while os.path.isfile(savename):
savename = savename.split(".rtdc")[0]
if addon>1:
savename = savename.split("_"+str(addon-1))[0]
savename = savename+"_"+str(addon)+".rtdc"
addon += 1
print("Saving to : "+savename)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Convert quantities to [index]
sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
range_ = (range_us*float(sample_rate))/1E6 #range was given in us->Divide by 1E6 to get to s and then multiply by the sample rate
# #check if an rtdc_ds is already chosen:
# if not hasattr(self,'rtdc_ds'):
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("No measurement chosen yet. Use 'Update' button")
# msg.setWindowTitle("No measurement")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
trace = rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys()) #Which traces are available
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
#Iterate over the available cells
pos_x = rtdc_ds["events"]["pos_x"] #is already given in [um]
indices = range(len(pos_x))
if model == "Linear dependency and max in range":
#Use the linear model to get the estimated location of the fluorescence peaks
fl_peak_position_us = intercept_us+slope_us_um*pos_x
#Convert to index
fl_peak_position_ = (fl_peak_position_us*float(sample_rate))/1E6
#Now we have the estimated peak position of each cell. Look at the traces on these spots
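#ind_to_us converts a trace index back to microseconds
#(index / sample_rate [Hz] = seconds; *1E6 = microseconds).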
def ind_to_us(x):
return x*1E6/sample_rate
#iterate over the cells:
for cellindex in range(len(pos_x)):
#Iterate over the availble traces
for i in range(len(fl_keys)):
if "_median" in fl_keys[i]:
trace_flx = trace[fl_keys[i]][cellindex]
trace_pos = np.array(range(len(trace_flx)))
left = int(fl_peak_position_[cellindex]-range_/2.0)
right = int(fl_peak_position_[cellindex]+range_/2.0)
trace_flx_range = trace_flx[left:right]
trace_pos_range = trace_pos[left:right]
ind = np.argmax(trace_flx_range)
if "fl1_median" in fl_keys[i]:
fl1_max.append(trace_flx_range[ind])
fl1_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl2_median" in fl_keys[i]:
fl2_max.append(trace_flx_range[ind])
fl2_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl3_median" in fl_keys[i]:
fl3_max.append(trace_flx_range[ind])
fl3_pos.append(ind_to_us(trace_pos_range[ind]))
#Save those new fluorescence features into free spots in .rtdc file
#Those names can be found via dclab.dfn.feature_names (userdef0...userdef9)
#TODO (dont use dclab anymore for saving)
#But just in case anyone uses that function?!
#get metadata of the dataset
meta = {}
# only export configuration meta data (no user-defined config)
for sec in dclab.definitions.CFG_METADATA:
if sec in ["fmt_tdms"]:
# ignored sections
continue
if sec in rtdc_ds.config:
meta[sec] = rtdc_ds.config[sec].copy()
#features = rtdc_ds._events.keys() #Get the names of the online features
compression = 'gzip'
nev = len(rtdc_ds)
#["Overwrite Fl_max and Fl_pos","Save to userdef"]
features = list(rtdc_ds["events"].keys())
if str(self.comboBox_toFlOrUserdef.currentText())=='Save to userdef':
features = features+["userdef"+str(i) for i in range(10)]
with dclab.rtdc_dataset.write_hdf5.write(path_or_h5file=savename,meta=meta, mode="append") as h5obj:
# write each feature individually
for feat in features:
# event-wise, because
# - tdms-based datasets don't allow indexing with numpy
# - there might be memory issues
if feat == "contour":
cont_list = [rtdc_ds["events"]["contour"][ii] for ii in indices]
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"contour": cont_list},
mode="append",
compression=compression)
elif feat == "userdef0":
if "fl1_median" in fl_keys:
print("writing fl1_max to userdef0")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef0": np.array(fl1_max)},
mode="append",
compression=compression)
elif feat == "userdef1":
if "fl2_median" in fl_keys:
print("writing fl2_max to userdef1")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef1": np.array(fl2_max)},
mode="append",
compression=compression)
elif feat == "userdef2":
if "fl3_median" in fl_keys:
print("writing fl3_max to userdef2")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef2": np.array(fl3_max)},
mode="append",
compression=compression)
elif feat == "userdef3":
if "fl1_pos" in features:
print("writing fl1_pos to userdef3")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef3": np.array(fl1_pos)},
mode="append",
compression=compression)
elif feat == "userdef4":
if "fl2_pos" in features:
print("writing fl2_pos to userdef4")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef4": np.array(fl2_pos)},
mode="append",
compression=compression)
elif feat == "userdef5":
if "fl3_pos" in features:
print("writing fl3_pos to userdef5")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef5": np.array(fl3_pos)},
mode="append",
compression=compression)
elif feat in ["userdef"+str(i) for i in range(5,10)]:
pass
elif feat == "fl1_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_max)},mode="append",compression=compression)
elif feat == "fl2_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_max)},mode="append",compression=compression)
elif feat == "fl3_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_max)},mode="append",compression=compression)
elif feat == "fl1_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_pos)},mode="append",compression=compression)
elif feat == "fl2_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_pos)},mode="append",compression=compression)
elif feat == "fl3_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_pos)},mode="append",compression=compression)
elif feat == "index":
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"index": np.array(indices)+1}, #ShapeOut likes to start with index=1
mode="append",
compression=compression)
elif feat in ["mask", "image"]:
# store image stacks (reduced file size and save time)
m = 64
if feat=='mask':
im0 = rtdc_ds["events"][feat][0]
if feat=="image":
im0 = rtdc_ds["events"][feat][0]
imstack = np.zeros((m, im0.shape[0], im0.shape[1]),
dtype=im0.dtype)
jj = 0
if feat=='mask':
image_list = [rtdc_ds["events"][feat][ii] for ii in indices]
elif feat=='image':
image_list = [rtdc_ds["events"][feat][ii] for ii in indices]
for ii in range(len(image_list)):
dat = image_list[ii]
#dat = rtdc_ds[feat][ii]
imstack[jj] = dat
if (jj + 1) % m == 0:
jj = 0
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack},
mode="append",
compression=compression)
else:
jj += 1
# write rest
if jj:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack[:jj, :, :]},
mode="append",
compression=compression)
elif feat == "trace":
for tr in rtdc_ds["events"]["trace"].keys():
tr0 = rtdc_ds["events"]["trace"][tr][0]
trdat = np.zeros((nev, tr0.size), dtype=tr0.dtype)
jj = 0
trace_list = [rtdc_ds["events"]["trace"][tr][ii] for ii in indices]
for ii in range(len(trace_list)):
trdat[jj] = trace_list[ii]
jj += 1
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"trace": {tr: trdat}},
mode="append",
compression=compression)
else:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: rtdc_ds["events"][feat][indices]},mode="append")
h5obj.close()
def partialtrainability_activated(self,on_or_off):
if on_or_off==False:#switched OFF
self.lineEdit_partialTrainability.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
#Also, remove the model from self!
self.model_keras = None
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")#put the filename in the lineedit
#this happens when the user activated the expert option "partial trainability"
elif on_or_off==True:#switched ON
#Has the user already chosen a model?
if self.model_keras == None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
#If there is still no model...
if self.model_keras == None:# or self.model_keras_path==None: #if there is no model yet chosen
#Tell the user to initiate a model first!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch off
self.lineEdit_partialTrainability.setText("")
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
self.checkBox_partialTrainability.setChecked(False)
return
#Otherwise, there is a model on self and we can continue :)
#Collections are not supported
if type(self.model_keras)==tuple:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Partial trainability is not available for collections of models. Please specify a single model.</p></body></html>")
msg.setWindowTitle("Collections of models not supported for collections of models")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Switch on lineedit and the button
#self.lineEdit_partialTrainability.setEnabled(True)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(True)#enables the button that opens the partial-trainability popup
#Load trainability states of the model
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#show the trainability status of each layer in the lineEdit
def partialTrainability(self):
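#Opens a popup that lists all Dense and Conv2D layers of the current model with
#name, type, parameter count and number of units/filters; a spinbox (0..1) per
#layer lets the user request a fractional trainability which is applied via
#pop_pTr_update_1 / partial_trainability.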
self.popup_trainability = MyPopup()
self.popup_trainability_ui = aid_frontend.popup_trainability()
self.popup_trainability_ui.setupUi(self.popup_trainability) #open a popup to show the layers in a table
#One can only activate this function when there was a model loaded already!
#self.model_keras has to exist!!!
if self.model_keras == None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
if self.model_keras == None: #if there is still no model...
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch this On in the final version
self.lineEdit_partialTrainability.setText("")
self.lineEdit_partialTrainability.setEnabled(False)#disable the lineEdit which shows the trainability status of each layer
self.pushButton_partialTrainability.setEnabled(False)
return
#Fill information about the model
if self.radioButton_NewModel.isChecked():#a new model is loaded
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("New model")
elif self.radioButton_LoadRestartModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Restart model: "+load_model_path)
elif self.radioButton_LoadContinueModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Continue model: "+load_model_path)
in_dim = self.model_keras.input_shape
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
out_dim = self.model_keras.output_shape[-1]
self.popup_trainability_ui.spinBox_pop_pTr_inpSize.setValue(int(in_dim[1]))
self.popup_trainability_ui.spinBox_pop_pTr_outpSize.setValue(int(out_dim))
if channels==1:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("Grayscale")
elif channels==3:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("RGB")
#Model summary to textBrowser_pop_pTr_modelSummary
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
self.popup_trainability_ui.textBrowser_pop_pTr_modelSummary.setText(summary)
#Work on the tableWidget_pop_pTr_layersTable
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
nr_layers = len(index) #total nr. of dense and conv layers with parameters
for rowNumber in range(nr_layers):
layerindex = index[rowNumber]
columnPosition = 0
layer = self.model_keras.layers[layerindex]
rowPosition = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.rowCount()
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.insertRow(rowPosition)
Name = layer.name
item = QtWidgets.QTableWidgetItem(Name)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
layer_type = layer.__class__.__name__
item = QtWidgets.QTableWidgetItem(layer_type)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 2
Params = layer.count_params()
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, Params)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 3
if layer_type == "Dense":
split_property = "units" #'units' are the number of nodes in dense layers
elif layer_type == "Conv2D":
split_property = "filters"
else:
print("other splitprop!")
return
layer_config = layer.get_config()
nr_units = layer_config[split_property] #units are either nodes or filters for dense and convolutional layer, respectively
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, int(nr_units))
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#for each item create a spinbopx (trainability)
spinb = QtWidgets.QDoubleSpinBox(self.popup_trainability_ui.tableWidget_pop_pTr_layersTable)
spinb.setMinimum(0)
spinb.setMaximum(1)
spinb.setSingleStep(0.1)
trainability = int(layer.trainable) #.trainable actually returns True or False. Make it integer
spinb.setValue(trainability) #reflects the layer's current trainability (typically 1)
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setCellWidget(rowPosition, columnPosition, spinb)
self.popup_trainability.show()
#self.popup_trainability_ui.pushButton_pop_pTr_reset.clicked.connect(self.pop_pTr_reset)
self.popup_trainability_ui.pushButton_pop_pTr_update.clicked.connect(self.pop_pTr_update_2)
self.popup_trainability_ui.pushButton_pop_pTr_ok.clicked.connect(self.pop_pTr_ok)
###############Functions for the partial trainability popup################
def pop_pTr_reset(self):
#Reset the model to initial state, with partial trainability
print("Not implemented yet")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Not implemented yet.</p></body></html>")
msg.setWindowTitle("Not implemented")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def pop_pTr_update_1(self):#main worker function
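#Reads the requested trainability values from the popup table, compares them with
#the current state of the corresponding layers and only rebuilds the model (via
#partial_trainability, presumably returning a modified model) for layers whose
#value actually changed; the lineEdit summary is updated afterwards.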
#Apply the requested changes and display updated model in table
pTr_table = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable
#Read the table:
Layer_names,Layer_trainabilities = [],[]
rowCount = pTr_table.rowCount()
for row in range(rowCount):
#Layer_indices.append(str(pTr_table.item(row, 0).text()))
Layer_names.append(str(pTr_table.item(row, 0).text()))
Layer_trainabilities.append(float(pTr_table.cellWidget(row, 4).value()))
Layer_trainabilities = np.array(Layer_trainabilities)
#What are the current trainability statuses of the model
Layer_trainabilities_orig = np.array([self.model_keras.get_layer(l_name).trainable for l_name in Layer_names])
diff = abs( Layer_trainabilities - Layer_trainabilities_orig )
ind = np.where( diff>0 )[0]
#Where do we have a trainability between 0 and 1
#ind = np.where( (Layer_trainabilities>0) & (Layer_trainabilities<1) )[0]
if len(ind)>0:
Layer_trainabilities = list(Layer_trainabilities[ind])
Layer_names = list(np.array(Layer_names)[ind])
#Update the model using user-specified trainabilities
self.model_keras = partial_trainability(self.model_keras,Layer_names,Layer_trainabilities)
#Update lineEdit_partialTrainability
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#show the trainability status of each layer in the lineEdit
else:
print("Nothing to do. All trainabilities are either 0 or 1")
def pop_pTr_update_2(self):#call pop_pTr_update_1 to do the work and then update the window
try:
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table
self.partialTrainability()#Update the popup window by calling the partialTrainability function
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def pop_pTr_ok(self):
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table; If 'Update' was used before, there will not be done work again, but the model is used as it is
#To make the model accessible, it has to be saved to a new .model file
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"AIDeveloper model file (*.model)")
filename = filename[0]
path, fname = os.path.split(filename)
if len(fname)==0:
return
#add the suffix .model
if not fname.endswith(".model"):
fname = fname +".model"
filename = os.path.join(path,fname)
self.model_keras.save(filename)
#Activate 'load and restart' and put this file
#Avoid the automatic popup
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(True)
self.lineEdit_LoadModelPath.setText(filename)#put the filename in the lineedit
#Destroy the window
self.popup_trainability = None
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(tooltips["modelsaved_success"])
msg.setWindowTitle("Sucessfully created and selected model")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def lossW_comboB(self,state_nr,listindex):
if listindex==-1:
ui_item = self.popup_lossW_ui
else:
ui_item = self.fittingpopups_ui[listindex].popup_lossW_ui
state_str = ui_item.comboBox_lossW.itemText(int(state_nr))
rows_nr = int(ui_item.tableWidget_lossW.rowCount())
if rows_nr==0:
state_str = "None"
if state_str=="None":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(1.0)
elif state_str=="Custom":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(True)
elif state_str=="Balanced":
#How many cells in total per epoch
events_epoch = [int(ui_item.tableWidget_lossW.item(rowPos,2).text()) for rowPos in range(rows_nr)]
classes = [int(ui_item.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=events_epoch[i]
max_val = float(max(counter.values()))
class_weights = {class_id : max_val/num_images for class_id, num_images in counter.items()}
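#Example: class counts {0: 100, 1: 25} yield weights {0: 1.0, 1: 4.0}, i.e. the
#rarer class is up-weighted so both classes contribute equally to the loss.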
class_weights = list(class_weights.values())
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(class_weights[rowPos])
def lossW_ok(self,listindex):
#This happens when the user presses the OK button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
#Which option was used on comboBox_lossW?
state_str = ui_item.popup_lossW_ui.comboBox_lossW.currentText()
if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
elif state_str=="Custom":#User left None. This actually means its off
#There are custom values
#Read the loss values on the table
rows_nr = int(ui_item.popup_lossW_ui.tableWidget_lossW.rowCount())
classes = [int(ui_item.popup_lossW_ui.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
loss_weights = [float(ui_item.popup_lossW_ui.tableWidget_lossW.cellWidget(rowPos,4).value()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=loss_weights[i]
#Put counter (its a dictionary) to lineedit
ui_item.lineEdit_lossW.setText(str(counter))
elif state_str=="Balanced":#Balanced, the values are computed later fresh, even when user changes the cell-numbers again
ui_item.lineEdit_lossW.setText("Balanced")
#Destroy the window
ui_item.popup_lossW = None
def lossW_cancel(self,listindex):
#This happens when the user presses the Cancel button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
if ui_item.lineEdit_lossW.text()=="":
#if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
ui_item.popup_lossW = None
return
#Destroy the window
ui_item.popup_lossW = None
def get_norm_from_manualselection(self):
norm = self.comboBox_w.currentText()
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
def popup_normalization(self):
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.gridLayout_w.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout"))
self.label_w = QtWidgets.QLabel(self.w)
self.label_w.setAlignment(QtCore.Qt.AlignCenter)
self.label_w.setObjectName(_fromUtf8("label_w"))
self.verticalLayout_w.addWidget(self.label_w)
self.horizontalLayout_2_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_2_w.setObjectName(_fromUtf8("horizontalLayout_2"))
self.pushButton_w = QtWidgets.QPushButton(self.w)
self.pushButton_w.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout_2_w.addWidget(self.pushButton_w)
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2_w = QtWidgets.QLabel(self.w)
self.label_2_w.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2_w.setObjectName(_fromUtf8("label_2_w"))
self.horizontalLayout_w.addWidget(self.label_2_w)
self.comboBox_w = QtWidgets.QComboBox(self.w)
self.comboBox_w.setObjectName(_fromUtf8("comboBox"))
self.comboBox_w.addItems(["Select"]+self.norm_methods)
self.comboBox_w.setMinimumSize(QtCore.QSize(200,22))
self.comboBox_w.setMaximumSize(QtCore.QSize(200, 22))
width=self.comboBox_w.fontMetrics().boundingRect(max(self.norm_methods, key=len)).width()
self.comboBox_w.view().setFixedWidth(width+10)
self.comboBox_w.currentIndexChanged.connect(self.get_norm_from_manualselection)
self.horizontalLayout_w.addWidget(self.comboBox_w)
self.horizontalLayout_2_w.addLayout(self.horizontalLayout_w)
self.verticalLayout_w.addLayout(self.horizontalLayout_2_w)
self.gridLayout_w.addLayout(self.verticalLayout_w, 0, 0, 1, 1)
self.w.setWindowTitle("Select normalization method")
self.label_w.setText("You are about to continue training a pretrained model\n"
"Please select the meta file of that model to load the normalization method\n"
"or choose the normalization method manually")
self.pushButton_w.setText("Load meta file")
self.label_2_w.setText("Manual \n"
"selection")
#one button that allows to load a meta file containing the norm-method
self.pushButton_w.clicked.connect(self.get_norm_from_modelparafile)
self.w.show()
def action_preview_model(self,enabled):#function runs when radioButton_LoadRestartModel or radioButton_LoadContinueModel was clicked
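#Previews the chosen .arch/.model file without building it: the architecture is
#read from the JSON config, the matching *_meta.xlsx (if present) is used to
#restore the model choice in comboBox_ModelSelection, and input image size and
#color mode in the GUI are adjusted to the model's input shape.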
if enabled:
#if the "Load and restart" radiobutton was clicked:
if self.radioButton_LoadRestartModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model architecture', Default_dict["Path of last model"],"Architecture or model (*.arch *.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
#if the "Load and continue" radiobutton was clicked:
elif self.radioButton_LoadContinueModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model with all parameters', Default_dict["Path of last model"],"Keras model (*.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
self.lineEdit_LoadModelPath.setText(modelname) #Put the filename to the line edit
#Remember the location for next time
if len(str(modelname))>0:
Default_dict["Path of last model"] = os.path.split(modelname)[0]
aid_bin.save_aid_settings(Default_dict)
#If user wants to load and restart a model
if self.radioButton_LoadRestartModel.isChecked():
#load the model and print summary
if modelname.endswith(".arch"):
json_file = open(modelname, 'r')
model_config = json_file.read()
json_file.close()
model_config = json.loads(model_config)
#cut the .json off
modelname = modelname.split(".arch")[0]
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
text1 = "Architecture: loaded from .arch\nWeights: will be randomly initialized'\n"
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
#Otherwise, user wants to load and continue training a model
elif self.radioButton_LoadContinueModel.isChecked():
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
text1 = "Architecture: loaded from .model\nWeights: pretrained weights will be loaded and used when hitting button 'Initialize model!'\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
#
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: loaded Model takes: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked_no_rtdc_ds()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
#aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
text = text1+text2+text3
self.textBrowser_Info.setText(text)
if self.radioButton_LoadContinueModel.isChecked():
#"Load the parameter file of the model that should be continued and apply the same normalization"
#Make a popup: You are about to continue to train a pretrained model
#Please select the parameter file of that model to load the normalization method
#or choose the normalization method manually:
#this is important
self.popup_normalization()
def get_metrics(self,nr_classes):
Metrics = []
f1 = bool(self.checkBox_expertF1.isChecked())
if f1==True:
Metrics.append("f1_score")
precision = bool(self.checkBox_expertPrecision.isChecked())
if precision==True:
Metrics.append("precision")
recall = bool(self.checkBox_expertRecall.isChecked())
if recall==True:
Metrics.append("recall")
metrics = ['accuracy'] + Metrics
metrics = aid_dl.get_metrics_tensors(metrics,nr_classes)
return metrics
def action_set_modelpath_and_name(self):
#Get the path and filename for the new model
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"Keras Model file (*.model)")
filename = filename[0]
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if filename.endswith(".arch"):
filename = filename.split(".arch")[0]
#add the suffix .model
if not filename.endswith(".model"):
filename = filename +".model"
self.lineEdit_modelname.setText(filename)
#Write to Default_dict
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def get_dataOverview(self):
table = self.tableWidget_Info
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
tmp_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
tmp_df.iloc[i, j] = table.item(i, j).text()
except:
tmp_df.iloc[i, j] = np.nan
return tmp_df
def action_initialize_model(self,duties="initialize_train"):
"""
duties: which tasks should be performed: "initialize", "initialize_train", "initialize_lrfind"
"""
#print("duties: "+str(duties))
#Create config (define which device to use)
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
# try:
# K.clear_session()
# except:
# print("Could not clear_session (7)")
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#Initialize the model
#######################Load and restart model##########################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname
#load the model and print summary
if load_modelname.endswith(".arch"):
json_file = open(load_modelname, 'r')
model_config = json_file.read()
json_file.close()
model_keras = model_from_json(model_config)
model_config = json.loads(model_config)
text1 = "\nArchitecture: loaded from .arch\nWeights: randomly initialized\n"
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif load_modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(load_modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
model_keras = model_from_config(model_config)
text1 = "\nArchitecture: loaded from .model\nWeights: randomly initialized\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
channels = in_dim[-1] #TensorFlow: channels in last dimension
#Compile model (consider user-specific metrics)
model_metrics = self.get_metrics(out_dim)
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings may recompile the model later
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###############Load and continue training the model####################
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname+"\n"
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if load_modelname.endswith(".model"):
#Load the full model
try:
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
except:
K.clear_session() #On Linux an error occasionally occurred if another fitting ran before
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
#model_config = model_keras.config() #Load the model config (this is the architecture)
#load_modelname = load_modelname.split(".model")[0]
text1 = "Architecture: loaded from .model\nWeights: pretrained weights were loaded\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
chosen_model = str(self.comboBox_ModelSelection.currentText())
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#Check input dimensions
#The number of output classes should also match; this is not essential,
#but most users certainly want the same number of classes (output)->give info and adjust if needed
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
in_dim = model_keras.get_input_shape_at(0)
out_dim = model_keras.get_output_shape_at(0)[1]
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes is equal to the model output
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###########################New model###################################
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
text0 = load_modelname
#Create a new model!
#Get what the user wants from the dropdown menu!
chosen_model = str(self.comboBox_ModelSelection.currentText())
if chosen_model==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
SelectedFiles = self.items_clicked()
#rtdc_ds = SelectedFiles[0]["rtdc_ds"]
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
channels=1
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
channels=3
indices = [s["class"] for s in SelectedFiles]
indices_unique = np.unique(np.array(indices))
if len(indices_unique)<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Need at least two classes to fit. Please specify .rtdc files and corresponding indeces")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
out_dim = np.max(indices)+1
nr_classes = out_dim
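#For a new model, the number of output nodes is derived from the data: highest class index + 1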
if chosen_model=="None":
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
try:
model_keras = model_zoo.get_model(chosen_model,in_dim,channels,out_dim)
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text1 = "Architecture: created "+chosen_model+" design\nWeights: Initialized random weights\n"
if self.get_color_mode()=="Grayscale":
channels = 1
channel_text = "1 channel (Grayscale)"
elif self.get_color_mode()=="RGB":
channels = 3
channel_text = "3 channels (RGB)"
text2 = "Model Input: "+str(in_dim)+" x "+str(in_dim) + " pixel images and "+channel_text+"\n"
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
else:
#No radio-button was chosen
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the radiobuttons to define the model")
msg.setWindowTitle("No model defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If expert mode is on, apply the requested options
#This affects learning rate, trainability of layers and dropout rate
expert_mode = bool(self.groupBox_expertMode.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy() #get the current optimizer settings
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
model_metrics = self.get_metrics(nr_classes)
if "collection" in chosen_model.lower():
for m in model_keras[1]: #in a collection, model_keras[0] are the names of the models and model_keras[1] is a list of all models
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,self.get_metrics(nr_classes),nr_classes)
if not "collection" in chosen_model.lower():
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
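#e.g. entering 0.2,0.5 in the dropout field yields [0.2, 0.5]; a single value such as 0.3 yields [0.3],
#which is later broadcast to all dropout layers of the model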
if type(model_keras)==tuple:#when user chose a Collection of models, a tuple is returned by get_model
collection = True
else:
collection = False
if collection==False: #if there is a single model:
#Original learning rate (before expert mode is switched on!)
try:
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
except:
print("Session busy. Try again in fresh session...")
#tf.reset_default_graph() #Make sure to start with a fresh session
K.clear_session()
sess = tf.Session(graph = tf.Graph(), config=config_gpu)
#K.set_session(sess)
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
#Get initial trainability states of model
self.trainable_original, self.layer_names = aid_dl.model_get_trainable_list(model_keras)
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
do_list_original = self.do_list_original
if collection==True: #if there is a collection of models:
#Original learning rate (before expert mode is switched on!)
self.learning_rate_original = [K.eval(model_keras[1][i].optimizer.lr) for i in range(len(model_keras[1]))]
#Get initial trainability states of model
trainable_layerName = [aid_dl.model_get_trainable_list(model_keras[1][i]) for i in range(len(model_keras[1]))]
self.trainable_original = [trainable_layerName[i][0] for i in range(len(trainable_layerName))]
self.layer_names = [trainable_layerName[i][1] for i in range(len(trainable_layerName))]
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = [aid_dl.get_dropout(model_keras[1][i]) for i in range(len(model_keras[1]))]#Get a list of dropout values of the current model
do_list_original = self.do_list_original
#TODO add expert mode ability for collection of models. Maybe define self.model_keras as a list in general. So, fitting a single model is just a special case
if expert_mode==True:
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if dropout_expert_on==True:
#The user apparently want to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list=len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
return
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not understand user input at Expert->Dropout")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model is not equal to the required do_list from user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Learning Rate: Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[1][0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6: #If there is a difference, change lr accordingly
K.set_value(model_keras.optimizer.lr, learning_rate_const)
text_updates += "Learning rate: "+str(lr_current)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[1][0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Optimizer: "+optimizer_expert+"\n"
#Loss function: Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
if collection==True:
if model_keras[1][0].loss!=loss_expert:
recompile = True
text_updates += "Loss function: "+loss_expert+"\n"
if recompile==True:
if collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
print("Recompiling...")
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.model_keras = model_keras #overwrite the model in self
if collection == False:
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
text_new_modelname = "Model will be saved as: "+new_modelname+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
if collection == True:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
new_modelname = os.path.split(new_modelname)
text_new_modelname = "Collection of Models will be saved into: "+new_modelname[0]+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
#Info about normalization method
norm = str(self.comboBox_Normalization.currentText())
text4 = "Input image normalization method: "+norm+"\n"
#Check if there are dropout layers:
#do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
if len(do_list_original)>0:
text4 = text4+"Found "+str(len(do_list_original)) +" dropout layers with rates: "+str(do_list_original)+"\n"
else:
text4 = text4+"Found no dropout layers\n"
if expert_mode==True:
if dropout_expert_on:
text4 = text4+text_do+"\n"
# if learning_rate_expert_on==True:
# if K.eval(model_keras.optimizer.lr) != learning_rate_const: #if the learning rate in UI is NOT equal to the lr of the model...
# text_lr = "Changed the learning rate to: "+ str(learning_rate_const)+"\n"
# text4 = text4+text_lr
text5 = "Model summary:\n"
summary = []
if collection==False:
model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model architecture: serialize to JSON
model_json = model_keras.to_json()
with open(new_modelname.split(".model")[0]+".arch", "w") as json_file:
json_file.write(model_json)
elif collection==True:
if self.groupBox_expertMode.isChecked()==True:
self.groupBox_expertMode.setChecked(False)
print("Turned off expert mode. Not implemented yet for collections of models. This does not affect user-specified metrics (precision/recall/f1)")
self.model_keras_arch_path = [new_modelname[0]+os.sep+new_modelname[1].split(".model")[0]+"_"+model_keras[0][i]+".arch" for i in range(len(model_keras[0]))]
for i in range(len(model_keras[1])):
model_keras[1][i].summary(print_fn=summary.append)
#Save the model architecture: serialize to JSON
model_json = model_keras[1][i].to_json()
with open(self.model_keras_arch_path[i], "w") as json_file:
json_file.write(model_json)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model to a variable on self
self.model_keras = model_keras
#Get the user-defined cropping size
crop = int(self.spinBox_imagecrop.value())
#Make the cropsize a bit larger since the images will later be rotated
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
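#e.g. crop=32 -> 32*sqrt(2) ~= 45.3 -> cropsize2 = 46 (rounded up to the next even number)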
#Estimate RAM needed
nr_imgs = np.sum([np.array(list(SelectedFiles)[i]["nr_images"]) for i in range(len(list(SelectedFiles)))])
ram_needed = np.round(nr_imgs * aid_bin.calc_ram_need(cropsize2),2)
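#ram_needed is a rough estimate in MB, based on the number of selected images and the enlarged crop size
#(aid_bin.calc_ram_need presumably returns the per-image requirement for cropsize2)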
if duties=="initialize":#Stop here if the model just needs to be intialized (for expert mode->partial trainability)
return
elif duties=="initialize_train":
#Tell the user if the data is stored and read from ram or not
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the model only be initialized,\
or do you want to start fitting right after? For fitting, data will\
be loaded to RAM (since Edit->Data to RAM is enabled), which will\
require "+str(ram_needed)+"MB of RAM.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Initialize model or initialize and fit model?")
msg.addButton(QtGui.QPushButton('Stop after model initialization'), QtGui.QMessageBox.RejectRole)
msg.addButton(QtGui.QPushButton('Start fitting'), QtGui.QMessageBox.ApplyRole)
retval = msg.exec_()
elif duties=="initialize_lrfind":
retval = 1
else:
print("Invalid duties: "+duties)
return
if retval==0: #'Stop after model initialization' was chosen: only initialize the model
print("Closing session")
del model_keras
sess.close()
return
elif retval == 1:
if self.actionDataToRam.isChecked():
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Check if there is data already available in RAM
if len(self.ram)==0:#if there is no data stored in RAM yet
print("No data on RAM. I have to load")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
else:
print("There is already some data on RAM")
new_fileinfo = {"SelectedFiles":list(SelectedFiles),"cropsize2":cropsize2,"zoom_factors":zoom_factors,"zoom_order":zoom_order,"color_mode":color_mode}
identical = aid_bin.ram_compare_data(self.ram,new_fileinfo)
if not identical:
#Load the data
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
if identical:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "Data was loaded before! Should same data be reused? If not, click 'Reload data', e.g. if you altered the Data-table."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Found data on RAM")
msg.addButton(QtGui.QPushButton('Reuse data'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('Reload data'), QtGui.QMessageBox.NoRole)
retval = msg.exec_()
if retval==0:
print("Re-use data")
#Re-use same data
elif retval==1:
print("Re-load data")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
#Finally, activate the 'Fit model' button again
#self.pushButton_FitModel.setEnabled(True)
if duties=="initialize_train":
self.action_fit_model()
if duties=="initialize_lrfind":
self.action_lr_finder()
del model_keras
def action_fit_model_worker(self,progress_callback,history_callback):
if self.radioButton_cpu.isChecked():
gpu_used = False
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
gpu_used = True
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
listindex = self.popupcounter-1
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
#Take the initialized models
model_keras_path = self.model_keras_path
model_keras = [load_model(model_keras_path[i],custom_objects=aid_dl.get_custom_metrics()) for i in range(len(model_keras_path)) ]
model_architecture_names = self.model_keras[0]
print(model_architecture_names)
#self.model_keras = None
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#self.model_keras = None
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
# model_keras_p = []
# for m in model_keras_p:
# print("Adjusting the model for Multi-GPU")
# model_keras_p.append(multi_gpu_model(m, gpus=gpu_nr)) #indicate the numbers of gpus that you have
##############Main function after hitting FIT MODEL####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if collection==False and deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings follow and the model will be recompiled if needed
elif collection==False and deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings follow and the model will be recompiled if needed
elif collection==True and deviceSelected=="Single-GPU":
#Switch off the expert tab!
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(False)
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setEnabled(False)
for m in model_keras:
m.compile(loss='categorical_crossentropy',optimizer='adam',metrics=self.get_metrics(nr_classes))#compile with default loss/optimizer for now; expert settings follow and the model will be recompiled if needed
elif collection==True and deviceSelected=="Multi-GPU":
print("Collection & Multi-GPU is not supported yet")
return
#Original learning rate:
#learning_rate_original = self.learning_rate_original#K.eval(model_keras.optimizer.lr)
#Original trainable states of layers with parameters
trainable_original, layer_names = self.trainable_original, self.layer_names
do_list_original = self.do_list_original
#Collect all information about the fitting routine that was user
#defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
new_model = self.radioButton_NewModel.isChecked()
chosen_model = str(self.comboBox_ModelSelection.currentText())
crop = int(self.spinBox_imagecrop.value())
color_mode = str(self.comboBox_GrayOrRGB.currentText())
loadrestart_model = self.radioButton_LoadRestartModel.isChecked()
loadcontinue_model = self.radioButton_LoadContinueModel.isChecked()
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
keras_refresh_nr_epochs = int(self.spinBox_RefreshAfterEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_refresh_nr_epochs = int(self.spinBox_RefreshAfterNrEpochs.value())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
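#e.g. the lineEdit entries "1,7" and "-10,10" become the tuples (1, 7) and (-10, 10)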
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False)
print("Expert mode was switched off. Not implemented yet for collections")
expert_mode = False
batchSize_expert = int(self.spinBox_batchSize.value())
epochs_expert = int(self.spinBox_epochs.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
learning_rate_const_on = bool(self.radioButton_LrConst.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.lineEdit_cycLrMin.text())
cycLrMax = float(self.lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.comboBox_cycLrMethod.currentText())
#clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy()
cycLrGamma = self.clr_settings["gamma"]
SelectedFiles = self.items_clicked()#to compute cycLrStepSize, the number of training images is needed
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,self.clr_settings["step_size"],batchSize_expert)
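#the cyclic-LR step size is derived from the number of selected training images, the step-size multiplier
#stored in clr_settings and the batch size (see aid_dl.get_cyclStepSize)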
#put clr_settings onto fittingpopup,
self.fittingpopups_ui[listindex].clr_settings = self.clr_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
#put optimizer_settings onto fittingpopup,
self.fittingpopups_ui[listindex].optimizer_settings = self.optimizer_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
learning_rate_expo_on = bool(self.radioButton_LrExpo.isChecked())
expDecInitLr = float(self.doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.spinBox_expDecSteps.value())
expDecRate = float(self.doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy()#make a copy to make sure that changes in the UI are not immediately used
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.checkBox_lossW.isChecked())
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
#SelectedFiles = self.items_clicked()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
self.fittingpopups_ui[listindex].SelectedFiles = SelectedFiles #save to self. to make it accessible for popup showing loss weights
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print("class_weight:" +str(class_weight))
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
#Get callback for the learning rate scheduling
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
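#callback_lr implements whichever schedule the user selected: constant learning rate,
#cyclical learning rate (method given by cycLrMethod) or exponential decay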
#save a dictionary with initial values
lr_dict_original = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
if collection==False:
#Create an excel file
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles_df = pd.DataFrame(SelectedFiles)
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
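#four sheets are prepared up front: UsedData, DataOverview, Parameters and History;
#Parameters and History start empty and are filled while the fitting routine proceeds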
elif collection==True:
SelectedFiles_df = pd.DataFrame(SelectedFiles)
Writers = []
#Create excel files
for i in range(len(model_keras_path)):
writer = pd.ExcelWriter(model_keras_path[i].split(".model")[0]+'_meta.xlsx', engine='openpyxl')
Writers.append(writer)
for writer in Writers:
#Used files go to a separate sheet on the MetaFile.xlsx
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
###############################Expert Mode values##################
expert_mode_before = False #There was no expert mode used before.
if expert_mode==True:
#activate groupBox_expertMode_pop
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(True)
expert_mode_before = True
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently want to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model is not equal to the required do_list from user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
#Check if model has to be compiled again
recompile = False #by default, dont recompile (happens for "Load and continue" training a model)
if new_model==True:
recompile = True
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model on self
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#read self.ram to new variable ; next clear ram. This is required for multitasking (training multiple models with maybe different data)
DATA = self.ram
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(DATA)))
#clear the ram again if desired
if not self.actionKeep_Data_in_RAM.isChecked():
self.ram = dict()
print("Removed data from self.ram. For further training sessions, data has to be reloaded.")
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles_train)):
#if Data_to_RAM was not enabled:
#if not self.actionDataToRam.isChecked():
if len(DATA)==0: #Here, the entire training set needs to be used! Not only random images!
#Use the entire training set (random_images=False) to compute the mean and std for scaling
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
# else: #get a similar generator, using the ram-data
# if len(DATA)==0:
# gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],random_images=False) #use the entire training set held in RAM
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
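#Note: mean/std are computed as the average of the per-file means/stds, i.e. an approximation
#when the files contain different numbers of images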
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
Para_dict = pd.DataFrame()
def update_para_dict():
#Document changes in the meta-file
Para_dict["AIDeveloper_Version"]=VERSION,
Para_dict["model_zoo_version"]=model_zoo_version,
try:
Para_dict["OS"]=platform.platform(),
Para_dict["CPU"]=platform.processor(),
except:
Para_dict["OS"]="Unknown",
Para_dict["CPU"]="Unknown",
Para_dict["Modelname"]=new_modelname,
Para_dict["Chosen Model"]=chosen_model,
Para_dict["new_model"]=new_model,
Para_dict["loadrestart_model"]=loadrestart_model,
Para_dict["loadcontinue_model"]=loadcontinue_model,
Para_dict["Continued_Fitting_From"]=load_modelname,
Para_dict["Input image size"]=crop,
Para_dict["Color Mode"]=color_mode,
Para_dict["Zoom order"]=zoom_order,
Para_dict["Device"]=deviceSelected,
Para_dict["gpu_used"]=gpu_used,
Para_dict["gpu_memory"]=gpu_memory,
Para_dict["Output Nr. classes"]=nr_classes,
Para_dict["Normalization"]=norm,
Para_dict["Nr. epochs"]=nr_epochs,
Para_dict["Keras refresh after nr. epochs"]=keras_refresh_nr_epochs,
Para_dict["Horz. flip"]=h_flip,
Para_dict["Vert. flip"]=v_flip,
Para_dict["rotation"]=rotation,
Para_dict["width_shift"]=width_shift,
Para_dict["height_shift"]=height_shift,
Para_dict["zoom"]=zoom,
Para_dict["shear"]=shear,
Para_dict["Brightness refresh after nr. epochs"]=brightness_refresh_nr_epochs,
Para_dict["Brightness add. lower"]=brightness_add_lower,
Para_dict["Brightness add. upper"]=brightness_add_upper,
Para_dict["Brightness mult. lower"]=brightness_mult_lower,
Para_dict["Brightness mult. upper"]=brightness_mult_upper,
Para_dict["Gaussnoise Mean"]=gaussnoise_mean,
Para_dict["Gaussnoise Scale"]=gaussnoise_scale,
Para_dict["Contrast on"]=contrast_on,
Para_dict["Contrast Lower"]=contrast_lower,
Para_dict["Contrast Higher"]=contrast_higher,
Para_dict["Saturation on"]=saturation_on,
Para_dict["Saturation Lower"]=saturation_lower,
Para_dict["Saturation Higher"]=saturation_higher,
Para_dict["Hue on"]=hue_on,
Para_dict["Hue delta"]=hue_delta,
Para_dict["Average blur on"]=avgBlur_on,
Para_dict["Average blur Lower"]=avgBlur_min,
Para_dict["Average blur Higher"]=avgBlur_max,
Para_dict["Gauss blur on"]=gaussBlur_on,
Para_dict["Gauss blur Lower"]=gaussBlur_min,
Para_dict["Gauss blur Higher"]=gaussBlur_max,
Para_dict["Motion blur on"]=motionBlur_on,
Para_dict["Motion blur Kernel"]=motionBlur_kernel,
Para_dict["Motion blur Angle"]=motionBlur_angle,
Para_dict["Epoch_Started_Using_These_Settings"]=counter,
Para_dict["expert_mode"]=expert_mode,
Para_dict["batchSize_expert"]=batchSize_expert,
Para_dict["epochs_expert"]=epochs_expert,
Para_dict["learning_rate_expert_on"]=learning_rate_expert_on,
Para_dict["learning_rate_const_on"]=learning_rate_const_on,
Para_dict["learning_rate_const"]=learning_rate_const,
Para_dict["learning_rate_cycLR_on"]=learning_rate_cycLR_on,
Para_dict["cycLrMin"]=cycLrMin,
Para_dict["cycLrMax"]=cycLrMax,
Para_dict["cycLrMethod"] = cycLrMethod,
Para_dict["clr_settings"] = self.fittingpopups_ui[listindex].clr_settings,
Para_dict["learning_rate_expo_on"]=learning_rate_expo_on,
Para_dict["expDecInitLr"]=expDecInitLr,
Para_dict["expDecSteps"]=expDecSteps,
Para_dict["expDecRate"]=expDecRate,
Para_dict["loss_expert_on"]=loss_expert_on,
Para_dict["loss_expert"]=loss_expert,
Para_dict["optimizer_expert_on"]=optimizer_expert_on,
Para_dict["optimizer_expert"]=optimizer_expert,
Para_dict["optimizer_settings"]=optimizer_settings,
Para_dict["paddingMode"]=paddingMode,
Para_dict["train_last_layers"]=train_last_layers,
Para_dict["train_last_layers_n"]=train_last_layers_n,
Para_dict["train_dense_layers"]=train_dense_layers,
Para_dict["dropout_expert_on"]=dropout_expert_on,
Para_dict["dropout_expert"]=dropout_expert,
Para_dict["lossW_expert_on"]=lossW_expert_on,
Para_dict["lossW_expert"]=lossW_expert,
Para_dict["class_weight"]=class_weight,
Para_dict["metrics"]=model_metrics,
#training data cannot be changed during training
if norm == "StdScaling using mean and std of all training data":
#This needs to be saved into Para_dict since it will be required for inference
Para_dict["Mean of training data used for scaling"]=mean_trainingdata,
Para_dict["Std of training data used for scaling"]=std_trainingdata,
if collection==False:
if counter == 0:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters')
else:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters',startrow=self.fittingpopups_ui[listindex].writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH)#change to read/write
try:
self.fittingpopups_ui[listindex].writer.save()
except:
pass
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)#change to only readable
if collection==True:
for i in range(len(Writers)):
Para_dict["Chosen Model"]=model_architecture_names[i],
writer = Writers[i]
if counter==0:
Para_dict.to_excel(Writers[i],sheet_name='Parameters')
else:
Para_dict.to_excel(writer,sheet_name='Parameters',startrow=writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
try:
writer.save()
except:
pass
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #read only
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if not self.actionDataToRam.isChecked():
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else: #get a similar generator, using the ram-data
if len(DATA)==0:
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_valid = aid_img.gen_crop_img_ram(DATA,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
#Save the validation set (BEFORE normalization!)
#Write to.rtdc files
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Use a different Exporting option in ->Edit if you want to export the data")
# msg.setWindowTitle("Export is turned off!")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
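#e.g. with nr_classes=3, the labels [0, 2] become the one-hot rows [1,0,0] and [0,0,1]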
xtra_valid = np.concatenate(xtra_valid)
if not bool(self.actionExport_Off.isChecked())==True:
#Save the labels
np.savetxt(new_modelname.split(".model")[0]+'_Valid_Labels.txt',y_valid.astype(int),fmt='%i')
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
#get it to theano image format (channels first)
#X_valid = X_valid.swapaxes(-1,-2).swapaxes(-2,-3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
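#e.g. a 46 px validation image and crop=32 gives remove=7, keeping pixels 7..38 (a centered 32 x 32 window)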
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
####################Update the PopupFitting########################
self.fittingpopups_ui[listindex].lineEdit_modelname_pop.setText(new_modelname) #show the model name in the fitting popup
self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.setValue(crop)
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(nr_epochs)
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.addItems(self.predefined_models)
chosen_model = str(self.comboBox_ModelSelection.currentText())
index = self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.addItems(self.norm_methods)
index = self.fittingpopups_ui[listindex].comboBox_Normalization_pop.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.setCurrentIndex(index)
#padding
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
#zoom_order
self.fittingpopups_ui[listindex].comboBox_zoomOrder.setCurrentIndex(zoom_order)
#CPU setting
self.fittingpopups_ui[listindex].comboBox_cpu_pop.addItem("Default CPU")
if gpu_used==False:
self.fittingpopups_ui[listindex].radioButton_cpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
#GPU setting
if gpu_used==True:
self.fittingpopups_ui[listindex].radioButton_gpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].comboBox_gpu_pop.addItem(deviceSelected)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.setValue(keras_refresh_nr_epochs)
self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.setChecked(h_flip)
self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.setChecked(v_flip)
self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.setText(str(rotation))
self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.setText(str(width_shift))
self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.setText(str(height_shift))
self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.setText(str(zoom))
self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.setText(str(shear))
self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.setValue(brightness_refresh_nr_epochs)
self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.setValue(brightness_add_lower)
self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.setValue(brightness_add_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.setValue(brightness_mult_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.setValue(brightness_mult_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.setValue(gaussnoise_mean)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.setValue(gaussnoise_scale)
self.fittingpopups_ui[listindex].checkBox_contrast_pop.setChecked(contrast_on)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.setValue(contrast_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.setValue(contrast_higher)
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setChecked(saturation_on)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setValue(saturation_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setValue(saturation_higher)
self.fittingpopups_ui[listindex].checkBox_hue_pop.setChecked(hue_on)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setValue(hue_delta)
#Special for saturation and hue. Only enabled for RGB:
saturation_enabled = bool(self.checkBox_saturation.isEnabled())
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setEnabled(saturation_enabled)
hue_enabled = bool(self.checkBox_hue.isEnabled())
self.fittingpopups_ui[listindex].checkBox_hue_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.setChecked(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setValue(avgBlur_min)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setValue(avgBlur_max)
self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.setChecked(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setValue(gaussBlur_min)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setValue(gaussBlur_max)
self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.setChecked(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurAngle_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setEnabled(motionBlur_on)
if len(motionBlur_kernel)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0]))
if len(motionBlur_kernel)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0])+","+str(motionBlur_kernel[1]))
if len(motionBlur_angle)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0]))
if len(motionBlur_angle)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0])+","+str(motionBlur_angle[1]))
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(expert_mode)
self.fittingpopups_ui[listindex].spinBox_batchSize.setValue(batchSize_expert)
self.fittingpopups_ui[listindex].spinBox_epochs.setValue(epochs_expert)
self.fittingpopups_ui[listindex].groupBox_learningRate_pop.setChecked(learning_rate_expert_on)
self.fittingpopups_ui[listindex].radioButton_LrConst.setChecked(learning_rate_const_on)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
self.fittingpopups_ui[listindex].radioButton_LrCycl.setChecked(learning_rate_cycLR_on)
self.fittingpopups_ui[listindex].lineEdit_cycLrMin.setText(str(cycLrMin))
self.fittingpopups_ui[listindex].lineEdit_cycLrMax.setText(str(cycLrMax))
index = self.fittingpopups_ui[listindex].comboBox_cycLrMethod.findText(cycLrMethod, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_cycLrMethod.setCurrentIndex(index)
self.fittingpopups_ui[listindex].radioButton_LrExpo.setChecked(learning_rate_expo_on)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.setValue(expDecInitLr)
self.fittingpopups_ui[listindex].spinBox_expDecSteps.setValue(expDecSteps)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.setValue(expDecRate)
self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.setChecked(loss_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.findText(loss_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_optimizer_pop.setChecked(optimizer_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_optimizer.findText(optimizer_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_optimizer.setCurrentIndex(index)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.setChecked(train_last_layers)
self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.setValue(train_last_layers_n)
self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.setChecked(train_dense_layers)
self.fittingpopups_ui[listindex].checkBox_dropout_pop.setChecked(dropout_expert_on)
do_text = [str(do_i) for do_i in dropout_expert]
self.fittingpopups_ui[listindex].lineEdit_dropout_pop.setText((', '.join(do_text)))
self.fittingpopups_ui[listindex].checkBox_lossW.setChecked(lossW_expert_on)
self.fittingpopups_ui[listindex].pushButton_lossW.setEnabled(lossW_expert_on)
self.fittingpopups_ui[listindex].lineEdit_lossW.setText(str(lossW_expert))
if channels==1:
channel_text = "Grayscale"
elif channels==3:
channel_text = "RGB"
self.fittingpopups_ui[listindex].comboBox_colorMode_pop.addItems([channel_text])
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45 degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
#Dictionary defining affine image augmentation options:
aug_paras = {"v_flip":v_flip,"h_flip":h_flip,"rotation":rotation,"width_shift":width_shift,"height_shift":height_shift,"zoom":zoom,"shear":shear}
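#Buffers that collect per-epoch results (history, epoch index, saved-flag, elapsed time, learning rate) until they are flushed to the meta.xlsx file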
Histories,Index,Saved,Stopwatch,LearningRate = [],[],[],[],[]
if collection==True:
HISTORIES = [ [] for model in model_keras]
SAVED = [ [] for model in model_keras]
counter = 0
saving_failed = False #when saving fails, this becomes true and the user will be informed at the end of training
#Save the initial values (Epoch 1)
update_para_dict()
model_metrics_names = []
for met in model_metrics:
if type(met)==str:
model_metrics_names.append(met)
else:
metname = met.name
metlabel = met.label
if metlabel>0:
metname = metname+"_"+str(metlabel)
model_metrics_names.append(metname)
#Dictionary for records in metrics
model_metrics_records = {}
model_metrics_records["acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["val_acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["loss"] = 9E20 ##loss starts very high and approaches 0 during training
model_metrics_records["val_loss"] = 9E20 ##loss starts very high and approaches 0 during training
for key in model_metrics_names:
if 'precision' in key or 'recall' in key or 'f1_score' in key:
model_metrics_records[key] = 0 #those metrics start at zero and approach 1
model_metrics_records["val_"+key] = 0 #those metrics start at zero and approach 1
gen_train_refresh = False
time_start = time.time()
t1 = time.time() #Initialize a timer; this is used to save the meta file every few seconds
t2 = time.time() #Initialize a timer; this is used update the fitting parameters
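#Main training loop: each pass loads a fresh set of training images, applies affine augmentation, then (in the inner loops) brightness/noise/blur augmentation, optionally applies expert-mode changes, fits the model and logs the history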
while counter < nr_epochs:#nr_epochs: #resample nr_epochs times
#Only keep fitting if the respective window is open:
isVisible = self.fittingpopups[listindex].isVisible()
if isVisible:
############Keras image augmentation#####################
#Start the first iteration:
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(DATA)==0 or gen_train_refresh:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
gen_train_refresh = False
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Some parallelization: use nr_threads (number of CPUs)
nr_threads = 1 #Somehow for MNIST and CIFAR, processing always took longer for nr_threads>1. I tried nr_threads=2,4,8,16,24
if nr_threads == 1:
X_batch = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_batch = np.copy(y_train)
else:
#Divide the data into nr_threads batches
X_train = np.array_split(X_train,nr_threads)
y_train = np.array_split(y_train,nr_threads)
self.X_batch = [False] * nr_threads
self.y_batch = [False] * nr_threads
self.counter_aug = 0
self.Workers_augm = []
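#Each worker applies the affine augmentation to its own split of X_train in a separate thread; the results are collected in self.X_batch and self.y_batch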
def imgaug_worker(aug_paras,progress_callback,history_callback):
i = aug_paras["i"]
self.X_batch[i] = aid_img.affine_augm(aug_paras["X_train"],v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear)
self.y_batch[i] = aug_paras["y_train"]
self.counter_aug+=1
t3_a = time.time()
for i in range(nr_threads):
aug_paras_ = copy.deepcopy(aug_paras)
aug_paras_["i"] = i
aug_paras_["X_train"]=X_train[i]#augparas contains rotation and so on. X_train and y_train are overwritten in each iteration (for each worker new X_train)
aug_paras_["y_train"]=y_train[i]
self.Workers_augm.append(Worker(imgaug_worker,aug_paras_))
self.threadpool.start(self.Workers_augm[i])
while self.counter_aug < nr_threads:
time.sleep(0.01)#Wait 0.01s, then check again whether all workers have finished
t3_b = time.time()
if verbose == 1:
print("Time to perform affine augmentation_internal ="+str(t3_b-t3_a))
X_batch = np.concatenate(self.X_batch)
y_batch = np.concatenate(self.y_batch)
Y_batch = np_utils.to_categorical(y_batch, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
# if verbose == 1:
# print("Time to crop to final size="+str(t4-t3))
X_batch_orig = np.copy(X_batch) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
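#Two nested refresh loops follow: the outer one reuses the affine-augmented images for keras_refresh_nr_epochs epochs, the inner one re-applies only brightness/noise/blur augmentation for brightness_refresh_nr_epochs epochs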
keras_iter_counter = 0
while keras_iter_counter < keras_refresh_nr_epochs and counter < nr_epochs:
keras_iter_counter+=1
#if t2-t1>5: #check for changed settings every 5 seconds
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Another while loop if the user wants to reuse the keras-augmented data
#several times and only apply brightness augmentation:
brightness_iter_counter = 0
while brightness_iter_counter < brightness_refresh_nr_epochs and counter < nr_epochs:
#In each iteration, start with non-augmented data
X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
X_batch = X_batch.astype(np.uint8)
#X_batch is intentionally not converted to float yet; the cv2-based augmentations below expect uint8
brightness_iter_counter += 1
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
if self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.isChecked():
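#The user requested to apply changed settings starting with the next epoch: re-read all augmentation and expert-mode parameters from the popup UI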
nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_NrEpochs.value())
#Keras stuff
keras_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.value())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
#Brightness stuff
brightness_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.value())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
#Expert mode stuff
expert_mode = bool(self.fittingpopups_ui[listindex].groupBox_expertMode_pop.isChecked())
batchSize_expert = int(self.fittingpopups_ui[listindex].spinBox_batchSize.value())
epochs_expert = int(self.fittingpopups_ui[listindex].spinBox_epochs.value())
learning_rate_expert_on = bool(self.fittingpopups_ui[listindex].groupBox_learningRate_pop.isChecked())
learning_rate_const_on = bool(self.fittingpopups_ui[listindex].radioButton_LrConst.isChecked())
learning_rate_const = float(self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.fittingpopups_ui[listindex].radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMin.text())
cycLrMax = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.fittingpopups_ui[listindex].comboBox_cycLrMethod.currentText())
clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy() #Get a copy of the current optimizer_settings. .copy prevents that changes in the UI have immediate effect
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,clr_settings["step_size"],batchSize_expert)
cycLrGamma = clr_settings["gamma"]
learning_rate_expo_on = bool(self.fittingpopups_ui[listindex].radioButton_LrExpo.isChecked())
expDecInitLr = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.fittingpopups_ui[listindex].spinBox_expDecSteps.value())
expDecRate = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.isChecked())
loss_expert = str(self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.currentText())
optimizer_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_optimizer_pop.isChecked())
optimizer_expert = str(self.fittingpopups_ui[listindex].comboBox_optimizer.currentText())
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy() #Get a copy of the current optimizer_settings. .copy prevents that changes in the UI have immediate effect
paddingMode_ = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText())
print("paddingMode_:"+str(paddingMode_))
if paddingMode_ != paddingMode:
print("Changed the padding mode!")
gen_train_refresh = True#otherwise changing paddingMode will not have any effect
paddingMode = paddingMode_
train_last_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.isChecked())
train_last_layers_n = int(self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.value())
train_dense_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.isChecked())
dropout_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_dropout_pop.isChecked())
try:
dropout_expert = str(self.fittingpopups_ui[listindex].lineEdit_dropout_pop.text()) #due to the validator, there are no square brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_lossW.isChecked())
lossW_expert = str(self.fittingpopups_ui[listindex].lineEdit_lossW.text())
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
print("Updating parameter file (meta.xlsx)!")
update_para_dict()
#Changes in expert mode can affect the model: apply changes now:
if expert_mode==True:
if collection==False: #Expert mode is currently not supported for Collections
expert_mode_before = True
#Apply changes to the trainable states:
if train_last_layers==True:#Train only the last n layers
if verbose:
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
if verbose:
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently want to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model is not equal to the required do_list from user...
#Change dropout. Model .compile happens inside change_dropout function
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to changed dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
if verbose:
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
if learning_rate_expert_on==True:
#get the current lr_dict
lr_dict_now = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
if not lr_dict_now.equals(lr_dict_original):#in case the dataframes don't equal...
#generate a new callback
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
#update lr_dict_original
lr_dict_original = lr_dict_now.copy()
else:
callback_lr = None
if optimizer_expert_on==True:
optimizer_settings_now = self.fittingpopups_ui[listindex].optimizer_settings.copy()
if not optimizer_settings_now == optimizer_settings:#in case the optimizer settings changed...
#grab these new optimizer values
optimizer_settings = optimizer_settings_now.copy()
############################Invert 'expert' settings#########################
if expert_mode==False and expert_mode_before==True: #if the expert mode was selected before, change the parameters back to original values
if verbose:
print("Expert mode was used before and settings are now inverted")
#Re-set trainable states back to original state
if verbose:
print("Change 'trainable' layers back to original state")
summary = aid_dl.model_change_trainability(model_keras,trainable_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change 'trainable' layers back to original state")
text1 = "Expert mode turns off: Request for orignal trainability states:\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if verbose:
print("Change dropout rates in dropout layers back to original values")
callback_lr = None#remove learning rate callback
if verbose:
print("Set learning rate callback to None")
if len(do_list_original)>0:
do_changed = aid_dl.change_dropout(model_keras,do_list_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout values back to original state. I'm not sure if this works!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to original values: "+str(do_list_original)
else:
text_do = "Dropout rate(s) in model was/were not changed"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do+"\n")
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection==False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection==False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
else:
K.set_value(model_keras[0].optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
loss_ = model_keras.loss
else:
loss_ = model_keras[0].loss
if loss_!=loss_expert:
recompile = True
model_metrics_records["loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
model_metrics_records["val_loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True and collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change optimizer, loss and learninig rate.")
elif recompile==True and collection==True:
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
print("Altering learning rate is not suported for collections (yet)")
return
print("Recompiling...")
for m in model_keras:
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model in self
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(False)
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
X_batch = X_batch.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
#Fitting can be paused
while str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
time.sleep(2) #wait 2 seconds and then check the text on the button again
if verbose == 1:
print("X_batch.shape")
print(X_batch.shape)
if xtra_in==True:
print("Add Xtra Data to X_batch")
X_batch = [X_batch,xtra_train]
#generate a list of callbacks, get empty list if callback_lr is none
callbacks = []
if callback_lr!=None:
callbacks.append(callback_lr)
###################################################
###############Actual fitting######################
###################################################
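#Fit either the single model (or its Multi-GPU parallel version) or, in collection mode, each model of the collection for epochs_expert epochs on the augmented batch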
if collection==False:
if model_keras_p == None:
history = model_keras.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
elif model_keras_p != None:
history = model_keras_p.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
Histories.append(history.history)
Stopwatch.append(time.time()-time_start)
learningrate = K.get_value(history.model.optimizer.lr)
LearningRate.append(learningrate)
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved" )
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved")
#self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
if record_broken:#if any record was broken...
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
#Save the model
text = "Save model to following directory: \n"+os.path.dirname(new_modelname)
print(text)
if os.path.exists(os.path.dirname(new_modelname)):
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Record was broken -> saved model"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:#in case the folder does not exist (anymore), create a folder in temp
#what is the foldername of the model?
text = "Saving failed. Create folder in temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text = "Your temp. folder is here: "+str(temp_path)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
parentfolder = aid_bin.splitall(new_modelname)[-2]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it does not exist already
if not os.path.exists(os.path.join(temp_path,parentfolder)):
text = "Create folder in temp:\n"+os.path.join(temp_path,parentfolder)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
os.mkdir(os.path.join(temp_path,parentfolder))
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,parentfolder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Save the model
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Model saved successfully to temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Also update the excel writer!
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
Saved.append(1)
#Also save the model upon user-request
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
Saved.append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
Saved.append(0)
elif collection==True:
for i in range(len(model_keras)):
#Expert-settings return automatically to default values when Expert-mode is unchecked
history = model_keras[i].fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
HISTORIES[i].append(history.history)
learningrate = K.get_value(history.model.optimizer.lr)
print("model_keras_path[i]")
print(model_keras_path[i])
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#one could 'break' here, but I want to update all records
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#For collections of models:
if record_broken:
#Save the model
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
SAVED[i].append(0)
callback_progressbar = float(counter)/nr_epochs
progress_callback.emit(100.0*callback_progressbar)
history_emit = history.history
history_emit["LearningRate"] = [learningrate]
history_callback.emit(history_emit)
Index.append(counter)
t2 = time.time()
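#Bookkeeping of meta.xlsx: write the header on the first epoch, afterwards append new rows every few seconds (interval taken from spinBox_saveMetaEvery)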
if collection==False:
if counter==0:
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#If this runs the first time, create the file with header
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s)"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#self.fittingpopups_ui[listindex].backup.append({"DF1":DF1})
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
#Save the dataframe at a sensible frequency (every spinBox_saveMetaEvery seconds)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
#elif counter%50==0: #otherwise save the history to excel after each n epochs
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#Saving
if os.path.exists(os.path.dirname(new_modelname)):#check if folder is (still) available
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s to directory:\n)"+new_modelname
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
t1 = time.time()
else:#If folder not available, create a folder in temp
text = "Failed to save meta.xlsx. -> Create folder in temp\n"
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text += "Your temp folder is here: "+str(temp_path)+"\n"
folder = os.path.split(new_modelname)[-2]
folder = os.path.split(folder)[-1]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it doesn't exist already
if not os.path.exists(os.path.join(temp_path,folder)):
os.mkdir(os.path.join(temp_path,folder))
text +="Created directory in temp:\n"+os.path.join(temp_path,folder)
print(text)
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,folder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"#reset textcolor to black
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#update the excel writer
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
print("There is already such a file...AID will add new data to it. Please check if this is OK")
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
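#Collection mode: the same meta.xlsx bookkeeping is done for every model of the collection, each with its own ExcelWriter in Writers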
if collection==True:
if counter==0:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#If this runs the first time, create the file with header
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(Writers[i],sheet_name='History')
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index = []#reset the Index list
#Save the dataframe at a sensible frequency (every spinBox_saveMetaEvery seconds)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#Saving
#TODO: save to temp, if harddisk not available to prevent crash.
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header= False)
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
print("meta.xlsx was saved")
t1 = time.time()
Index = []#reset the Index list
counter+=1
progress_callback.emit(100.0)
#If the original storing locating became inaccessible (folder name changed, HD unplugged...)
#the models and meta are saved to temp folder. Inform the user!!!
if saving_failed==True:
path_orig = str(self.fittingpopups_ui[listindex].lineEdit_modelname_pop.text())
text = "<html><head/><body><p>Original path:<br>"+path_orig+\
"<br>became inaccessible during training! Files were then saved to:<br>"+\
new_modelname.split(".model")[0]+"<br>To bring both parts back together\
, you have manually open the meta files (excel) and copy;paste each sheet. \
Sorry for the inconvenience.<br>If that happens often, you may contact \
the main developer and ask him to improve that.</p></body></html>"
text = "<span style=\' font-weight:600; color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
print('\a')#make a noise
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.setStyleSheet("background-color: yellow;")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.moveCursor(QtGui.QTextCursor.End)
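#Training finished or was stopped: flush any remaining history entries to meta.xlsx and close the Excel writer(s)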
if collection==False:
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
Index = []#reset the Index list
Histories = []#reset the Histories list
Saved = []
#does such a file exist already? append!
if not os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(writer,sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
writer.save()
writer.close()
if collection==True:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to the model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#does such a file exist already? append!
if not os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(Writers[i],sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header= False)
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
Writers[i].save()
Writers[i].close()
Index = []#reset the Index list
sess.close()
# try:
# aid_dl.reset_keras(model_keras)
# except:
# pass
def action_fit_model(self):
#Take the initialized model
#Unfortunately, in TensorFlow it is not possible to pass a model from
#one thread to another. Therefore I have to load and save the models each time :(
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Model could not be initialized")
# msg.setWindowTitle("Error")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#There should be at least two outputs (index 0 and 1)
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras[1][0].get_config()#["layers"]
nr_classes = int(model_keras[1][0].output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if collection==False:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, I have to reload the model action_fit_model_worker anyway
if collection==True:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = [new_modelname.split(".model")[0]+"_"+model_keras[0][i]+".model" for i in range(len(model_keras[0]))]
for i in range(len(self.model_keras_path)):
#save a first version of the .model
model_keras[1][i].save(self.model_keras_path[i])
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, I have to reload the model action_fit_model_worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
###################Popup Window####################################
self.fittingpopups.append(MyPopup())
ui = aid_frontend.Fitting_Ui()
ui.setupUi(self.fittingpopups[-1]) #append the ui to the last element on the list
self.fittingpopups_ui.append(ui)
# Increase the popupcounter by one; this will help to coordinate the data flow between main ui and popup
self.popupcounter += 1
listindex=self.popupcounter-1
##############################Define functions#########################
self.fittingpopups_ui[listindex].pushButton_UpdatePlot_pop.clicked.connect(lambda: self.update_historyplot_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Stop_pop.clicked.connect(lambda: self.stop_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.clicked.connect(lambda: self.pause_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveTextWindow_pop.clicked.connect(lambda: self.saveTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_clearTextWindow_pop.clicked.connect(lambda: self.clearTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_showModelSumm_pop.clicked.connect(lambda: self.showModelSumm_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveModelSumm_pop.clicked.connect(lambda: self.saveModelSumm_pop(listindex))
#Expert mode functions
#self.fittingpopups_ui[listindex].checkBox_pTr_pop.toggled.connect(lambda on_or_off: self.partialtrainability_activated_pop(on_or_off,listindex))
self.fittingpopups_ui[listindex].pushButton_lossW.clicked.connect(lambda: self.lossWeights_popup(listindex))
self.fittingpopups_ui[listindex].checkBox_lossW.clicked.connect(lambda on_or_off: self.lossWeights_activated(on_or_off,listindex))
self.fittingpopups_ui[listindex].Form.setWindowTitle(os.path.split(new_modelname)[1])
self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue(0) #set the progress bar to zero
self.fittingpopups_ui[listindex].pushButton_ShowExamleImgs_pop.clicked.connect(lambda: self.action_show_example_imgs_pop(listindex))
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.doubleClicked.connect(lambda item: self.tableWidget_HistoryInfo_pop_dclick(item,listindex))
#Cyclical learning rate extra settings
self.fittingpopups_ui[listindex].pushButton_cycLrPopup.clicked.connect(lambda: self.popup_clr_settings(listindex))
self.fittingpopups_ui[listindex].comboBox_optimizer.currentTextChanged.connect(lambda: self.expert_optimizer_changed(optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_LR_plot.clicked.connect(lambda: self.popup_lr_plot(listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_optimizer_pop.clicked.connect(lambda: self.optimizer_change_settings_popup(listindex))
worker = Worker(self.action_fit_model_worker)
#Get a signal from the worker to update the progressbar
worker.signals.progress.connect(self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue)
#Define a func which prints information during fitting to textbrowser
#And furthermore provide option to do real-time plotting
def real_time_info(dic):
self.fittingpopups_ui[listindex].Histories.append(dic) #append to a list. Will be used for plotting in the "Update plot" function
OtherMetrics_keys = self.fittingpopups_ui[listindex].RealTime_OtherMetrics.keys()
#Append to lists for real-time plotting
self.fittingpopups_ui[listindex].RealTime_Acc.append(dic["acc"][0])
self.fittingpopups_ui[listindex].RealTime_ValAcc.append(dic["val_acc"][0])
self.fittingpopups_ui[listindex].RealTime_Loss.append(dic["loss"][0])
self.fittingpopups_ui[listindex].RealTime_ValLoss.append(dic["val_loss"][0])
keys = list(dic.keys())
#sort keys alphabetically
keys_ = [l.lower() for l in keys]
ind_sort = np.argsort(keys_)
keys = list(np.array(keys)[ind_sort])
#First keys should always be acc,loss,val_acc,val_loss -in this order
keys_first = ["acc","loss","val_acc","val_loss"]
for i in range(len(keys_first)):
if keys_first[i] in keys:
ind = np.where(np.array(keys)==keys_first[i])[0][0]
if ind!=i:
del keys[ind]
keys.insert(i,keys_first[i])
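#Illustrative example of the reordering above: alphabetically sorted keys
#['acc','f1_score','loss','lr','val_acc','val_loss'] become
#['acc','loss','val_acc','val_loss','f1_score','lr'], i.e. the four standard
#metrics are moved to the front while the remaining keys keep their relative order.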
for key in keys:
if "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
if not key in OtherMetrics_keys: #if this key is missing in self.fittingpopups_ui[listindex].RealTime_OtherMetrics attach it!
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key] = []
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key].append(dic[key])
dic_text = [("{} {}".format(item, np.round(amount[0],4))) for item, amount in dic.items()]
text = "Epoch "+str(self.fittingpopups_ui[listindex].epoch_counter)+"\n"+" ".join(dic_text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
self.fittingpopups_ui[listindex].epoch_counter+=1
if self.fittingpopups_ui[listindex].epoch_counter==1:
#for each key, put a checkbox on the tableWidget_HistoryInfo_pop
rowPosition = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.rowCount()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.insertRow(rowPosition)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setColumnCount(len(keys))
for columnPosition in range(len(keys)):#(2,4):
key = keys[columnPosition]
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem(str(key))#("item {0} {1}".format(rowNumber, columnNumber))
item.setBackground(QtGui.QColor(self.colorsQt[columnPosition]))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setItem(rowPosition, columnPosition, item)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeColumnsToContents()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeRowsToContents()
########################Real-time plotting#########################
if self.fittingpopups_ui[listindex].checkBox_realTimePlotting_pop.isChecked():
#get the range for the real time fitting
if hasattr(self.fittingpopups_ui[listindex], 'historyscatters'):#if update plot was hit before
x = range(len(self.fittingpopups_ui[listindex].Histories))
realTimeEpochs = self.fittingpopups_ui[listindex].spinBox_realTimeEpochs.value()
if len(x)>realTimeEpochs:
x = x[-realTimeEpochs:]
#is any metric checked on the table?
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is it checked?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
for i in range(len(self.fittingpopups_ui[listindex].historyscatters)): #iterate over all available plots
key = list(self.fittingpopups_ui[listindex].historyscatters.keys())[i]
if key in selected_items:
if key=="acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Acc).astype(float)
elif key=="val_acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValAcc).astype(float)
elif key=="loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Loss).astype(float)
elif key=="val_loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValLoss).astype(float)
elif "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
y = np.array(self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key]).astype(float).reshape(-1,)
else:
return
#Only show the last 250 epochs
if y.shape[0]>realTimeEpochs:
y = y[-realTimeEpochs:]
if y.shape[0]==len(x):
self.fittingpopups_ui[listindex].historyscatters[key].setData(x, y)#,pen=None,symbol='o',symbolPen=None,symbolBrush=brush,clear=False)
else:
print("x and y are not the same size! Omitted plotting. I will try again to plot after the next epoch.")
pg.QtGui.QApplication.processEvents()
self.fittingpopups_ui[listindex].epoch_counter = 0
#self.fittingpopups_ui[listindex].backup = [] #backup of the meta information -> in case the original folder is not accessible anymore
worker.signals.history.connect(real_time_info)
#Finally start the worker!
self.threadpool.start(worker)
self.fittingpopups[listindex].show()
def action_lr_finder(self):
#lr_find
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is not supported for Collections of models. Please select single model")
msg.setWindowTitle("LR screening not supported for Collections!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_classes = int(model_keras.output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
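#Example of the naming scheme above (path is illustrative): "C:/models/mymodel.model"
#becomes "C:/models/mymodel_0.model". This saved copy is what the worker loads in its
#own thread, since Keras/TensorFlow models cannot be passed between threads directly.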
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, the model has to be reloaded inside the worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
worker = Worker(self.action_lr_finder_worker)
#Get a signal from the worker to update the progressbar
worker.signals.progress.connect(print)
worker.signals.history.connect(print)
#Finally start the worker!
self.threadpool.start(worker)
def action_lr_finder_worker(self,progress_callback,history_callback):
if self.radioButton_cpu.isChecked():
gpu_used = False
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
gpu_used = True
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
#listindex = self.popupcounter-1
#Get user-specified filename for the new model
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is currently not supported for Collections of models. Please use single model")
msg.setWindowTitle("LR screening not supported for Collections")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
##############Main function after hitting FIT MODEL####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#dont specify loss and optimizer yet...expert stuff will follow and model will be recompiled
elif deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#dont specify loss and optimizer yet...expert stuff will follow and model will be recompiled
#Collect all information about the fitting routine that was user
#defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
trainable_original, layer_names = self.trainable_original, self.layer_names
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
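#Illustrative example (the actual text is user-defined): a kernel lineEdit
#containing "1,10" is parsed by ast.literal_eval into the tuple (1, 10); the
#same applies to the angle lineEdit, e.g. "-10,10" -> (-10, 10).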
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False) #setChecked returns None, so do not assign its result
print("Expert mode was switched off. Not implemented yet for collections")
expert_mode = False
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy()
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print(class_weight)
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
###############################Expert Mode values##################
if expert_mode==True:
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
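#Example: with 10 layers in total and train_last_layers_n = 2 this yields
#trainable_new = [False]*8 + [True]*2, i.e. only the last two layers stay trainable.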
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dropout layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if dropout_expert_on==True:
#The user apparently want to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
print(text)
else:
text = "Could not understand user input at Expert->Dropout"
print(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model is not equal to the required do_list from user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
print(text_updates)
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(self.ram)))
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0: #Here, the entire training set needs to be used! Not only random images!
#Replace=true: means individual cells could occur several times
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
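#Note: mean_trainingdata/std_trainingdata are averages of the per-file means and
#standard deviations, not pooled statistics over all images; this is a reasonable
#approximation as long as the selected files contribute comparable amounts of data.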
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
percDataV = float(self.popup_lrfinder_ui.doubleSpinBox_percDataV.value())
percDataV = percDataV/100.0
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if len(self.ram)==0:#if there is no data available on ram
#replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:#get a similar generator, using the ram-data
gen_valid = aid_img.gen_crop_img_ram(self.ram,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
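#np_utils.to_categorical converts class indices to one-hot vectors, e.g. with
#nr_classes = 4 the label 2 becomes [0, 0, 1, 0].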
xtra_valid = np.concatenate(xtra_valid)
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
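#Center-crop example (values are illustrative): if dim_val[2] = 92 and crop = 64,
#remove = int(92/2 - 64/2) = 14 and the slice 14:78 keeps the central 64x64 region.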
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
###################Load training data####################
#####################and perform#########################
##################Image augmentation#####################
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
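#Worked example: for crop = 64, sqrt(64**2 + 64**2) ~= 90.5, which is rounded up to
#the next even number, cropsize2 = 92, so even a 45 degree rotation of the 92x92
#patch still fully covers the final 64x64 crop without empty corners.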
#Should only a certain percentage of the numbers given in the table be sampled?
percDataT = float(self.popup_lrfinder_ui.doubleSpinBox_percDataT.value())
percDataT = percDataT/100.0
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Affine augmentation
X_train = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_train = np.copy(y_train)
Y_train = np_utils.to_categorical(y_train, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
dim = X_train.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_train = X_train[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
#X_train = np.copy(X_train) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#In each iteration, start with non-augmented data
#X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
#X_train = X_train.astype(np.uint8)
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
X_train = X_train.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_train = aid_img.contrast_augm_cv2(X_train,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_train = aid_img.satur_hue_augm_cv2(X_train.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_train = aid_img.avg_blur_cv2(X_train,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_train = aid_img.gauss_blur_cv(X_train,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_train = aid_img.motion_blur_cv(X_train,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_train = aid_img.brightn_noise_augm_cv2(X_train,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_train = aid_img.image_normalization(X_train,norm,mean_trainingdata,std_trainingdata)
else:
X_train = aid_img.image_normalization(X_train,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
if verbose == 1:
print("X_train.shape")
print(X_train.shape)
if xtra_in==True:
print("Add Xtra Data to X_train")
X_train = [X_train,xtra_train]
###################################################
###############Actual fitting######################
###################################################
batch_size = int(self.popup_lrfinder_ui.spinBox_batchSize.value())
stepsPerEpoch = int(self.popup_lrfinder_ui.spinBox_stepsPerEpoch.value())
epochs = int(self.popup_lrfinder_ui.spinBox_epochs.value())
start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
valMetrics = bool(self.popup_lrfinder_ui.checkBox_valMetrics.isChecked())
####################lr_find algorithm####################
if model_keras_p == None:
lrf = aid_dl.LearningRateFinder(model_keras)
elif model_keras_p != None:
lrf = aid_dl.LearningRateFinder(model_keras_p)
if valMetrics==True:
lrf.find([X_train,Y_train],[X_valid,Y_valid],start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
else:
lrf.find([X_train,Y_train],None,start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
skipBegin,skipEnd = 10,1
self.learning_rates = lrf.lrs[skipBegin:-skipEnd]
self.losses_or = lrf.losses_or[skipBegin:-skipEnd]
self.losses_sm = lrf.losses_sm[skipBegin:-skipEnd]
self.accs_or = lrf.accs_or[skipBegin:-skipEnd]
self.accs_sm = lrf.accs_sm[skipBegin:-skipEnd]
self.val_losses_sm = lrf.val_losses_sm[skipBegin:-skipEnd]
self.val_losses_or = lrf.val_losses_or[skipBegin:-skipEnd]
self.val_accs_sm = lrf.val_accs_sm[skipBegin:-skipEnd]
self.val_accs_or = lrf.val_accs_or[skipBegin:-skipEnd]
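#The first 10 and the last recorded values of the learning-rate sweep are discarded
#(skipBegin/skipEnd), since the loss is typically still settling at the very start
#and diverging at the very end of the sweep, which would distort the plot.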
# Enable the groupboxes
self.popup_lrfinder_ui.groupBox_singleLr.setEnabled(True)
self.popup_lrfinder_ui.groupBox_LrRange.setEnabled(True)
self.update_lrfind_plot()
def update_lrfind_plot(self):
if not hasattr(self, 'learning_rates'):
return
metric = str(self.popup_lrfinder_ui.comboBox_metric.currentText())
color = self.popup_lrfinder_ui.pushButton_color.palette().button().color()
width = int(self.popup_lrfinder_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor = pg.mkPen(color, width=width)
smooth = bool(self.popup_lrfinder_ui.checkBox_smooth.isChecked())
try:# try to empty the plot
self.popup_lrfinder_ui.lr_plot.clear()
#self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_line)
except:
pass
if metric=="Loss" and smooth==True:
self.y_values = self.losses_sm
elif metric=="Loss" and smooth==False:
self.y_values = self.losses_or
elif metric=="Loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.losses_sm,n=1)
elif metric=="Loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.losses_or,n=1)
elif metric=="Accuracy" and smooth==True:
self.y_values = self.accs_sm
elif metric=="Accuracy" and smooth==False:
self.y_values = self.accs_or
elif metric=="Accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.accs_sm,n=1)
elif metric=="Accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.accs_or,n=1)
elif metric=="Val. loss" and smooth==True:
self.y_values = self.val_losses_sm
elif metric=="Val. loss" and smooth==False:
self.y_values = self.val_losses_or
elif metric=="Val. loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_losses_sm,n=1)
elif metric=="Val. loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_losses_or,n=1)
elif metric=="Val. accuracy" and smooth==True:
self.y_values = self.val_accs_sm
elif metric=="Val. accuracy" and smooth==False:
self.y_values = self.val_accs_or
elif metric=="Val. accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_accs_sm,n=1)
elif metric=="Val. accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_accs_or,n=1)
else:
print("The combination of "+str(metric)+" and smooth="+str(smooth)+" is not supported!")
if len(self.learning_rates)==len(self.y_values):
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates), y=self.y_values,pen=pencolor,name=metric)
elif len(self.learning_rates)-1==len(self.y_values):
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates)[1:], y=self.y_values,pen=pencolor,name=metric)
else:
print("No data available. Probably, validation metrics were not computed. Please click Run again.")
return
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_line)
#In case the groupBox_singleLr is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_singleLr.isChecked():
self.get_lr_single(on_or_off=True)
#In case the groupBox_LrRange is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_LrRange.isChecked():
self.get_lr_range(on_or_off=True)
def get_lr_single(self,on_or_off):
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
ind = np.argmin(self.y_values)#find location of loss-minimum
mini_x = self.learning_rates[ind]
mini_x = np.log10(mini_x)
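#The LR-finder plot uses log10(learning rate) on the x-axis (see update_lrfind_plot),
#so the marker for the loss minimum has to be placed in log space as well;
#position_changed/region_changed convert back via 10**value when reporting the rate.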
pen = pg.mkPen(color="w")
self.lr_single = pg.InfiniteLine(pos=mini_x, angle=90, pen=pen, movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_single)
def position_changed():
#where did the user drag the region_linfit to?
new_position = 10**(self.lr_single.value())
self.popup_lrfinder_ui.lineEdit_singleLr.setText(str(new_position))
self.lr_single.sigPositionChangeFinished.connect(position_changed)
if on_or_off==False: #user unchecked the groupbox->remove the InfiniteLine if possible
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_single)
except:
pass
def get_lr_range(self,on_or_off):
#print(on_or_off)
#start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
#stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
start_x = 0.00001
start_x = np.log10(start_x)
ind = np.argmin(self.y_values)#find location of loss-minimum
end_x = self.learning_rates[ind]
end_x = np.log10(end_x)
self.lr_region = pg.LinearRegionItem([start_x, end_x], movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_region)
def region_changed():
#where did the user drag the region_linfit to?
new_region = self.lr_region.getRegion()
new_region_left = 10**(new_region[0])
new_region_right = 10**(new_region[1])
self.popup_lrfinder_ui.lineEdit_LrMin.setText(str(new_region_left))
self.popup_lrfinder_ui.lineEdit_LrMax.setText(str(new_region_right))
self.lr_region.sigRegionChangeFinished.connect(region_changed)
if on_or_off==False: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_region)
except:
pass
def action_show_example_imgs(self): #this function is only for the main window
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Get state of the comboboxes!
tr_or_valid = str(self.comboBox_ShowTrainOrValid.currentText())
w_or_wo_augm = str(self.comboBox_ShowWOrWoAug.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
#which index is requested by user:?
req_index = int(self.spinBox_ShowIndex.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata was zero and is now set to 0.0001 to avoid div. by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
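#Same enlargement as in the fitting routine: e.g. crop = 64 leads to cropsize2 = 92,
#which leaves enough margin so rotations do not introduce edge effects before the
#final center-crop back to 64x64.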
############Cropping and image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try: #When all cells are at the border of the image, the generator will be empty. Avoid program crash by try, except
X.append(next(gen)[0])
except StopIteration:
print("All events at border of image and discarded")
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
X = X.astype(np.uint8) #make sure we stay in uint8
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
if verbose: print("Shape of the shown images is:"+str(X.shape))
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try:
X.append(next(gen)[0])
except:
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3) #Add the "channels" dimension
else:
print("Invalid data dimension: " +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
if verbose: print("Shape of the shown images is: "+str(X.shape))
#Is there already anything shown on the widget?
children = self.widget_ViewImages.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.gridLayout_ViewImages.count())):
widgetToRemove = self.gridLayout_ViewImages.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.gridLayout_ViewImages = QtWidgets.QGridLayout(self.widget_ViewImages)
for i in range(5):
if channels==1:
img = X[i,:,:,0] #TensorFlow
if channels==3:
img = X[i,:,:,:] #TensorFlow
#Stretch pixel value to full 8bit range (0-255); only for display
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
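#Display-only contrast stretch (values are illustrative): pixel values in [30, 200]
#are mapped linearly to [0, 255]; the normalized data fed to the model is unaffected,
#since img is a copy of the slice from X.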
img = img.astype(np.uint8)
if channels==1:
height, width = img.shape
if channels==3:
height, width, _ = img.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pygtgraph instead, in order to allow for exporting images
self.image_show = pg.ImageView(self.widget_ViewImages)
self.image_show.show()
if verbose: print("Shape of zoomed image: "+str(img.shape))
if channels==1:
self.image_show.setImage(img.T,autoRange=False)
if channels==3:
self.image_show.setImage(np.swapaxes(img,0,1),autoRange=False)
self.image_show.ui.histogram.hide()
self.image_show.ui.roiBtn.hide()
self.image_show.ui.menuBtn.hide()
self.gridLayout_ViewImages.addWidget(self.image_show, 1,i)
self.widget_ViewImages.show()
def tableWidget_HistoryInfo_pop_dclick(self,item,listindex):
if item is not None:
tableitem = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
#self.update_historyplot_pop(listindex)
def action_show_example_imgs_pop(self,listindex): #this function is only for the main window
#Get state of the comboboxes!
tr_or_valid = str(self.fittingpopups_ui[listindex].comboBox_ShowTrainOrValid_pop.currentText())
w_or_wo_augm = str(self.fittingpopups_ui[listindex].comboBox_ShowWOrWoAug_pop.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.value())
norm = str(self.fittingpopups_ui[listindex].comboBox_Normalization_pop.currentText())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
paddingMode = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText()).lower()
#which index is requested by user:?
req_index = int(self.fittingpopups_ui[listindex].spinBox_ShowIndex_pop.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
if len(self.ram)==0:
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata turned out to be zero. I set it to 0.0001, to avoid division by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
############Get cropped images with image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X.shape))
if channels==1:
#Add the "channels" dimension
X = np.expand_dims(X,3)
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels = 3
elif len(X.shape)==3:
channels = 1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
#Is there already anything shown on the widget?
children = self.fittingpopups_ui[listindex].widget_ViewImages_pop.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.count())):
widgetToRemove = self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop = QtWidgets.QGridLayout(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
for i in range(5):
if channels==1:
img = X[i,:,:,0]
if channels==3:
img = X[i,:,:,:]
#Normalize image to full 8bit range (from 0 to 255)
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
img = img.astype(np.uint8)
# height, width = img_zoom.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages_pop.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pyqtgraph instead, in order to allow exporting images
self.fittingpopups_ui[listindex].image_show_pop = pg.ImageView(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
self.fittingpopups_ui[listindex].image_show_pop.show()
if channels==1:
self.fittingpopups_ui[listindex].image_show_pop.setImage(img.T,autoRange=False)
if channels==3:
self.fittingpopups_ui[listindex].image_show_pop.setImage(np.swapaxes(img,0,1),autoRange=False)
self.fittingpopups_ui[listindex].image_show_pop.ui.histogram.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.roiBtn.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.menuBtn.hide()
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.addWidget(self.fittingpopups_ui[listindex].image_show_pop, 1,i)
self.fittingpopups_ui[listindex].widget_ViewImages_pop.show()
def get_color_mode(self):
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
return "Grayscale"
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
return "RGB"
else:
return None
def checkBox_rollingMedian_statechange(self,item):#used in frontend
self.horizontalSlider_rollmedi.setEnabled(item)
def update_historyplot(self):
#After loading a history, there are checkboxes available. Check, if user checked some:
colcount = self.tableWidget_HistoryItems.columnCount()
#Collect items that are checked
selected_items = []
Colors = []
for colposition in range(colcount):
#get checkbox item and; is it checked?
cb = self.tableWidget_HistoryItems.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
#Get a list of the color from the background of the table items
DF1 = self.loaded_history
#Clear the plot
self.widget_Scatterplot.clear()
#Add plot
self.plt1 = self.widget_Scatterplot.addPlot()
self.plt1.showGrid(x=True,y=True)
self.plt1.addLegend()
self.plt1.setLabel('bottom', 'Epoch', units='')
self.plot_rollmedis = [] #list for plots of rolling medians
if "Show saved only" in selected_items:
#nr_of_selected_items = len(selected_items)-1
#get the "Saved" column from DF1
saved = DF1["Saved"]
saved = np.where(np.array(saved==1))[0]
# else:
# nr_of_selected_items = len(selected_items)
self.Colors = Colors
scatter_x,scatter_y = [],[]
for i in range(len(selected_items)):
key = selected_items[i]
if key!="Show saved only":
df = DF1[key]
epochs = range(len(df))
win = int(self.horizontalSlider_rollmedi.value())
rollmedi = df.rolling(window=win).median()
if "Show saved only" in selected_items:
df = np.array(df)[saved]
epochs = np.array(epochs)[saved]
rollmedi = pd.DataFrame(df)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/0.1_mgmnt.prep.conv.ipynb (unless otherwise specified).
__all__ = ['englishStemmer', 'default_params', 'ConventionalPreprocessing', 'open_file', 'get_files', 'get_file_zip']
# Cell
from typing import List, Set, Callable, Tuple, Dict, Optional
import re
from nltk.stem.snowball import SnowballStemmer
import nltk
import pandas as pd
import glob
import os
import pathlib
from string import punctuation
import csv
from nltk.stem.snowball import SnowballStemmer
englishStemmer=SnowballStemmer("english")
# Cell
from tensorflow.keras.preprocessing import text
from pathlib import Path
import glob
from datetime import datetime
# Cell
# Imports
import pandas as pd
import sentencepiece as sp
import numpy as np
import json
from pathlib import Path
import sys
import sentencepiece as spm
from tokenizers import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
# Cell
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Cell
def default_params():
return {
'system': 'sacp-python-common',
'path_zip': Path("cisco/sacp-python-common.zip"),
'saving_path': 'cisco/sacp_data/',
'language': 'english',
'model_prefix':'test_data/sentencepiece/wiki_py_java_bpe_8k' #For BPE Analysis
}
# Cell
class ConventionalPreprocessing():
'''NLTK libraries for Conventional Preprocessing'''
def __init__(self, params, bpe = False):
self.params = params
#If BPE provided, then preprocessing with BPE is allowed on CONV
if bpe:
self.sp_bpe = spm.SentencePieceProcessor()
self.sp_bpe.load(params['model_prefix']+'.model')
else:
self.sp_bpe = None
pass
def bpe_pieces_pipeline(self, doc_list):
'''Computes BPE preprocessing according to params'''
encoded_str = ''
if self.sp_bpe is None:
logging.info('Provide a BPE Model!')
else:
encoded_str = [self.sp_bpe.encode_as_pieces(doc) for doc in doc_list]
return encoded_str
#ToDo: transform this into a list comprehension
def clean_punctuation(self, token):
#remove terms !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~0123456789
return re.sub(r'[^a-zA-Z\s]', ' ', token, re.I|re.A)
def split_camel_case_token(self, token):
return re.sub('([a-z])([A-Z])', r'\1 \2', token)
def remove_terms(self, filtered_tokens):
remove_terms = punctuation + '0123456789'
return [token for token in filtered_tokens if token not in remove_terms and len(token)>2 and len(token)<21]
def stemmer(self, filtered_tokens):
return [englishStemmer.stem(token) for token in filtered_tokens ]
def stop_words(self, filtered_tokens):
stop_words = nltk.corpus.stopwords.words(self.params['language'])
return [token for token in filtered_tokens if token not in stop_words]
def basic_pipeline(self, dict_filenames):
'''@dict_filenames: {filename: code}'''
pre_process = [( key.replace('.txt', '-pre.txt') , self.clean_punctuation(dict_filenames[key][0]) ) for key in dict_filenames]
pre_process = [( doc[0] , self.split_camel_case_token(doc[1]) ) for doc in pre_process]
pre_process = [( doc[0] , doc[1].lower() ) for doc in pre_process]
pre_process = [( doc[0] , doc[1].strip()) for doc in pre_process] # Leading whitespace is removed
pre_process_tokens = [(doc[0] , nltk.WordPunctTokenizer().tokenize(doc[1])) for doc in pre_process]
filtered_tokens = [(doc[0], self.stop_words(doc[1]) ) for doc in pre_process_tokens] #Stop Words
filtered_tokens = [(doc[0], self.stemmer(doc[1]) ) for doc in filtered_tokens] #Filtering Stemmings
filtered_tokens = [(doc[0], self.remove_terms(doc[1])) for doc in filtered_tokens] #Filtering remove-terms
pre_process = [(doc[0], ' '.join(doc[1])) for doc in filtered_tokens]
return pre_process
def fromdocs_pipeline(self, docs):
#TODO
"""@tokenized_file: a list of tokens that represents a document/code"""
pre_process = [ self.clean_punctuation(doc) for doc in docs]
logging.info('fromdocs_pipeline: clean punctuation')
pre_process = [ self.split_camel_case_token(doc) for doc in pre_process]
logging.info('fromdocs_pipeline: camel case')
pre_process = [ doc.lower() for doc in pre_process]
logging.info('fromdocs_pipeline: lower case')
pre_process = [ doc.strip() for doc in pre_process] # Leading whitespace is removed
logging.info('fromdocs_pipeline: white space removed')
pre_process_tokens = [ nltk.WordPunctTokenizer().tokenize(doc) for doc in pre_process]
logging.info('fromdocs_pipeline: WordPunctTokenizer')
filtered_tokens = [ self.stop_words(doc) for doc in pre_process_tokens] #Stop Words
logging.info('fromdocs_pipeline: stop words')
filtered_tokens = [ self.stemmer(doc) for doc in filtered_tokens] #Filtering Stemmings
logging.info('fromdocs_pipeline: stemming')
filtered_tokens = [ self.remove_terms(doc) for doc in filtered_tokens] #Filtering remove-terms
logging.info('fromdocs_pipeline: removed special terms')
pre_process = [ ' '.join(doc) for doc in filtered_tokens]
logging.info('fromdocs_pipeline END')
return pre_process
def frombatch_pipeline(self, batch):
#TODO
"""@batch: a TensorFlow Dataset Batch"""
pre_process = [ self.clean_punctuation( doc.decode("utf-8") ) for doc in batch]
logging.info('frombatch_pipeline: clean punctuation')
pre_process = [ self.split_camel_case_token(doc) for doc in pre_process]
logging.info('frombatch_pipeline: camel case')
pre_process = [ doc.lower() for doc in pre_process]
logging.info('frombatch_pipeline: lower case')
pre_process = [ doc.strip() for doc in pre_process] # Leading whitespace is removed
logging.info('frombatch_pipeline: white space removed')
pre_process_tokens = [ nltk.WordPunctTokenizer().tokenize(doc) for doc in pre_process]
logging.info('frombatch_pipeline: WordPunctTokenizer')
filtered_tokens = [ self.stop_words(doc) for doc in pre_process_tokens] #Stop Words
logging.info('frombatch_pipeline: Stop words')
filtered_tokens = [ self.stemmer(doc) for doc in filtered_tokens] #Filtering Stemmings
logging.info('frombatch_pipeline: Stemmings')
filtered_tokens = [ self.remove_terms(doc) for doc in filtered_tokens] #Filtering remove-terms
logging.info('frombatch_pipeline: removed special terms')
#pre_process = [ ' '.join(doc) for doc in filtered_tokens]
logging.info('frombatch_pipeline [END]')
return filtered_tokens
def fromtensor_pipeline(self, ts_x):
"""@ts_x: es un elemento del tensor"""
#TODO
pre_process = self.clean_punctuation(ts_x)
pre_process = self.split_camel_case_token(pre_process)
pre_process = pre_process.lower()
pre_process = pre_process.strip()
pre_process = nltk.WordPunctTokenizer().tokenize(pre_process)
filtered_tokens = self.stop_words(pre_process)
filtered_tokens = self.stemmer(filtered_tokens)
filtered_tokens = self.remove_terms(filtered_tokens)
pre_process = ' '.join(filtered_tokens)
logging.info('fromtensor_pipeline END')
return pre_process
def SaveCorpus(self, df, language='js', sep=',', mode='a'):
timestamp = datetime.timestamp(datetime.now())
path_to_link = self.params['saving_path'] + '['+ self.params['system'] + '-' + language + '-{}].csv'.format(timestamp)
df.to_csv(path_to_link, header=True, index=True, sep=sep, mode=mode)
logging.info('Saving in...' + path_to_link)
pass
def LoadCorpus(self, timestamp, language='js', sep=',', mode='a'):
path_to_link = self.params['saving_path'] + '['+ self.params['system'] + '-' + language + '-{}].csv'.format(timestamp)
return pd.read_csv(path_to_link, header=0, index_col=0, sep=sep)
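# --- Hedged usage sketch (not part of the generated module above) ---
# Shows how ConventionalPreprocessing is typically driven with the default
# parameters and no BPE model. It assumes the NLTK 'stopwords' corpus is
# available locally; the sample strings below are hypothetical.
if __name__ == "__main__":
    prep = ConventionalPreprocessing(default_params(), bpe=False)
    raw_docs = ["public void readConfigFile(String filePath)", "def load_data(path): return open(path)"]
    print(prep.fromdocs_pipeline(raw_docs))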
# -*- coding: utf-8 -*-
"""IATI_DFID_DATA.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1jkrOj2Ma_W14Tjh6bOuDaKVHfAQKZz8H
# This code pulls in a CSV file on DFID's work in Uganda, which has been produced using the IATI data standard, chooses the specific fields (columns) that I want to work on, and exports it to an Excel sheet for ease of use.
This is the full pandas code. It's broken down into sections below.
"""
# this imports pandas and renames it as pd
import pandas as pd
# this fetches the CSV file from a URL and reads the contents
datadoc = pd.read_csv("https://raw.githubusercontent.com/sj21446/IATI_data/master/DFID_Uganda_current_activities_04062020.csv")
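# Hedged sketch of the remaining steps described in the notebook text above:
# keep only the columns of interest and export them to an Excel sheet. The
# field names below are hypothetical placeholders (not verified IATI column
# names), and to_excel assumes an Excel writer such as openpyxl is installed.
cols_of_interest = [c for c in datadoc.columns if c in ("iati-identifier", "title", "start-actual")]
datadoc[cols_of_interest].to_excel("DFID_Uganda_selected_fields.xlsx", index=False)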
import numpy as np
import pandas as pd
from lib import pandas_option as pd_op
from lib import repository
def save_entry(date, side, price):
sql = "insert into backtest_entry values('{date}','{side}',{price},0)" \
.format(date=date, side=side, price=price)
repository.execute(database=database, sql=sql, log=False)
volume_ma = 10
diff_ratio = 2
back_min = 5
print("----------------------------------------------")
print("volume_ma", volume_ma, "diff_ratio", diff_ratio, "back_min", back_min)
asset = 1000000
database = "tradingbot"
sql = "truncate backtest_entry"
repository.execute(database=database, sql=sql, log=False)
pd_op.display_max_columns()
pd_op.display_round_down()
sql = """
select
b2.Date as date,
b3.Close as fr_Price,
b2.Close as to_Price,
b1.Volume as v1,
b2.Volume as v2,
b1.Date as bb1,
b2.Date as bb2,
b3.Date as bb3
from
bitflyer_btc_ohlc_1M b1
inner join
bitflyer_btc_ohlc_1M b2
on (b1.Date + interval 1 minute) = b2.Date
inner join
bitflyer_btc_ohlc_1M b3
on (b3.Date + interval {back_min} minute) = b2.Date
order by
Date
""".format(back_min=back_min)
be = repository.read_sql(database=database, sql=sql)
df = be["v1"]
sma = df.rolling(volume_ma).mean()[:volume_ma]
be["v1_ma"] = pd.concat([sma, df[volume_ma:]])
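# Note (assumption about the intent of the two lines above): the first
# `volume_ma` rows keep the rolling mean (NaN while the window is still
# filling) and every later row keeps the raw volume. A plain moving average
# over the whole column would instead be:
# be["v1_ma"] = be["v1"].rolling(volume_ma, min_periods=1).mean()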
"""
Test parameters i.e. sample data, known past errors, etc.
"""
from functools import lru_cache
from pathlib import Path
from traceback import print_tb
from typing import List
import geopandas as gpd
import numpy as np
import pandas as pd
import pytest
from click.testing import Result
from hypothesis.strategies import floats, integers, tuples
from shapely.geometry import (
LineString,
MultiLineString,
MultiPolygon,
Point,
Polygon,
box,
)
from shapely.wkt import loads
from fractopo import general
from fractopo.analysis import length_distributions, parameters
from fractopo.general import (
CC_branch,
CI_branch,
E_node,
I_node,
II_branch,
X_node,
Y_node,
bounding_polygon,
determine_azimuth,
read_geofile,
)
from fractopo.tval import trace_validation
from fractopo.tval.trace_validators import (
GeomNullValidator,
GeomTypeValidator,
MultiJunctionValidator,
MultipleCrosscutValidator,
SharpCornerValidator,
StackedTracesValidator,
TargetAreaSnapValidator,
UnderlappingSnapValidator,
VNodeValidator,
)
from tests import trace_builder
from tests.sample_data.py_samples.samples import (
results_in_false_positive_stacked_traces_list,
results_in_false_positive_underlapping_ls,
results_in_multijunction_why_ls_list,
results_in_multijunction_why_ls_list_2,
results_in_overlapping_ls_list,
should_result_in_multij_ls_list,
should_result_in_some_error_ls_list,
should_result_in_target_area_underlapping_ls,
should_result_in_target_area_underlapping_poly,
should_result_in_vnode_ls_list,
v_node_network_error_ls_list,
)
GEOMETRY_COLUMN = trace_validation.Validation.GEOMETRY_COLUMN
ERROR_COLUMN = trace_validation.Validation.ERROR_COLUMN
SNAP_THRESHOLD = 0.001
SNAP_THRESHOLD_ERROR_MULTIPLIER = 1.1
AREA_EDGE_SNAP_MULTIPLIER = 5
def click_error_print(result: Result):
"""
Print click result traceback.
"""
if result.exit_code == 0:
return
assert result.exc_info is not None
_, _, tb = result.exc_info
# print(err_class, err)
print_tb(tb)
print(result.output)
raise Exception(result.exception)
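# Hedged usage sketch: click_error_print expects the Result object returned by
# click's test runner. The tiny command below exists only for illustration and
# is not part of fractopo:
#
# from click.testing import CliRunner
# import click
#
# @click.command()
# def _demo():
#     click.echo("ok")
#
# click_error_print(CliRunner().invoke(_demo))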
class Helpers:
"""
Parameters for tests.
"""
valid_geom = LineString(((0, 0), (1, 1)))
invalid_geom_empty = LineString()
invalid_geom_none = None
invalid_geom_multilinestring = MultiLineString(
[((0, 0), (1, 1)), ((-1, 0), (1, 0))]
)
mergeable_geom_multilinestring = MultiLineString(
[((0, 0), (1, 1)), ((1, 1), (2, 2))]
)
(
valid_traces,
invalid_traces,
valid_areas_geoseries,
invalid_areas_geoseries,
) = trace_builder.main(False, SNAP_THRESHOLD, SNAP_THRESHOLD_ERROR_MULTIPLIER)
valid_error_srs = pd.Series([[] for _ in valid_traces.geometry.values])
# FLOWDO
# FlowDo is an application created for the purpose of managing business activities like Inventory Maintenance, Billing, Sales analysis and other business functions.
# Developed by:
# <NAME> (@Moulishankar10)
# <NAME> (@ToastCoder)
# REQUIRED MODULES
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
# VISUALIZER FUNCTIONS:
# Used to display keymaps for main menu and every submenu
# LIST OF KEYMAPS TO BE DISPLAYED IN MAIN MENU
def mainOptionsVisualizer():
print("""
\n******************************************** MAIN MENU ***********************************************\n
Press 1 to take a New Order
Press 2 to explore the Revenue options
Press 3 to explore the Inventory properties
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN ORDER OPTIONS
def orderOptionsVisualizer():
print("""
\n******************************************** ORDER MENU **********************************************\n
Press 1 to add a new product
Press 2 to remove a product
Press 3 to view the bill
Press 4 to modify your order
Press 5 to proceed your order
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN REVENUE OPTIONS
def revenueOptionsVisualizer():
print("""
\n******************************************* REVENUE MENU *********************************************\n
Press 1 to view the Revenue Database
Press 2 to view a Month's Total Revenue
Press 3 to view the product which generated the Maximum Profit in any month
Press 4 to view the product which generated the Minimum Profit in any month
Press 5 to view the Revenue Trend Graph for any year
Press 9 for exit
\n******************************************************************************************************\n
""")
# LIST OF KEYMAPS TO BE DISPLAYED IN INVENTORY OPTIONS
def inventoryOptionsVisualizer():
print("""
\n****************************************** INVENTORY MENU *********************************************\n
Press 1 to view the Stock Inventory
Press 2 to add a new product to your inventory
Press 3 to remove a product from your inventory
Press 4 to modify the properties of existing products
Press 9 for exit
\n*******************************************************************************************************\n
""")
# USED TO CHECK IF THE COLUMN FOR THE MONTH IS CREATED OR NOT
def revMonthChecker():
today = datetime.now()
frmt = today.strftime('%m-%Y')
rev_data = pd.read_csv('data/revenue.csv')
header = list(rev_data.columns)
if frmt not in header:
x = [0]*len(rev_data)
rev_data[frmt] = x
rev_data.to_csv("data/revenue.csv", index = False)
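# Hedged usage note (the original main loop is not shown here): calling
# revMonthChecker() once at startup guarantees that the current 'MM-YYYY'
# column exists in data/revenue.csv, so Biller.postProcessor() and the revenue
# views below can index rev_data[frmt] without a KeyError.
# revMonthChecker()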
# CLASS FOR BILLING OPERATIONS
class Biller:
def __init__(self,l):
self.prod_name=[]
self.quantity=[]
self.price=[]
self.total_price=[]
self.limit=l
self.ordered = False
self.item_selected = False
def isFull(self):
return len(self.prod_name) == self.limit
def isEmpty(self):
return len(self.prod_name) == 0
# FUNCTION TO ADD A NEW PRODUCT TO THE BILL
def enqueue(self,ele,qn):
if self.isFull():
print("\nMaximum limit reached !")
elif ele.upper() in self.prod_name:
print(f"\n!! '{ele.upper()}' is already in the ordered list !!")
print("\n--- Please refer the 'ORDER MENU' to modify the ordered items ---\n")
else:
inv_data = pd.read_csv('data/inventory.csv')
flag = 0
for i in range(len(inv_data)):
flag = 0
if inv_data["Product_Name"][i] == ele.upper():
if qn.isnumeric() == True:
if int(qn) <= inv_data["Available_Stock"][i]:
self.prod_name.append(ele.upper())
self.quantity.append(int(qn))
self.price.append(inv_data["Selling_Price"][i])
self.item_selected = True
print("\n>>>>>>>> Product is Added to the Order <<<<<<<<\n")
break
else:
print("\n!! Sorry for the inconvenience... Your required product is Out of Stock !!")
break
else:
print("\n!! Invalid Amount of Quantity !!")
break
else:
flag = 1
if flag == 1:
print("\n!! Unavailable Product or Invalid Product !!")
# FUNCTION TO REMOVE A PRODUCT FROM THE BILL
def remove(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to remove !!!\n")
else:
ele = input("\nEnter the product name : ").upper()
if ele in self.prod_name:
ind = self.prod_name.index(ele)
del self.prod_name[ind]
del self.quantity[ind]
del self.price[ind]
del self.total_price[ind]
print("\n>>>>>>>> Product is Removed from the Order <<<<<<<<\n")
else:
print("\n!!! The Specified Product is not in the Order !!!\n")
# FUNCTION TO DISPLAY CONTENTS OF THE BILL
def display(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to generate bill !!!\n")
else:
self.total_price = list(np.array(self.quantity)*np.array(self.price))
form = {'Product Name':self.prod_name,'Quantity':self.quantity,'Cost(1)':self.price,'Total Cost':self.total_price}
res = pd.DataFrame(form)
res.index=list(range(1,len(self.prod_name)+1))
print(res)
print("\n=============================================================\n")
print(f"Total Items : {len(self.prod_name)}")
print(f"Total Quantities : {sum(self.quantity)}")
print(f"Grand Total : Rs.{sum(self.total_price)}")
print("\n=============================================================\n")
# FUNCTION TO MODIFY A PRODUCT NAME OR QUANTITY IN THE BILL
def modify(self):
if self.isEmpty():
print("\n!!! You haven't ordered any product(s) yet to modify !!!\n")
else:
ele = input("\nEnter the product name : ").upper()
if ele in self.prod_name:
ind = self.prod_name.index(ele.upper())
key = int(input("\n Press 1 to modify the product name ..... \n\n Press 2 to modify the quantity .....\n\nYour Option : "))
if key == 1:
self.prod_name[ind] = input("\nEnter the new product name : ").upper()
elif key == 2:
self.quantity[ind] = int(input("\nEnter the new amount of quantity : "))
print("\n>>>>>>>> Updated the Order <<<<<<<<\n")
else:
print("\n!!! The Specified Product is not in the Order !!!\n")
# FUNCTION TO PERFORM THE POST PROCESSING ACTIVITIES ONCE THE BILL IS CONFIRMED
def postProcessor(self):
today = datetime.now()
frmt = today.strftime('%m-%Y')
inv_data = pd.read_csv('data/inventory.csv')
rev_data = pd.read_csv("data/revenue.csv")
for i in range(len(inv_data)):
for j in range(len(self.prod_name)):
if inv_data["Product_Name"][i] == self.prod_name[j]:
inv_data["Available_Stock"][i] -= self.quantity[j]
inv_data.to_csv('data/inventory.csv', index=False)
for i in range(len(rev_data)):
for j in range(len(self.prod_name)):
if rev_data["Product_Name"][i] == self.prod_name[j]:
rev_data[str(frmt)][i] += self.total_price[j]
rev_data.to_csv('data/revenue.csv', index=False)
self.ordered = True
print("\n\n\n -------- Updated the Inventory Data ! -------- \n")
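# Hedged usage sketch (not part of the original application): a typical order
# flow with the Biller queue above. The product name and quantity are
# hypothetical and must exist in data/inventory.csv with enough stock for
# enqueue() to accept them.
# order = Biller(10) # at most 10 distinct products per bill
# order.enqueue("SOAP", "2")
# order.display() # prints the bill and the grand total
# order.postProcessor() # deducts stock and books revenue for the current month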
#INDIVIDUAL FUNCTIONS USED IN REVENUE SUB MENU
month = ["January","February","March","April","May","June","July","August","September","October","November","December"]
# FUNCTION TO VIEW THE REVENUE DATABASE
def viewRevenue():
rev_data = pd.read_csv('data/revenue.csv')
print("\n------------------------------------------ REVENUE DATABASE --------------------------------------------\n\n",rev_data.to_string(index=False))
# FUNCTION TO DISPLAY THE REVENUE GENERATED BY THIS MONTH
def viewMonthRevenue():
rev_data = pd.read_csv('data/revenue.csv')
frmt = input("\nEnter the time period (MM-YYYY) : ")
if frmt[:2] in ['01','02','03','04','05','06','07','08','09','10','11','12'] and frmt in rev_data.columns:
month_revenue = sum(list(rev_data[frmt]))
print(f"\n The revenue generated in {month[int(frmt[:2])-1]} {int(frmt[-4:])} -- Rs.{month_revenue}")
else:
print("\n!!!! Invalid Time Period or Non-Entried Time Period !!!!\n")
# FUNCTION TO DISPLAY THE MAXIMUM PROFIT GENERATED PRODUCTS
def maxProfit():
rev_data = pd.read_csv('data/revenue.csv')
frmt = input("\nEnter the time period (MM-YYYY) : ")
if frmt[:2] in ['01','02','03','04','05','06','07','08','09','10','11','12'] and frmt in rev_data.columns:
if list(rev_data[frmt]) == [0 for i in range(len(rev_data))]:
today = datetime.now()
if frmt[:2] == today.strftime('%m'):
print(f"\n\n!! No Products are sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
print(f"\n\n!! No Products were sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
max_amt = max(list(rev_data[frmt]))
print(f"\n The following product(s) generated the maximum profit on {month[int(frmt[:2])-1]} {int(frmt[-4:])} : \n")
for i in range(len(rev_data)):
if rev_data[frmt][i] == max_amt:
print(" * {} - Rs.{}".format(rev_data["Product_Name"][i],max_amt))
else:
print("\n\n!!!! Invalid Time Period or Non-Entried Time Period !!!!\n")
# FUNCTION TO DISPLAY THE MINIMUM PROFIT GENERATED PRODUCTS
def minProfit():
rev_data = pd.read_csv('data/revenue.csv')
frmt = input("\nEnter the time period (MM-YYYY) : ")
if frmt[:2] in ['01','02','03','04','05','06','07','08','09','10','11','12'] and frmt in rev_data.columns:
if list(rev_data[frmt]) == [0 for i in range(len(rev_data))]:
today = datetime.now()
if frmt[:2] == today.strftime('%m'):
print(f"\n\n!! No Products are sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
print(f"\n\n!! No Products were sold in {month[int(frmt[:2])-1]} {int(frmt[-4:])} !!\n")
else:
min_amt = min(list(rev_data[frmt]))
print(f"\n The following product(s) generated the least profit on {month[int(frmt[:2])-1]} {int(frmt[-4:])} : \n")
for i in range(len(rev_data)):
if rev_data[frmt][i] == min_amt:
print(" * {} - Rs.{}".format(rev_data["Product_Name"][i],min_amt))
else:
print("\n\n!!!! Invalid Time Period or Non-Entried Time Period !!!!\n")
# FUNCTION TO VISUALIZE THE REVENUE GENERATED BY MONTHS THROUGH A GRAPH
def viewRevenueGraph():
rev_data = pd.read_csv('data/revenue.csv')
profits =[]
months = []
year = input("\nEnter the Year (YYYY) : ")
for i in rev_data.columns:
if year in i:
rev_data = pd.read_csv("data/revenue.csv")
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that is works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_index_equal(result, expected)
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.is_unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
        expected = mi.get_level_values(level)
        tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = | pd.DataFrame(d) | pandas.DataFrame |
'''
Created on Aug 28, 2018
@author: cef
object handlers for the fdmg tab
'''
#===============================================================================
# # IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, re #os, sys, imp, time, re, math, copy, inspect
import pandas as pd
import numpy as np
#import scipy.integrate
#from collections import OrderedDict
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
import hp.pd
import hp.data
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initialized')
class Fdmgo_data_wrap(object): #generic methods for Fdmg data objects
def clean_binv_data(self, df_raw): #generic cleaning for binv style data
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('clean_binv_data')
binv_df = self.parent.kids_d['binv'].childmeta_df
if self.db_f:
if not isinstance(binv_df, pd.DataFrame):
raise IOError
if not len(binv_df) > 0:
raise IOError
#try and drop the FID column
df = df_raw.drop('FID', axis=1, errors = 'ignore')
df = df.drop('rank', axis=1, errors = 'ignore')
#switch the index to the mind
df.loc[:,self.mind] = df.loc[:,self.mind].astype(int) #change the type
df2 = df.set_index(self.mind).sort_index()
#===================================================================
# slice for the binv
#===================================================================
boolind = np.isin(df2.index, binv_df.index)
df3 = df2[boolind]
if self.db_f:
if not boolind.sum() == len(binv_df):
boolind2 = np.isin(binv_df.index, df2.index)
logger.error('failed to find %i entries specified in the binv: \n %s'
%(len(binv_df) - boolind.sum(),binv_df.index[~boolind2].values.tolist()))
raise IOError #check data trimming?
        logger.debug('dropped %i (of %i) not found in the binv to get %s'%
(len(df2) - len(df3), len(df2), str(df3.shape)))
return df3
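    # Illustration with made-up data: if mind = 'zid' and the binv is indexed
    # by [101, 102], a raw frame with rows (FID=0, zid=101), (FID=1, zid=102),
    # (FID=2, zid=999) comes back indexed on 'zid' with the FID/rank columns
    # dropped and the zid=999 row removed, because 999 is not part of the
    # building inventory.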
def check_binv_data(self, df):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('check_binv_data')
binv_df = self.parent.kids_d['binv'].childmeta_df
#null check
if np.any(pd.isnull(df)):
logger.error('got %i nulls'%pd.isnull(df).sum().sum())
logger.debug('\n %s'%df[pd.isnull(df).sum(axis=1)==1])
raise IOError
#length check
if not len(df) == len(binv_df):
logger.error('my data length (%i) does not match the binv length (%i)'%(len(df), len(binv_df)))
raise IOError
#check for index match
if not np.all(df.index == binv_df.index):
raise IOError
def apply_on_binv(self, #apply the passed key data to the binv
data_attn, hse_attn,
coln = None
):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('apply_on_binv')
        if coln is None: coln = hse_attn #assume this is how the data is labeled
df = getattr(self, data_attn)
binv = self.model.binv
if self.db_f:
if not isinstance(df, pd.DataFrame):
raise IOError
if not coln in df.columns:
raise IOError
ser = df.loc[:, coln]
"""
type(ser)
"""
logger.debug('from \'%s\' with %s applied to \'%s\''%
(data_attn, str(df.shape), hse_attn))
#=======================================================================
# mid session dynamic update to the objects
#=======================================================================
if not self.session.state == 'init':
#=======================================================================
            # tell the binv to update its houses
#=======================================================================
binv.set_all_hse_atts(hse_attn, ser = ser)
#=======================================================================
# pre run just update the binv_df
#=======================================================================
else:
if self.db_f:
binv_df = binv.childmeta_df.copy()
if not np.all(binv_df.index == ser.index):
raise IOError
binv.childmeta_df.loc[:,hse_attn] = ser
logger.debug('merged %i entries for \'%s\' onto the binv_df %s'
%(len(ser), hse_attn, str(binv.childmeta_df.shape)))
return
class Rfda_curve_data(#class object for rfda legacy pars
hp.data.Data_wrapper,
hp.oop.Child):
'made this a class for easy tracking/nesting of parameters'
def __init__(self, *vars, **kwargs):
        super(Rfda_curve_data, self).__init__(*vars, **kwargs) # initialize the base class
self.load_data()
        self.logger.debug('fdmg.Rfda_curve_data initialized')
def load_data(self): #load legacy pars from the df
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_data')
#test pars
if self.session._parlo_f: test_trim_row = self.test_trim_row
else: test_trim_row = None
self.filepath = self.get_filepath()
#load from file
df_raw = hp.pd.load_xls_df(self.filepath, logger=logger, test_trim_row = test_trim_row,
header = 0, index_col = None)
self.data = df_raw
logger.debug('attached rfda_curve with %s'%str(self.data.shape))
class Dfeat_tbl( #holder/generator for all the dmg_feats
hp.data.Data_wrapper,
hp.sim.Sim_o,
hp.oop.Parent,
hp.oop.Child):
"""
    holder/generator for all the dmg_feats
"""
#===========================================================================
# progran pars
#===========================================================================
extra_hse_types = ['AD'] #always load these house types
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('Dfeat_tbl')
logger.debug('start _init_ ')
self.inherit_parent_ans=set(['mind'])
        super(Dfeat_tbl, self).__init__(*vars, **kwargs) # initialize the base class
#=======================================================================
# properties
#=======================================================================
import fdmg.scripts
        self.kid_class = fdmg.scripts.Dmg_feat #manually pass/attach this
if self.session.wdfeats_f: #only bother if we're using dfeats
logger.debug('load_data() \n')
self.load_data()
            self.logger.debug('fdmg.Dfeat_tbl initialized')
if self.db_f:
if self.model is None:
raise IOError
def load_data(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_data')
#test pars
if self.session._parlo_f: test_trim_row = self.test_trim_row
else: test_trim_row = None
self.filepath = self.get_filepath()
#load from file
df_dict = hp.pd.load_xls_df(self.filepath, logger=logger, test_trim_row = test_trim_row,
skiprows = [1],header = 0, index_col = None, sheetname=None)
'wrong function?'
'todo: add some template check'
for tabname, df_raw in df_dict.iteritems():
#=======================================================================
# send for cleaning
#=======================================================================
df_clean = self.clean_df(df_raw)
#rewrite this
df_dict[tabname] = df_clean
            logger.debug('loaded dynamic damage curve table for %s with %s'%(tabname, str(df_clean.shape)))
self.df_dict = df_dict
self.data = None
#=======================================================================
# wrap up
#=======================================================================
if self.session._write_ins: _ = hp.basic.copy_file(self.filepath,self.session.inscopy_path)
logger.debug('attached df_dict with %i entries'%len(df_dict))
def clean_df(self, df_raw): #custom cleaner
logger = self.logger.getChild('clean_df')
df1 = self.generic_clean_df(df_raw)
        df2 = df1.dropna(how = 'all', axis='columns') #drop columns where ALL values are na
#drop the 'note' column from the frame
df3 = df2.drop('note', axis=1, errors='ignore')
#=======================================================================
# exclude small dfeats
#=======================================================================
if self.model.dfeat_xclud_price > 0:
boolidx = df3['base_price'] <= self.model.dfeat_xclud_price
df4 = df3.loc[~boolidx,:] #trim to just these
if boolidx.sum() > 0:
logger.warning('trimmed %i (of %i) dfeats below %.2f '%(boolidx.sum(), len(df3), self.model.dfeat_xclud_price))
"""
hp.pd.v(df4.sort_values('base_price'))
hp.pd.v(df3.sort_values('base_price'))
"""
else:
df4 = df3
'todo: drop any columns where name == np.nan'
df_clean = df4
hp.pd.cleaner_report(df_raw, df_clean, logger = logger)
#=======================================================================
# #post formatters
#=======================================================================
df_clean.loc[:,'depth'] = df_clean['depth_dflt'].values #duplicate this column
""" This is throwing the SettingWithCopy warning.
        Tried for 20 mins to figure this out, but couldn't find any chained indexing.
"""
df_clean.loc[:,'calc_price'] = np.nan #add this as a blank column
return df_clean
"""
df_clean._is_view
df_clean.values.base
hp.pd.v(df_clean)
self.name
"""
def raise_all_dfeats(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('raise_all_dfeats')
d = self.df_dict
dfeats_d = dict() #container of containers
#=======================================================================
        # get the list of house types provided in the binv
#=======================================================================
hse_types_l = set(self.parent.kids_d['binv'].hse_types_l) #pull from binv
hse_types_l.update(self.extra_hse_types) #add the extras
"""
self.parent.kids_d.keys()
"""
#=======================================================================
# load all the dfeats for this
#=======================================================================
logger.debug('on %i house types: %s \n'%(len(d), d.keys()))
for hse_type, df in d.iteritems():
if not hse_type in hse_types_l:
logger.debug(' hse_type = \'%s\' not found in the binv. skipping'%hse_type)
continue
#get all teh place codes
place_codes_l = df['place_code'].unique().tolist()
#===================================================================
# raise children on each of these
#===================================================================
logger.debug('building set for hse_type = \'%s\' with %i place_codes \n'%(hse_type, len(place_codes_l)))
cnt = 0
for place_code in place_codes_l:
tag = hse_type + place_code #unique tag
#get this metadata
boolidx = df['place_code'] == place_code
df_slice = df[boolidx].reset_index(drop=True)
            'need this so the dfloc aligns'
"""
hp.pd.v(df_slice)
df_slice.columns
import pandas as pd
pd.reset_option('all')
pd.get_option("display.max_rows")
pd.get_option("display.max_columns")
pd.set_option("display.max_columns", 6)
d.keys()
d['AD']
"""
#spawn this subset
logger.debug('for \'%s\' raising children from %s'%(tag, str(df_slice.shape)))
                #run the generic child raiser for all dfeats of this type
'raise these as shadow children'
dfeats_d[tag] = self.raise_children_df( df_slice,
kid_class = self.kid_class,
dup_sibs_f = True,
shadow = True)
logger.debug('finished with %i dfeats on tag \'%s\''%(len(dfeats_d[tag]), tag))
cnt += len(dfeats_d[tag])
logger.debug('finish loop \'%s\' with %i'%(hse_type, len(dfeats_d)))
logger.debug("finished with %i dfeats in %i sets raised: %s \n"%(cnt, len(dfeats_d), dfeats_d.keys()))
return dfeats_d
class Flood_tbl( #flood table worker
hp.data.Data_wrapper,
hp.oop.Child,
Fdmgo_data_wrap):
#===========================================================================
# program
#===========================================================================
expected_tabn = ['wet', 'dry', 'aprot']
#===========================================================================
# from user
#===========================================================================
na_value = None #extra value to consider as null
min_chk = 800 #minimum value to pass through checking
    max_chk = 2000 #maximum value to allow through checking
wetnull_code = 'take_wet'
wetdry_tol = 0.1
damp_build_code = 'average'
#area exposure grade
area_egrd00 = None
area_egrd01 = None
area_egrd02 = None
#===========================================================================
# calculated
#===========================================================================
aprot_df = None
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('Flood_tbl')
logger.debug('start _init_')
        super(Flood_tbl, self).__init__(*vars, **kwargs) # initialize the base class
#=======================================================================
# custom atts
#=======================================================================
self.mind = self.parent.mind
self.model = self.parent
self.wsl_d = dict()
logger.debug('load_data() \n')
self.load_data() #execute the standard data loader
self.treat_wetnull()
self.wetdry_logic_fix()
self.build_damp()
if self.db_f: self.check_data()
""" NO! only want the named flood table to set this
self.set_area_prot_lvl()"""
logger.debug('finish _init_ \n')
return
def load_data(self):
logger = self.logger.getChild('load_data')
self.filepath = self.get_filepath()
d = self.loadr_real(self.filepath, multi = True)
#=======================================================================
# sort and attach
#=======================================================================
for k, v in d.iteritems():
logger.debug('sending \'%s\' for cleaning'%k)
df1 = self.clean_binv_data(v)
if k in ['dry', 'wet']:
df2 = self.wsl_clean(df1)
self.wsl_d[k] = df2
elif k == 'aprot':
self.aprot_df = df1.astype(np.int)
else:
logger.error('got unexpected tab name \'%s\''%k)
raise IOError
return
def wsl_clean(self, df_raw):
logger = self.logger.getChild('wsl_clean')
#===================================================================
# headers
#===================================================================
#reformat columns
try:
df_raw.columns = df_raw.columns.astype(int) #reformat the aeps as ints
except:
logger.error('failed to recast columns as int: \n %s'%(df_raw.columns))
raise IOError
#sort the columns
df2 = df_raw.reindex(columns = sorted(df_raw.columns))
#reformat values
df2 = df2.astype(float)
#=======================================================================
# clean the user provided null
#=======================================================================
if not self.na_value is None:
boolar = df2.values == self.na_value
df2[boolar] = np.nan
            logger.warning('set %i user-identified values to null (na_value = \'%s\')'%
(boolar.sum().sum(), self.na_value))
"""not working for some reason
hp.pd.cleaner_report(df_raw, df2)"""
logger.debug('cleaned to %s'%str(df2.shape))
return df2
def treat_wetnull(self): #apply the wetnull_code algorhitim to the dry
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('treat_wetnull')
dfwet = self.wsl_d['wet']
dfdry = self.wsl_d['dry']
dfwet_raw = dfwet.copy()
dfdry_raw = dfdry.copy()
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
if np.any(pd.isnull(dfdry)):
logger.error('got %i null values for dfdry'%pd.isnull(dfdry).sum().sum())
logger.debug('%s'%pd.isnull(dfdry).sum(axis=0))
logger.debug('%s'% | pd.isnull(dfdry) | pandas.isnull |
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
zip_filename (str): Filename of zip file containint the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
f'annotations groups found')
anngrpid_from_beats = anngrpid
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
for subsubannsnode in tmpnodes:
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
| pd.DataFrame([valrow2], columns=VALICOLS) | pandas.DataFrame |
import matplotlib
matplotlib.use('TkAgg') # noqa
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.cm as cm
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import cmocean
import numpy as np
import os
import ast
import pickle
import pandas as pd
from collections import defaultdict
from oggm import workflow, cfg, tasks, utils
from oggm.core.flowline import FileModel
from oggm.graphics import plot_centerlines
from relic.postprocessing import (mae_weighted, optimize_cov, calc_coverage,
get_ensemble_length, get_rcp_ensemble_length)
from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT
def paramplots(df, glid, pout, y_len=None):
# take care of merged glaciers
rgi_id = glid.split('_')[0]
fig1, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=[20, 7])
allvars = ['prcp_scaling_factor', 'mbbias', 'glena_factor']
varcols = {'mbbias': np.array([-1400, -1200, -1000, -800, -600, -400, -200,
-100, 0, 100, 200, 400, 600, 800, 1000]),
'prcp_scaling_factor': np.arange(0.5, 4.1, 0.25),
'glena_factor': np.arange(1, 4.1, 0.5)}
for var, ax in zip(allvars, [ax1, ax2, ax3]):
notvars = allvars.copy()
notvars.remove(var)
# lets use OGGM HISTALP default
papar = {'glena_factor': 1.0, 'mbbias': 0, 'prcp_scaling_factor': 1.75}
# store specific runs
dfvar = pd.DataFrame([], columns=varcols[var], index=df.index)
# OGGM standard
for run in df.columns:
if run == 'obs':
continue
para = ast.literal_eval('{' + run + '}')
if ((np.isclose(para[notvars[0]],
papar[notvars[0]], atol=0.01)) and
(np.isclose(para[notvars[1]],
papar[notvars[1]], atol=0.01))):
dfvar.loc[:, para[var]] = df.loc[:, run]
if var == 'prcp_scaling_factor':
lbl = 'Precip scaling factor'
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.deep))
normalize = mcolors.Normalize(vmin=0,
vmax=4.5)
bounds = np.arange(0.375, 4.2, 0.25)
cbarticks = np.arange(1, 4.1, 1)
elif var == 'glena_factor':
lbl = 'Glen A factor'
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.matter))
normalize = mcolors.Normalize(vmin=0,
vmax=4.5)
bounds = np.arange(0.75, 4.3, 0.5)
cbarticks = np.arange(1, 4.1, 1)
elif var == 'mbbias':
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.balance))
cmaplist = [cmap(i) for i in range(cmap.N)]
cmaplist[128] = (0.412, 0.847, 0.655, 1.0)
cmap = mcolors.LinearSegmentedColormap.from_list('mcm', cmaplist,
cmap.N)
cbarticks = np.array([-1400, -1000, -600, -200,
0, 200, 600, 1000])
bounds = np.array([-1500, -1300, -1100, -900, -700, -500, -300,
-150, -50, 50, 100, 300, 500, 700, 900, 1100])
normalize = mcolors.Normalize(vmin=-1600,
vmax=1600)
lbl = 'MB bias [mm w.e.]'
colors = [cmap(normalize(n)) for n in varcols[var]]
scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap)
cbaxes = inset_axes(ax, width="3%", height="40%", loc=3)
cbar = plt.colorbar(scalarmappaple, cax=cbaxes,
label=lbl,
boundaries=bounds)
cbar.set_ticks(cbarticks)
cbaxes.tick_params(axis='both', which='major', labelsize=16)
cbar.set_label(label=lbl, size=16)
# plot observations
df.loc[:, 'obs'].rolling(1, min_periods=1).mean(). \
plot(ax=ax, color='k', style='.',
marker='o', label='Observed length change',
markersize=6)
dfvar = dfvar.sort_index(axis=1)
# default parameter column
dc = np.where(dfvar.columns == papar[var])[0][0]
dfvar.loc[:, varcols[var][dc]].rolling(y_len, center=True).mean(). \
plot(ax=ax, color=colors[dc], linewidth=5,
label='{}: {} (OGGM default)'.
format(lbl, str(varcols[var][dc])))
# all parameters
nolbl = ['' for i in np.arange(len(dfvar.columns))]
dfvar.columns = nolbl
dfvar.rolling(y_len, center=True).mean().plot(ax=ax, color=colors,
linewidth=2)
ax.set_xlabel('Year', fontsize=26)
ax.set_xlim([1850, 2010])
ax.set_ylim([-4000, 2000])
ax.tick_params(axis='both', which='major', labelsize=22)
if not ax == ax1:
ax.set_yticklabels([])
ax.grid(True)
ax.set_xticks(np.arange(1880, 2010, 40))
ax.legend(fontsize=16, loc=2)
ax1.set_ylabel('relative length change [m]', fontsize=26)
name = name_plus_id(rgi_id)
fig1.suptitle('%s' % name, fontsize=28)
fig1.subplots_adjust(left=0.09, right=0.99, bottom=0.12, top=0.89,
wspace=0.05)
fn1 = os.path.join(pout, 'calibration_%s.png' % glid)
fig1.savefig(fn1)
def past_simulation_and_params(glcdict, pout, y_len=5):
for glid, df in glcdict.items():
# take care of merged glaciers
rgi_id = glid.split('_')[0]
fig = plt.figure(figsize=[20, 7])
gs = GridSpec(1, 4) # 1 rows, 4 columns
ax1 = fig.add_subplot(gs[0, 0:3])
ax2 = fig.add_subplot(gs[0, 3])
df.loc[:, 'obs'].plot(ax=ax1, color='k', marker='o',
label='Observations')
# OGGM standard
for run in df.columns:
if run == 'obs':
continue
para = ast.literal_eval('{' + run + '}')
if ((np.abs(para['prcp_scaling_factor'] - 1.75) < 0.01) and
(para['mbbias'] == 0) and
(para['glena_factor'] == 1)):
df.loc[:, run].rolling(y_len, center=True). \
mean().plot(ax=ax1, linewidth=2, color='k',
label='OGGM default parameter run')
oggmdefault = run
maes = mae_weighted(df).sort_values()
idx2plot = optimize_cov(df.loc[:, maes.index[:150]],
df.loc[:, 'obs'], glid, minuse=5)
ensmean = df.loc[:, idx2plot].mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df.loc[:, idx2plot].std(axis=1).rolling(y_len,
center=True).mean()
# coverage
cov = calc_coverage(df, idx2plot, df['obs'])
ax1.fill_between(ensmeanmean.index, ensmeanmean - ensstdmean,
ensmeanmean + ensstdmean, color='xkcd:teal', alpha=0.5)
# nolbl = df.loc[:, idx2plot2].rolling(y_len, center=True).mean().copy()
# nolbl.columns = ['' for i in range(len(nolbl.columns))]
#df.loc[:, idx2plot2].rolling(y_len, center=True).mean().plot(
# ax=ax1, linewidth=0.8)
# plot ens members
ensmeanmean.plot(ax=ax1, linewidth=4.0, color='xkcd:teal',
label='ensemble parameters runs')
# reference run (basically min mae)
df.loc[:, maes.index[0]].rolling(y_len, center=True).mean(). \
plot(ax=ax1, linewidth=3, color='xkcd:lavender',
label='minimum wMAE parameter run')
name = name_plus_id(rgi_id)
mae_ens = mae_weighted(pd.concat([ensmean, df['obs']], axis=1))[0]
mae_best = maes[0]
ax1.set_title('%s' % name, fontsize=28)
ax1.text(2030, -4900, 'wMAE ensemble mean = %.2f m\n'
'wMAE minimum run = %.2f m' %
(mae_ens, mae_best), fontsize=18,
horizontalalignment='right')
ax1.text(2040, -4900, '%d ensemble members\n'
'coverage = %.2f' %
(len(idx2plot), cov), fontsize=18)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.set_xlim([1850, 2020])
ax1.set_ylim([-3500, 1000])
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.grid(True)
ax1.legend(bbox_to_anchor=(-0.1, -0.15), loc='upper left',
fontsize=18, ncol=2)
# parameter plots
from colorspace import sequential_hcl
col = sequential_hcl('Blue-Yellow').colors(len(idx2plot) + 3)
for i, run in enumerate(idx2plot):
para = ast.literal_eval('{' + run + '}')
psf = para['prcp_scaling_factor']
gla = para['glena_factor']
mbb = para['mbbias']
mbb = (mbb - -1400) * (4-0.5) / (1000 - -1400) + 0.5
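            # map the MB bias range [-1400, 1000] mm w.e. linearly onto
            # [0.5, 4.0] so it can share the y-axis with the precipitation
            # scaling factor and the Glen A factor; the twin axis ax3 below
            # restores readable mass-balance labels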
ax2.plot([1, 2, 3], [psf, gla, mbb], color=col[i], linewidth=2)
ax2.set_xlabel('calibration parameters', fontsize=18)
ax2.set_ylabel('Precipitation scaling factor\nGlen A factor',
fontsize=18)
ax2.set_xlim([0.8, 3.2])
ax2.set_ylim([0.3, 4.2])
ax2.set_xticks([1, 2, 3])
ax2.set_xticklabels(['Psf', 'GlenA', 'MB bias'], fontsize=16)
ax2.tick_params(axis='y', which='major', labelsize=16)
ax2.grid(True)
ax3 = ax2.twinx()
# scale to same y lims
scale = (4.2-0.3)/(4.0-0.5)
dy = (2400*scale-2400)/2
ax3.set_ylim([-1400-dy, 1000+dy])
ax3.set_ylabel('mass balance bias [m w.e. ]', fontsize=18)
ax3.set_yticks(np.arange(-1400, 1100, 400))
ax3.set_yticklabels(['-1.4', '-1.0', '-0.6', '-0.2',
'0.2', '0.6', '1.0'])
ax3.tick_params(axis='both', which='major', labelsize=16)
fig.subplots_adjust(left=0.08, right=0.95, bottom=0.24, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'histalp_%s.png' % glid)
fig.savefig(fn1)
used = dict()
used['oggmdefault'] = oggmdefault
used['minmae'] = idx2plot[0]
used['ensemble'] = idx2plot
pickle.dump(used, open(os.path.join(pout, 'runs_%s.p' % glid), 'wb'))
def past_simulation_and_commitment(rgi, allobs, allmeta, histalp_storage,
comit_storage, comit_storage_noseed,
pout, y_len=5, comyears=300):
cols = ['xkcd:teal',
'xkcd:orange',
'xkcd:azure',
'xkcd:tomato',
'xkcd:blue',
'xkcd:chartreuse',
'xkcd:green'
]
obs = allobs.loc[rgi.split('_')[0]]
meta = allmeta.loc[rgi.split('_')[0]]
fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
# plot
fig, ax1 = plt.subplots(1, figsize=[20, 7])
obs.plot(ax=ax1, color='k', marker='o',
label='Observations')
# past
ensmean = df99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[:2015].index,
ensmeanmean.loc[:2015] - ensstdmean.loc[:2015],
ensmeanmean.loc[:2015] + ensstdmean.loc[:2015],
color=cols[0], alpha=0.5)
ensmeanmean.loc[:2015].plot(ax=ax1, linewidth=4.0, color=cols[0],
label='HISTALP climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# 1999
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[1], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[1],
label='Random climate (1984-2014)')
# 1970
ensmean = df70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[5], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[5],
label='Random climate (1960-1980)')
# 1885
ensmean = df85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[2], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[2],
label='Random climate (1870-1900)')
# ---------------------------------------------------------------------
# plot commitment ensemble length
# 1984
efn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
edf99 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn99, meta)
ensmean = edf99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf99.std(axis=1).rolling(y_len, center=True).mean()
postlength = ensmeanmean.dropna().iloc[-30:].mean()
poststd = ensstdmean.dropna().iloc[-30:].mean()
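    # the "equilibrium" length and its spread are summarised as the mean over
    # the last 30 available years of the random-climate ensemble and drawn as
    # a short horizontal band to the right of the time axis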
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
postlength + poststd, postlength - poststd,
color=cols[3], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [postlength, postlength], linewidth=4.0,
color=cols[3],
label=('Random climate (1984-2014) '
                    'equilibrium length'))
# 1970
efn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
edf70 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn70, meta)
ensmean = edf70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf70.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
prelength + prestd, prelength - prestd,
color=cols[6], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
linewidth=4.0,
color=cols[6],
label=('Random climate (1960-1980) '
                    'equilibrium length'))
# 1885
efn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
edf85 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn85, meta)
ensmean = edf85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf85.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
prelength + prestd, prelength - prestd,
color=cols[4], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
linewidth=4.0,
color=cols[4],
label=('Random climate (1870-1900) '
                    'equilibrium length'))
# ---------------------------------------------------------------------
ylim = ax1.get_ylim()
#ax1.plot([2015, 2015], ylim, 'k-', linewidth=2)
ax1.set_xlim([1850, 2014+comyears+30])
#ax1.set_ylim(ylim)
ax2 = ax1.twinx()
ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
ax2.tick_params(axis='both', which='major', labelsize=22)
ax2.set_ylim([y1, y2])
name = name_plus_id(rgi)
ax1.set_title('%s' % name, fontsize=28)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.set_xticks([1850, 1950, 2014, 2114, 2214, 2314])
ax1.set_xticklabels(['1850', '1950', '2014/0', '100', '200', '300'])
ax1.grid(True)
ax1.legend(bbox_to_anchor=(-0.0, -0.17), loc='upper left', fontsize=18,
ncol=3)
fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'commit_%s.png' % rgi)
fig.savefig(fn1)
def past_simulation_and_projection(rgi, allobs, allmeta, histalp_storage,
proj_storage, comit_storage,
pout, y_len=5,):
cols = ['xkcd:teal',
'xkcd:azure',
'xkcd:lime',
'xkcd:orange',
'xkcd:magenta',
'xkcd:tomato',
'xkcd:blue',
'xkcd:green'
]
obs = allobs.loc[rgi.split('_')[0]]
meta = allmeta.loc[rgi.split('_')[0]]
dfall = pd.DataFrame([], index=np.arange(1850, 2101))
dfallstd = pd.DataFrame([], index=np.arange(1850, 2101))
for rcp in ['rcp26', 'rcp45', 'rcp60', 'rcp85']:
dfrcp = get_rcp_ensemble_length(rgi, histalp_storage, proj_storage,
rcp, meta)
ensmean = dfrcp.mean(axis=1)
dfall.loc[:, rcp] = ensmean.rolling(y_len, center=True).mean()
dfallstd.loc[:, rcp] = dfrcp.std(axis=1).\
rolling(y_len, center=True).mean()
# plot
fig, ax1 = plt.subplots(1, figsize=[20, 7])
obs.plot(ax=ax1, color='k', marker='o',
label='Observations')
# past
ax1.fill_between(dfall.loc[:2015, rcp].index,
dfall.loc[:2015, rcp] - dfallstd.loc[:2015, rcp],
dfall.loc[:2015, rcp] + dfallstd.loc[:2015, rcp],
color=cols[0], alpha=0.5)
dfall.loc[:2015, rcp].plot(ax=ax1, linewidth=4.0, color=cols[0],
label='HISTALP climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# projections
# rcp26
ax1.fill_between(dfall.loc[2015:, 'rcp26'].index,
dfall.loc[2015:, 'rcp26'] - dfallstd.loc[2015:, 'rcp26'],
dfall.loc[2015:, 'rcp26'] + dfallstd.loc[2015:, 'rcp26'],
color=cols[1], alpha=0.5)
dfall.loc[2015:, 'rcp26'].plot(ax=ax1, linewidth=4.0, color=cols[1],
label='RCP 2.6 climate')
# rcp45
dfall.loc[2015:, 'rcp45'].plot(ax=ax1, linewidth=4.0, color=cols[2],
label='RCP 4.5 climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# rcp60
dfall.loc[2015:, 'rcp60'].plot(ax=ax1, linewidth=4.0, color=cols[3],
label='RCP 6.0 climate')
# rcp85
ax1.fill_between(dfall.loc[2015:, 'rcp85'].index,
dfall.loc[2015:, 'rcp85'] - dfallstd.loc[2015:, 'rcp85'],
dfall.loc[2015:, 'rcp85'] + dfallstd.loc[2015:, 'rcp85'],
color=cols[4], alpha=0.5)
dfall.loc[2015:, 'rcp85'].plot(ax=ax1, linewidth=4.0, color=cols[4],
label='RCP 8.5 climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# plot commitment length
# 1984
fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
ensmean = df99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
postlength = ensmeanmean.dropna().iloc[-30:].mean()
poststd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
postlength + poststd, postlength - poststd,
color=cols[5], alpha=0.5)
ax1.plot([2105.5, 2110.5], [postlength, postlength], linewidth=4.0,
color=cols[5],
label=('Random climate (1984-2014) '
'equilibrium length'))
# 1970
fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
ensmean = df70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
prelength + prestd, prelength - prestd,
color=cols[7], alpha=0.5)
ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
color=cols[7],
label=('Random climate (1960-1980) '
'equilibrium length'))
# 1885
fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
ensmean = df85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
prelength + prestd, prelength - prestd,
color=cols[6], alpha=0.5)
ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
color=cols[6],
label=('Random climate (1870-1900) '
'equilibrium length'))
ylim = ax1.get_ylim()
ax1.set_xlim([1850, 2112])
ax2 = ax1.twinx()
    ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
ax2.tick_params(axis='both', which='major', labelsize=22)
ax2.set_ylim([y1, y2])
name = name_plus_id(rgi)
ax1.set_title('%s' % name, fontsize=28)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.grid(True)
ax1.legend(bbox_to_anchor=(0.0, -0.17), loc='upper left', fontsize=18,
ncol=4)
fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'proj_%s.png' % rgi)
fig.savefig(fn1)
def get_mean_temps_eq(rgi, histalp_storage, comit_storage, ensmembers):
from oggm import cfg, utils, GlacierDirectory
from oggm.core.massbalance import MultipleFlowlineMassBalance
from oggm.core.flowline import FileModel
import shutil
# 1. get mean surface heights
df85 = pd.DataFrame([])
df99 = pd.DataFrame([])
for i in range(ensmembers):
fnc1 = os.path.join(comit_storage, rgi,
'model_run_commitment1885_{:02d}.nc'.format(i))
fnc2 = os.path.join(comit_storage, rgi,
'model_run_commitment1999_{:02d}.nc'.format(i))
tmpmod1 = FileModel(fnc1)
tmpmod2 = FileModel(fnc2)
for j in np.arange(270, 301):
tmpmod1.run_until(j)
df85.loc[:, '{}{}'.format(i, j)] = tmpmod1.fls[-1].surface_h
tmpmod2.run_until(j)
df99.loc[:, '{}{}'.format(i, j)] = tmpmod2.fls[-1].surface_h
meanhgt99 = df99.mean(axis=1).values
meanhgt85 = df85.mean(axis=1).values
# 2. get the climate
# Initialize OGGM
cfg.initialize()
wd = utils.gettempdir(reset=True)
cfg.PATHS['working_dir'] = wd
utils.mkdir(wd, reset=True)
cfg.PARAMS['baseline_climate'] = 'HISTALP'
# and set standard histalp values
cfg.PARAMS['temp_melt'] = -1.75
i = 0
storage_dir = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
rgi[:8], rgi[:11], rgi)
new_dir = os.path.join(cfg.PATHS['working_dir'], 'per_glacier',
rgi[:8], rgi[:11], rgi)
shutil.copytree(storage_dir, new_dir)
gdir = GlacierDirectory(rgi)
mb = MultipleFlowlineMassBalance(gdir, filename='climate_monthly',
check_calib_params=False)
# need to do the above for every ensemble member if I consider PRECIP!
# and set cfg.PARAMS['prcp_scaling_factor'] = pdict['prcp_scaling_factor']
df99_2 = pd.DataFrame()
df85_2 = pd.DataFrame()
for i in np.arange(9, 12):
for y in np.arange(1870, 1901):
flyear = utils.date_to_floatyear(y, i)
tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt85,
flyear)[0]
df85_2.loc[y, i] = tmp.mean()
        for y in np.arange(1984, 2015):
            flyear = utils.date_to_floatyear(y, i)
            tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt99,
                                                                flyear)[0]
df99_2.loc[y, i] = tmp.mean()
t99 = df99_2.mean().mean()
t85 = df85_2.mean().mean()
return t85, t99
def get_mean_temps_2k(rgi, return_prcp):
from oggm import cfg, utils, workflow, tasks
from oggm.core.massbalance import PastMassBalance
# Initialize OGGM
cfg.initialize()
wd = utils.gettempdir(reset=True)
cfg.PATHS['working_dir'] = wd
utils.mkdir(wd, reset=True)
cfg.PARAMS['baseline_climate'] = 'HISTALP'
# and set standard histalp values
cfg.PARAMS['temp_melt'] = -1.75
cfg.PARAMS['prcp_scaling_factor'] = 1.75
gdir = workflow.init_glacier_regions(rgidf=rgi.split('_')[0],
from_prepro_level=3,
prepro_border=10)[0]
# run histalp climate on glacier!
tasks.process_histalp_data(gdir)
f = gdir.get_filepath('climate_historical')
with utils.ncDataset(f) as nc:
refhgt = nc.ref_hgt
mb = PastMassBalance(gdir, check_calib_params=False)
df = pd.DataFrame()
df2 = pd.DataFrame()
for y in np.arange(1870, 2015):
for i in np.arange(9, 12):
flyear = utils.date_to_floatyear(y, i)
tmp = mb.get_monthly_climate([refhgt], flyear)[0]
df.loc[y, i] = tmp.mean()
if return_prcp:
for i in np.arange(3, 6):
flyear = utils.date_to_floatyear(y, i)
pcp = mb.get_monthly_climate([refhgt], flyear)[3]
                df2.loc[y, i] = pcp.mean()
t99 = df.loc[1984:2014, :].mean().mean()
t85 = df.loc[1870:1900, :].mean().mean()
t2k = df.loc[1900:2000, :].mean().mean()
if return_prcp:
p99 = df2.loc[1984:2014, :].mean().mean()
p85 = df2.loc[1870:1900, :].mean().mean()
p2k = df2.loc[1900:2000, :].mean().mean()
return t85, t99, t2k, p85, p99, p2k
return t85, t99, t2k
def get_absolute_length(y0, y1, rgi, df, storage):
rgipath = os.path.join(storage, rgi, '{:02d}'.format(0),
rgi[:8], rgi[:11], rgi)
mfile = os.path.join(rgipath, 'model_run_histalp_{:02d}.nc'.format(0))
tmpmod = FileModel(mfile)
absL = tmpmod.length_m
deltaL = df.loc[int(tmpmod.yr.values), 0]
abs_y0 = absL + (y0 - deltaL)
abs_y1 = absL + (y1 - deltaL)
return abs_y0, abs_y1
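# The conversion above anchors the relative length-change axis to the modelled
# absolute length of ensemble member 0 at its first stored year, i.e.
#     abs_y = L_model(yr0) + (y - deltaL(yr0))
# so both axis limits are shifted by the same constant offset.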
def elevation_profiles(rgi, meta, histalp_storage, pout):
name = name_plus_id(rgi)
df1850 = pd.DataFrame()
df2003 = | pd.DataFrame() | pandas.DataFrame |
# https://quantdare.com/risk-parity-in-python/
import pandas as pd
import pandas_datareader.data as web
import numpy as np
import datetime
from scipy.optimize import minimize
TOLERANCE = 1e-10
def _allocation_risk(weights, covariances):
# We calculate the risk of the weights distribution
portfolio_risk = np.sqrt((weights * covariances * weights.T))[0, 0]
# It returns the risk of the weights distribution
return portfolio_risk
def _assets_risk_contribution_to_allocation_risk(weights, covariances):
# We calculate the risk of the weights distribution
portfolio_risk = _allocation_risk(weights, covariances)
# We calculate the contribution of each asset to the risk of the weights
# distribution
assets_risk_contribution = np.multiply(weights.T, covariances * weights.T) \
/ portfolio_risk
# It returns the contribution of each asset to the risk of the weights
# distribution
return assets_risk_contribution
def _risk_budget_objective_error(weights, args):
# The covariance matrix occupies the first position in the variable
covariances = args[0]
# The desired contribution of each asset to the portfolio risk occupies the
# second position
assets_risk_budget = args[1]
# We convert the weights to a matrix
weights = np.matrix(weights)
# We calculate the risk of the weights distribution
portfolio_risk = _allocation_risk(weights, covariances)
# We calculate the contribution of each asset to the risk of the weights
# distribution
assets_risk_contribution = \
_assets_risk_contribution_to_allocation_risk(weights, covariances)
# We calculate the desired contribution of each asset to the risk of the
# weights distribution
assets_risk_target = \
np.asmatrix(np.multiply(portfolio_risk, assets_risk_budget))
# Error between the desired contribution and the calculated contribution of
# each asset
error = \
sum(np.square(assets_risk_contribution - assets_risk_target.T))[0, 0]
# It returns the calculated error
return error
def _get_risk_parity_weights(covariances, assets_risk_budget, initial_weights):
# Restrictions to consider in the optimisation: only long positions whose
# sum equals 100%
constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0},
{'type': 'ineq', 'fun': lambda x: x})
# Optimisation process in scipy
optimize_result = minimize(fun=_risk_budget_objective_error,
x0=initial_weights,
args=[covariances, assets_risk_budget],
method='SLSQP',
constraints=constraints,
tol=TOLERANCE,
options={'disp': False})
# Recover the weights from the optimised object
weights = optimize_result.x
# It returns the optimised weights
return weights
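# Hedged sanity-check sketch (not part of the original recipe): on a small,
# synthetic covariance matrix the optimised weights should equalise every
# asset's contribution to total portfolio risk. All numbers below are made up
# for illustration only.
def _risk_parity_sanity_check():
    toy_covariances = np.array([[0.040, 0.006, 0.000],
                                [0.006, 0.090, 0.010],
                                [0.000, 0.010, 0.160]])
    equal_budget = [1.0 / 3.0] * 3
    toy_weights = _get_risk_parity_weights(toy_covariances, equal_budget,
                                           equal_budget)
    contributions = _assets_risk_contribution_to_allocation_risk(
        np.matrix(toy_weights), toy_covariances)
    # Every entry of `contributions` should be (numerically) identical
    print(toy_weights)
    print(np.asarray(contributions).ravel())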
def get_weights(yahoo_tickers=['GOOGL', 'AAPL', 'AMZN'],
start_date=datetime.datetime(2016, 10, 31),
end_date=datetime.datetime(2017, 10, 31)):
# We download the prices from Yahoo Finance
prices = pd.DataFrame([web.DataReader(t,
'yahoo',
start_date,
end_date).loc[:, 'Adj Close']
for t in yahoo_tickers],
index=yahoo_tickers).T.asfreq('B').ffill()
# We calculate the covariance matrix
covariances = 52.0 * \
prices.asfreq('W-FRI').pct_change().iloc[1:, :].cov().values
# The desired contribution of each asset to the portfolio risk: we want all
# asset to contribute equally
assets_risk_budget = [1 / prices.shape[1]] * prices.shape[1]
# Initial weights: equally weighted
init_weights = [1 / prices.shape[1]] * prices.shape[1]
# Optimisation process of weights
weights = \
_get_risk_parity_weights(covariances, assets_risk_budget, init_weights)
# Convert the weights to a pandas Series
    weights = pd.Series(weights, index=prices.columns, name='weight')
    # It returns the optimised portfolio weights
    return weights
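# Hedged usage sketch: the tickers and the date range below are placeholders
# (they simply repeat the function defaults), and fetching prices through
# pandas_datareader's Yahoo endpoint requires a working network connection.
if __name__ == '__main__':
    example_weights = get_weights(yahoo_tickers=['GOOGL', 'AAPL', 'AMZN'],
                                  start_date=datetime.datetime(2016, 10, 31),
                                  end_date=datetime.datetime(2017, 10, 31))
    print(example_weights)
    # Long-only risk-parity weights should sum to (approximately) 1.0
    print('total allocation:', example_weights.sum())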
import warnings
import pytest
import pandas as pd
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_errors, create, read
from mssql_dataframe.core.write import insert
pd.options.mode.chained_assignment = "raise"
table_name = "##test_select"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.insert = insert.insert(self.connection, include_metadata_timestamps=False, autoadjust_sql_objects=False)
self.read = read.read(self.connection)
@pytest.fixture(scope="session")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
@pytest.fixture(scope="session")
def sample(sql):
# create table and insert sample data
sql.create.table(
table_name,
columns={
"ColumnA": "TINYINT",
"ColumnB": "INT",
"ColumnC": "BIGINT",
"ColumnD": "DATE",
"ColumnE": "VARCHAR(1)",
"ColumnF": "VARCHAR(3)",
},
primary_key_column=["ColumnA", "ColumnF"],
)
dataframe = pd.DataFrame(
{
"ColumnA": [5, 6, 7],
"ColumnB": [5, 6, None],
"ColumnC": [pd.NA, 6, 7],
"ColumnD": ["06-22-2021", "06-22-2021", pd.NaT],
"ColumnE": ["a", "b", None],
"ColumnF": ["xxx", "yyy", "zzz"],
}
).set_index(keys=["ColumnA", "ColumnF"])
dataframe["ColumnB"] = dataframe["ColumnB"].astype("Int64")
dataframe["ColumnD"] = | pd.to_datetime(dataframe["ColumnD"]) | pandas.to_datetime |
import os
import tempfile
import numpy as np
import pandas as pd
import datetime as dt
if __name__ == "__main__":
base_dir = "/opt/ml/processing"
#Read Data
df = pd.read_csv(
f"{base_dir}/input/storedata_total.csv"
)
# convert created column to datetime
df["created"] = pd.to_datetime(df["created"])
#Convert firstorder and lastorder to datetime datatype
df["firstorder"] = | pd.to_datetime(df["firstorder"],errors='coerce') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
        The levels converted to values like what pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
        assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
        targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
from pyhdx.fileIO import read_dynamx, csv_to_protein, save_fitresult, dataframe_to_file
from pyhdx import PeptideMasterTable, HDXMeasurement
from pyhdx.models import HDXMeasurementSet
from pyhdx.fitting import fit_rates_weighted_average, fit_gibbs_global, fit_gibbs_global_batch, fit_gibbs_global_batch_aligned
from pyhdx.local_cluster import default_client
import numpy as np
import pandas as pd
from pathlib import Path
import torch
"""Run this file to renew the fit results which is used to test against"""
# Toggle to also generate long computation time fits
do_long_fit = False
epochs_long = 20000
torch.manual_seed(43)
np.random.seed(43)
epochs = 1000
sequence = 'MSEQNNTEMTFQIQRIYTKDISFEAPNAPHVFQKDWQPEVKLDLDTASSQLADDVYEVVLRVTVTASLGEETAFLCEVQQGGIFSIAGIEGTQMAHCLGAYCPNILFPYARECITSMVSRGTFPQLNLAPVNFDALFMNYLQQQAGEGTEEHQDA'
sequence_dimer = 'MSEQNNTEMTFQIQRIYTKDISFEAPNAPHVFQKDWQPEVKLDLDTASSQLADDVYEVVLRVTVTASLGEETAFLCEVQQGGIFSIAGIEGTQMAHCLGAYCPNILFPAARECIASMVARGTFPQLNLAPVNFDALFMNYLQQQAGEGTEEHQDA'
cwd = Path(__file__).parent
input_dir = cwd / 'test_data' / 'input'
output_dir = cwd / 'test_data' / 'output'
guess = True
control = ('Full deuteration control', 0.167*60)
data = read_dynamx(input_dir / 'ecSecB_apo.csv', input_dir / 'ecSecB_dimer.csv')
pmt = PeptideMasterTable(data, drop_first=1, ignore_prolines=True, remove_nan=False)
pmt.set_control(control)
temperature, pH = 273.15 + 30, 8.
hdxm = HDXMeasurement(pmt.get_state('SecB WT apo'), sequence=sequence, temperature=temperature, pH=pH)
data = pmt.get_state('SecB WT apo')
reduced_data = data[data['end'] < 40]
hdxm_reduced = HDXMeasurement(reduced_data, temperature=temperature, pH=pH)
result = fit_rates_weighted_average(hdxm_reduced)
reduced_guess = result.output
reduced_guess.to_file(output_dir / 'ecSecB_reduced_guess.csv')
reduced_guess.to_file(output_dir / 'ecSecB_reduced_guess.txt', fmt='pprint')
gibbs_guess = hdxm_reduced.guess_deltaG(reduced_guess['rate'])
fr_torch = fit_gibbs_global(hdxm_reduced, gibbs_guess, epochs=epochs, r1=2)
save_fitresult(output_dir / 'ecsecb_reduced', fr_torch)
if guess:
wt_avg_result = fit_rates_weighted_average(hdxm, bounds=(1e-2/60., 800/60.))
output = wt_avg_result.output
output.to_file(output_dir / 'ecSecB_guess.csv')
output.to_file(output_dir / 'ecSecB_guess.txt', fmt='pprint')
else:
output = csv_to_protein(output_dir / 'ecSecB_guess.csv')
# Export protein sequence and intrinsic rate of exchange
hdxm.coverage.protein.to_file(output_dir / 'ecSecB_info.csv')
hdxm.coverage.protein.to_file(output_dir / 'ecSecB_info.txt', fmt='pprint')
rfu_exposure = {time: rfu for time, rfu in zip(hdxm.timepoints, hdxm.rfu_residues.T)} # Nr x Nt array
rfu_df = pd.DataFrame(rfu_exposure, index=hdxm.coverage.r_number)
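# Hedged sketch (the file name below is an assumption, not part of the original
# script): persist the per-exposure RFU table alongside the other reference
# outputs so it can be compared against in tests.
rfu_df.to_csv(output_dir / 'ecSecB_rfu_per_exposure.csv')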
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
from pipeline import experiment, ephys, psth
from pipeline.plot import (_plot_with_sem, _extract_one_stim_dur, _get_units_hemisphere,
_plot_stacked_psth_diff, _plot_avg_psth,
_get_photostim_time_and_duration, _get_trial_event_times,
jointplot_w_hue)
m_scale = 1200
_plt_xmin = -3
_plt_xmax = 2
def plot_clustering_quality(probe_insertion):
probe_insertion = probe_insertion.proj()
amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat
* ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')
metrics = {'amp': amp,
'snr': snr,
'isi': np.array(isi_violation) * 100, # to percentage
'rate': np.array(spk_rate)}
label_mapper = {'amp': 'Amplitude',
'snr': 'Signal to noise ratio (SNR)',
'isi': 'ISI violation (%)',
'rate': 'Firing rate (spike/s)'}
fig, axs = plt.subplots(2, 3, figsize=(12, 8))
fig.subplots_adjust(wspace=0.4)
for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):
ax.plot(metrics[m1], metrics[m2], '.k')
ax.set_xlabel(label_mapper[m1])
ax.set_ylabel(label_mapper[m2])
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
def plot_unit_characteristic(probe_insertion, axs=None):
probe_insertion = probe_insertion.proj()
amp, snr, spk_rate, x, y, insertion_depth = (
ephys.Unit * ephys.ProbeInsertion.InsertionLocation * ephys.UnitStat
& probe_insertion & 'unit_quality != "all"').fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'unit_posx', 'unit_posy', 'dv_location')
insertion_depth = np.where(np.isnan(insertion_depth), 0, insertion_depth)
metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(), x, y + insertion_depth))))
metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)
# cosmetic
for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(title)
ax.set_xlim((-10, 60))
def plot_unit_selectivity(probe_insertion, axs=None):
probe_insertion = probe_insertion.proj()
attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',
'ipsi_firing_rate', 'unit_posx', 'unit_posy', 'dv_location']
selective_units = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* experiment.Period & probe_insertion & 'period_selectivity != "non-selective"').fetch(*attr_names)
selective_units = pd.DataFrame(selective_units).T
selective_units.columns = attr_names
selective_units.period_selectivity.astype('category')
# --- account for insertion depth (manipulator depth)
selective_units.unit_posy = (selective_units.unit_posy
+ np.where(np.isnan(selective_units.dv_location.values.astype(float)),
0, selective_units.dv_location.values.astype(float)))
# --- get ipsi vs. contra firing rate difference
f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)
selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()
# --- prepare for plotting
cosmetic = {'legend': None,
'linewidth': 0.0001}
ymax = selective_units.unit_posy.max() + 100
# a bit of hack to get 'open circle'
pts = np.linspace(0, np.pi * 2, 24)
circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]
vert = np.r_[circ, circ[::-1] * .7]
open_circle = mpl.path.Path(vert)
# --- plot
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
for (title, df), ax in zip(((p, selective_units[selective_units.period == p])
for p in ('sample', 'delay', 'response')), axs):
sns.scatterplot(data=df, x='unit_posx', y='unit_posy',
s=df.f_rate_diff.values.astype(float)*m_scale,
hue='period_selectivity', marker=open_circle,
palette={'contra-selective': 'b', 'ipsi-selective': 'r'},
ax=ax, **cosmetic)
contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{title}\n% contra: {contra_p:.2f}\n% ipsi: {100-contra_p:.2f}')
ax.set_xlim((-10, 60))
# ax.set_ylim((0, ymax))
def plot_unit_bilateral_photostim_effect(probe_insertion, axs=None):
probe_insertion = probe_insertion.proj()
cue_onset = (experiment.Period & 'period = "delay"').fetch1('period_start')
no_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim'}).fetch1('KEY')
bi_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_stim'}).fetch1('KEY')
# get photostim duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& probe_insertion).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
units = ephys.Unit & probe_insertion & 'unit_quality != "all"'
metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])
# XXX: could be done with 1x fetch+join
for u_idx, unit in enumerate(units.fetch('KEY')):
x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')
nostim_psth, nostim_edge = (
psth.UnitPsth & {**unit, **no_stim_cond}).fetch1('unit_psth')
bistim_psth, bistim_edge = (
psth.UnitPsth & {**unit, **bi_stim_cond}).fetch1('unit_psth')
# compute the firing rate difference between stim vs. no-stim within the stimulation duration
ctrl_frate = nostim_psth[np.logical_and(nostim_edge[1:] >= cue_onset, nostim_edge[1:] <= cue_onset + stim_dur)]
stim_frate = bistim_psth[np.logical_and(bistim_edge[1:] >= cue_onset, bistim_edge[1:] <= cue_onset + stim_dur)]
frate_change = np.abs(stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()
metrics.loc[u_idx] = (int(unit['unit']), x, y, frate_change)
metrics.frate_change = metrics.frate_change / metrics.frate_change.max()
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(4, 8))
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,
ax=axs, **cosmetic)
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_title('% change')
axs.set_xlim((-10, 60))
def plot_stacked_contra_ipsi_psth(units, axs=None):
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 20))
assert axs.size == 2
trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]
period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)
hemi = _get_units_hemisphere(units)
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
# ipsi selective ipsi trials
psth_is_it = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
# ipsi selective contra trials
psth_is_ct = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective contra trials
psth_cs_ct = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective ipsi trials
psth_cs_it = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0],
vlines=period_starts, flip=True)
_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1],
vlines=period_starts)
# cosmetic
for ax, title in zip(axs, ('Contra-selective Units', 'Ipsi-selective Units')):
ax.set_title(title)
ax.set_ylabel('Unit')
ax.set_xlabel('Time to go-cue (s)')
ax.set_xlim([_plt_xmin, _plt_xmax])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
def plot_selectivity_sorted_stacked_contra_ipsi_psth(units, axs=None):
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 20))
assert axs.size == 2
trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]
period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)
hemi = _get_units_hemisphere(units)
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')
# ---- separate units to:
# i) sample or delay not response:
sample_delay_units = units & (psth.PeriodSelectivity
& 'period in ("sample", "delay")'
& 'period_selectivity != "non-selective"')
sample_delay_units = sample_delay_units & (psth.PeriodSelectivity & units
& 'period = "response"'
& 'period_selectivity = "non-selective"')
# ii) sample or delay and response:
sample_delay_response_units = units & (psth.PeriodSelectivity
& 'period in ("sample", "delay")'
& 'period_selectivity != "non-selective"')
sample_delay_response_units = sample_delay_response_units & (psth.PeriodSelectivity & units
& 'period = "response"'
& 'period_selectivity != "non-selective"')
# iii) not sample nor delay and response:
response_units = (units & (psth.PeriodSelectivity & 'period in ("sample")'
& 'period_selectivity = "non-selective"')
& (psth.PeriodSelectivity & 'period in ("delay")'
& 'period_selectivity = "non-selective"'))
response_units = response_units & (psth.PeriodSelectivity & units
& 'period = "response"'
& 'period_selectivity != "non-selective"')
ipsi_selective_psth, contra_selective_psth = [], []
for units in (sample_delay_units, sample_delay_response_units, response_units):
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
# ipsi selective ipsi trials
psth_is_it = (psth.UnitPsth * sel_i & conds_i).fetch()
# ipsi selective contra trials
psth_is_ct = (psth.UnitPsth * sel_i & conds_c).fetch()
# contra selective contra trials
psth_cs_ct = (psth.UnitPsth * sel_c & conds_c).fetch()
# contra selective ipsi trials
psth_cs_it = (psth.UnitPsth * sel_c & conds_i).fetch()
contra_selective_psth.append(_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0], flip=True, plot=False))
ipsi_selective_psth.append(_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1], plot=False))
contra_boundaries = np.cumsum([len(k) for k in contra_selective_psth[::-1]])
ipsi_boundaries = np.cumsum([len(k) for k in ipsi_selective_psth[::-1]])
contra_selective_psth = np.vstack(contra_selective_psth)
ipsi_selective_psth = np.vstack(ipsi_selective_psth)
xlim = -3, 2
im = axs[0].imshow(contra_selective_psth, cmap=plt.cm.bwr,
aspect=4.5/contra_selective_psth.shape[0],
extent=[-3, 3, 0, contra_selective_psth.shape[0]])
im.set_clim((-1, 1))
im = axs[1].imshow(ipsi_selective_psth, cmap=plt.cm.bwr,
aspect=4.5/ipsi_selective_psth.shape[0],
extent=[-3, 3, 0, ipsi_selective_psth.shape[0]])
im.set_clim((-1, 1))
# cosmetic
for ax, title, hspans in zip(axs, ('Contra-selective Units', 'Ipsi-selective Units'),
(contra_boundaries, ipsi_boundaries)):
for x in period_starts:
ax.axvline(x=x, linestyle='--', color='k')
ax.set_title(title)
ax.set_ylabel('Unit')
ax.set_xlabel('Time to go-cue (s)')
ax.set_xlim(xlim)
for ystart, ystop, color in zip([0]+list(hspans[:-1]), hspans, ('k', 'grey', 'w')):
ax.axhspan(ystart, ystop, 0.98, 1, alpha=1, color=color)
def plot_avg_contra_ipsi_psth(units, axs=None):
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
period_starts = (experiment.Period
& 'period in ("sample", "delay", "response")').fetch(
'period_start')
hemi = _get_units_hemisphere(units)
good_unit = ephys.Unit & 'unit_quality != "all"'
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
psth_is_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_is_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
_plot_avg_psth(psth_cs_it, psth_cs_ct, period_starts, axs[0],
'Contra-selective')
_plot_avg_psth(psth_is_it, psth_is_ct, period_starts, axs[1],
'Ipsi-selective')
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
def plot_psth_photostim_effect(units, condition_name_kw=['both_alm'], axs=None):
"""
For the specified `units`, plot the PSTH comparison between stim vs. no-stim trials, separated by left/right trial instruction.
The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: bilateral ALM).
"""
units = units.proj()
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
hemi = _get_units_hemisphere(units)
period_starts = (experiment.Period
& 'period in ("sample", "delay", "response")').fetch(
'period_start')
# no photostim:
psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]
psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]
psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')
# get photostim duration and stim time (relative to go-cue)
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_time, stim_dur = _get_photostim_time_and_duration(units,
psth.TrialCondition().get_trials(stim_trial_cond_name))
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
# add shaded bar for photostim
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
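# Hedged usage sketch (assumed restriction values, not part of the original module):
# units = ephys.Unit & {'subject_id': 435884, 'session': 1} & 'unit_quality != "all"'
# plot_psth_photostim_effect(units, condition_name_kw=['both_alm'])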
def plot_selectivity_change_photostim_effect(units, condition_name_kw, recover_time_window=None, ax=None):
"""
For each unit in the specified units, extract:
+ control, left-instruct PSTH (ctrl_left)
+ control, right-instruct PSTH (ctrl_right)
+ stim, left-instruct PSTH (stim_left)
+ stim, right-instruct PSTH (stim_right)
Then, control_PSTH and stim_PSTH are defined as
(ctrl_left - ctrl_right) for an ipsi-selective unit located in the left hemisphere, and vice versa
(stim_left - stim_right) for an ipsi-selective unit located in the left hemisphere, and vice versa
Selectivity change is then defined as: control_PSTH - stim_PSTH
"""
trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(['good_noearlylick_', '_hit'])[0]
period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, trial_cond_name)
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_time, stim_dur = _get_photostim_time_and_duration(units,
psth.TrialCondition().get_trials(stim_trial_cond_name))
ctrl_left_cond_name = 'all_noearlylick_nostim_left'
ctrl_right_cond_name = 'all_noearlylick_nostim_right'
stim_left_cond_name = psth.TrialCondition().get_cond_name_from_keywords(condition_name_kw
+ ['noearlylick', 'stim', 'left'])[0]
stim_right_cond_name = psth.TrialCondition().get_cond_name_from_keywords(condition_name_kw
+ ['noearlylick', 'stim', 'right'])[0]
delta_sels, ctrl_psths = [], []
for unit in (units * psth.UnitSelectivity & 'unit_selectivity != "non-selective"').proj('unit_selectivity').fetch(as_dict=True):
# ---- trial count criteria ----
# no less than 5 trials for control
if (len(psth.TrialCondition.get_trials(ctrl_left_cond_name) & unit) < 5
or len(psth.TrialCondition.get_trials(ctrl_right_cond_name) & unit) < 5):
continue
# no less than 2 trials for stimulation
if (len(psth.TrialCondition.get_trials(stim_left_cond_name) & unit) < 2
or len(psth.TrialCondition.get_trials(stim_right_cond_name) & unit) < 2):
continue
hemi = _get_units_hemisphere(unit)
ctrl_left_psth, t_vec = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': ctrl_left_cond_name})['psth']
ctrl_right_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': ctrl_right_cond_name})['psth']
try:
stim_left_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': stim_left_cond_name})['psth']
stim_right_psth, _ = psth.UnitPsth.get_plotting_data(unit, {'trial_condition_name': stim_right_cond_name})['psth']
except:
continue
if unit['unit_selectivity'] == 'ipsi-selective':
ctrl_psth_diff = ctrl_left_psth - ctrl_right_psth if hemi == 'left' else ctrl_right_psth - ctrl_left_psth
stim_psth_diff = stim_left_psth - stim_right_psth if hemi == 'left' else stim_right_psth - stim_left_psth
elif unit['unit_selectivity'] == 'contra-selective':
ctrl_psth_diff = ctrl_left_psth - ctrl_right_psth if hemi == 'right' else ctrl_right_psth - ctrl_left_psth
stim_psth_diff = stim_left_psth - stim_right_psth if hemi == 'right' else stim_right_psth - stim_left_psth
ctrl_psths.append(ctrl_psth_diff)
delta_sels.append(ctrl_psth_diff - stim_psth_diff)
ctrl_psths = np.vstack(ctrl_psths)
delta_sels = np.vstack(delta_sels)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(4, 6))
_plot_with_sem(delta_sels, t_vec, ax)
if recover_time_window:
recovery_times = []
for i in range(1000):
i_sample = np.random.choice(delta_sels.shape[0], delta_sels.shape[0], replace = True)
btstrp_diff = np.nanmean(delta_sels[i_sample, :], axis = 0) / np.nanmean(ctrl_psths[i_sample, :], axis = 0)
t_recovered = t_vec[
(btstrp_diff < 0.2) & (t_vec > recover_time_window[0]) & (t_vec < recover_time_window[1])]
if len(t_recovered) > 0:
recovery_times.append(t_recovered[0])
ax.axvline(x = np.mean(recovery_times), linestyle = '--', color = 'g')
ax.axvspan(np.mean(recovery_times) - np.std(recovery_times), np.mean(recovery_times) + np.std(recovery_times),
alpha = 0.2, color = 'g')
ax.axhline(y=0, color = 'k')
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
# add shaded bar for photostim
ax.axvspan(stim_time, stim_time + stim_dur, 0.95, 1, alpha = 0.3, color = 'royalblue')
ax.set_ylabel('Selectivity change (spike/s)')
ax.set_xlabel('Time (s)')
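# Hedged usage sketch (assumed restriction values, not part of the original module):
# units = ephys.Unit & {'subject_id': 435884, 'session': 1}
# plot_selectivity_change_photostim_effect(units, ['both_alm'], recover_time_window=(0, 1.2))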
def plot_coding_direction(units, time_period=None, axs=None):
_, proj_contra_trial, proj_ipsi_trial, time_stamps = psth.compute_CD_projected_psth(
units.fetch('KEY'), time_period=time_period)
period_starts = (experiment.Period & 'period in ("sample", "delay", "response")').fetch('period_start')
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(8, 6))
# plot
_plot_with_sem(proj_contra_trial, time_stamps, ax=axs, c='b')
_plot_with_sem(proj_ipsi_trial, time_stamps, ax=axs, c='r')
for x in period_starts:
axs.axvline(x=x, linestyle = '--', color = 'k')
# cosmetic
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_ylabel('CD projection (a.u.)')
axs.set_xlabel('Time (s)')
def plot_paired_coding_direction(unit_g1, unit_g2, labels=None, time_period=None):
"""
Plot trial-to-trial CD-endpoint correlation between CD-projected trial-psth from two unit-groups (e.g. two brain regions)
Note: coding direction is calculated on selective units, contra vs. ipsi, within the specified time_period
"""
_, proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps = psth.compute_CD_projected_psth(
unit_g1.fetch('KEY'), time_period=time_period)
_, proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps = psth.compute_CD_projected_psth(
unit_g2.fetch('KEY'), time_period=time_period)
period_starts = (experiment.Period & 'period in ("sample", "delay", "response")').fetch('period_start')
if labels:
assert len(labels) == 2
else:
labels = ('unit group 1', 'unit group 2')
# plot projected trial-psth
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
_plot_with_sem(proj_contra_trial_g1, time_stamps, ax=axs[0], c='b')
_plot_with_sem(proj_ipsi_trial_g1, time_stamps, ax=axs[0], c='r')
_plot_with_sem(proj_contra_trial_g2, time_stamps, ax=axs[1], c='b')
_plot_with_sem(proj_ipsi_trial_g2, time_stamps, ax=axs[1], c='r')
# cosmetic
for ax, label in zip(axs, labels):
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('CD projection (a.u.)')
ax.set_xlabel('Time (s)')
ax.set_title(label)
# plot trial CD-endpoint correlation
p_start, p_end = time_period
contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
c_df = pd.DataFrame([contra_cdend_1, contra_cdend_2]).T
c_df.columns = labels
c_df['trial-type'] = 'contra'
i_df = pd.DataFrame([ipsi_cdend_1, ipsi_cdend_2]).T
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
# In other Series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
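# Illustrative doctest-style sketch (added for clarity, not in the original source):
# ordered Categoricals support inequality comparisons, unordered ones only equality.
# >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b'], ordered=True)
# >>> c > 'a'
# array([False,  True])
# >>> pd.Categorical(['a', 'b']) < 'a'
# Traceback (most recent call last):
#     ...
# TypeError: Unordered Categoricals can only compare equality or not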
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
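# Doctest-style sketch of the helper (added for illustration; the container here
# is simply the codes array, but any Container works):
# >>> cat = pd.Categorical(['a', 'b'])
# >>> contains(cat, 'a', container=cat._codes)
# True
# >>> contains(cat, 'c', container=cat._codes)
# False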
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
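# Doctest-style sketch (added for illustration):
# >>> cat = pd.Categorical(['a', 'b', 'a'])
# >>> cat.astype(object)
# array(['a', 'b', 'a'], dtype=object)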
@cache_readonly
def size(self) -> int:
"""
Return the len of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computationally intensive)
factorization step, which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
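# Doctest-style sketch (added for illustration): codes are positions into
# `categories`, with -1 marking missing values.
# >>> pd.Categorical(['a', 'b', np.nan, 'a']).codes
# array([ 0,  1, -1,  0], dtype=int8)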
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
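# Doctest-style sketch (added for illustration):
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.as_ordered()
# [a, b]
# Categories (2, object): [a < b]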
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes, which do not consider an S1 string equal to a single-char
Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
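# Doctest-style sketch (added for illustration): dropping a category sets the
# corresponding values to NaN, adding one leaves it unused.
# >>> c = pd.Categorical(['a', 'b', 'b'])
# >>> c.set_categories(['b', 'c'])
# [NaN, b, b]
# Categories (2, object): [b, c]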
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
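# Doctest-style sketch (added for illustration):
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.reorder_categories(['b', 'a'], ordered=True)
# [a, b, a]
# Categories (2, object): [b < a]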
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
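# Doctest-style sketch (added for illustration): the new category is appended
# but initially unused.
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.add_categories('c')
# [a, b]
# Categories (3, object): [a, b, c]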
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN.
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
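# Doctest-style sketch (added for illustration): values in removed categories
# become NaN.
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.remove_categories('b')
# [a, NaN, a]
# Categories (1, object): [a]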
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
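# Doctest-style sketch (added for illustration):
# >>> c = pd.Categorical(['a', 'a'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()
# [a, a]
# Categories (1, object): [a]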
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return transposed numpy array.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
if is_scalar(value):
codes = self.categories.get_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.get_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype="int64")
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
mask = ~algorithms.isin(value, self.categories)
if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Series, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
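    # Editor's illustrative sketch (not part of pandas itself): the behaviour implied
    # by the branches above, with made-up values.
    #
    #   cat = pd.Categorical(["a", np.nan, "b"], categories=["a", "b"])
    #   cat.fillna("a")    # -> [a, a, b]; "a" is an existing category
    #   cat.fillna("z")    # -> ValueError: fill value must be in categories
    #   cat.fillna(["a"])  # -> TypeError: a list is neither scalar, dict nor Series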
def take(self, indexer, allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values.
These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill==False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``ValueError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs don't have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)
def __len__(self) -> int:
"""
The length of this Categorical.
"""
return len(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_get_values().tolist())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = f"{head[:-1]}, ..., {tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader = f"Categories ({len(self.categories)}, {dtype}): "
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse " < ... < " to " ... " to save space in the repr
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {len(self)}\n{info}"
def _get_repr(self, length=True, na_rep="NaN", footer=True) -> str:
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(
self, length=length, na_rep=na_rep, footer=footer
)
result = formatter.to_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = f"[], {msg}"
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
"""
Return an item.
"""
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
deprecate_ndim_indexing(result)
""""""
"""
Copyright (c) 2021 <NAME> as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import torch
import torch.utils.data as torchdata
import pandas as pd
#%% Training class
class timeseries_dataset():
def __init__(self, name, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen):
self.name = name
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
def load(self, mode):
if self.name == 'uci_electricity':
output = uci_electricity(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'uci_traffic':
output = uci_traffic(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'kaggle_webtraffic':
output = kaggle_webtraffic(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'kaggle_favorita':
output = kaggle_favorita(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'kaggle_m5':
output = kaggle_m5(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
return output
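# A minimal usage sketch (added for illustration, not part of the original module).
# The sequence lengths and batch size are assumed values, chosen only to show how the
# three splits plug into torch DataLoaders.
def _example_dataloaders(name='uci_electricity', batch_size=64):
    datasets = timeseries_dataset(name, dim_inputseqlen=168, dim_outputseqlen=24, dim_maxseqlen=193)
    train = datasets.load('train')
    validate = datasets.load('validate')
    test = datasets.load('test')
    # shuffle only the training split; validation/test keep their temporal order
    loaders = {
        split: torchdata.DataLoader(ds, batch_size=batch_size, shuffle=(split == 'train'))
        for split, ds in [('train', train), ('validate', validate), ('test', test)]
    }
    return loaders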
#%% UCI - Electricity
# Source: https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014
class uci_electricity(torchdata.Dataset):
def __init__(self, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen, mode, name):
"""
Load UCI Electricity dataset in format [samples, seqlen, features]
"""
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.window = dim_inputseqlen + dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
self.mode = mode
self.p_train = 0.8
self.p_validate = 0.1
self.name = name
self.X, self.Y = self.get_data()
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
x = self.X[self.index[idx, 0], self.index[idx, 1]]
y = self.Y[self.index[idx, 0], self.index[idx, 1]]
return x, y
def get_data(self):
# Read data from source
df = pd.read_csv('data/uci_electricity/LD2011_2014.txt', sep = ';', parse_dates=[0], infer_datetime_format=True, dtype='float32', decimal=',', index_col=[0])
# Subtract 15 minutes from index to make index the starting time (instead of ending time)
df.index = df.index + pd.Timedelta(minutes=-15)
# Aggregate to hourly level if desired
df = df.groupby([df.index.date, df.index.hour]).sum()
df.index = df.index.set_levels([pd.to_datetime(df.index.levels[0]), df.index.levels[1]])
# Create index for allowable entries (not only zeros)
self.num_series = len(df.columns)
self.num_dates = len(df)
arr_online = np.zeros((self.num_dates, self.num_series))
index = np.empty((0, 2), dtype='int')
for i in range(len(df.columns)):
idx = np.flatnonzero(df.iloc[:, i])
arr_online[idx, i] = 1
idx = np.arange(idx[0] - self.window, np.minimum(idx[-1] + 1, len(df) - self.dim_maxseqlen))
idx = idx[idx >= 0]
arr = np.array([np.repeat(i, len(idx)), idx]).T
index = np.append(index, arr, axis = 0)
# Stack (and recreate Dataframe because stupid Pandas creates a Series otherwise)
df = pd.DataFrame(df.stack())
# Add time-based covariates
df['Series'] = df.index.get_level_values(2)
df['Series'] = df['Series'].str.replace(r'MT_','').astype('int') - 1
# Add scaled time-based features
df['Month_sin'] = np.sin(df.index.get_level_values(0).month * (2 * np.pi / 12))
df['Month_cos'] = np.cos(df.index.get_level_values(0).month * (2 * np.pi / 12))
df['DayOfWeek_sin'] = np.sin(df.index.get_level_values(0).dayofweek * (2 * np.pi / 7))
df['DayOfWeek_cos'] = np.cos(df.index.get_level_values(0).dayofweek * (2 * np.pi / 7))
df['HourOfDay_sin'] = np.sin(df.index.get_level_values(1) * (2 * np.pi / 24))
df['HourOfDay_cos'] = np.cos(df.index.get_level_values(1) * (2 * np.pi / 24))
df['Online'] = arr_online.reshape(-1, 1)
# Rename target column
df.rename(columns={0:'E_consumption'}, inplace=True)
# Add lagged output variable
df['E_consumption_lag'] = df.groupby(level=2)['E_consumption'].shift(1)
# Remove first date (contains NaNs for the lagged column)
df = df.iloc[370:]
# Sort by series
df.index.names = ['date','hour','series']
df.sort_index(level=['series','date','hour'], inplace=True)
# Create feature matrix X and output vector Y
df_Y = df[['E_consumption']]
df.drop(['E_consumption'], axis=1, inplace=True)
df_X = df
# Convert dataframe to numpy and reshape to [series x dates x features] format
X = df_X.to_numpy(dtype='float32').reshape(self.num_series, -1, len(df_X.columns))
Y = df_Y.to_numpy(dtype='float32').reshape(self.num_series, -1, len(df_Y.columns))
# Input and output dimensions
self.dim_input, self.dim_output = X.shape[-1], Y.shape[-1]
# Convert to torch
X, Y = torch.from_numpy(X), torch.from_numpy(Y)
# Create subsequences by unfolding along date dimension with a sliding window
Xt, Yt = X.unfold(-2, self.dim_maxseqlen, 1).permute(0, 1, 3, 2), Y.unfold(-2, self.dim_maxseqlen, 1).permute(0, 1, 3, 2)
# Create train, validate and test sets
num_dates_train = int(self.p_train * Xt.shape[1])
num_dates_validate = int(self.p_validate * Xt.shape[1])
index = torch.from_numpy(index)
# Get datasets
if self.mode == 'train':
self.index = index[index[:, 1] < num_dates_train]
elif self.mode == 'validate':
self.index = index[(index[:, 1] >= num_dates_train) & (index[:, 1] < num_dates_train + num_dates_validate)]
elif self.mode == 'test':
self.index = index[(index[:, 1] >= num_dates_train + num_dates_validate + self.dim_outputseqlen - 1)]
# Useful for use in algorithms - dimension of lags and dimension of covariates (minus dim of time series ID)
self.d_lag = self.dim_output
self.d_emb = 1
self.d_cov = self.dim_input - self.dim_output - 1
return Xt, Yt[:, :, self.dim_inputseqlen:self.window, :]
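# Illustration only (not in the original code): how Tensor.unfold turns the
# [series, dates, features] tensor above into sliding windows of length dim_maxseqlen.
# The shapes below are toy values.
def _example_unfold_windows():
    series, dates, features, window = 2, 10, 3, 4
    X = torch.arange(series * dates * features, dtype=torch.float32).reshape(series, dates, features)
    # unfold along the date dimension with stride 1, then move the window axis before features
    Xt = X.unfold(-2, window, 1).permute(0, 1, 3, 2)
    # Xt.shape == (series, dates - window + 1, window, features)
    return Xt.shape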
#%% UCI - Traffic
# Source: http://archive.ics.uci.edu/ml/datasets/PEMS-SF
class uci_traffic(torchdata.Dataset):
def __init__(self, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen, mode, name):
"""
Load UCI Traffic dataset in format [samples, seqlen, features]
"""
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.window = dim_inputseqlen + dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
self.mode = mode
self.p_train = 0.8
self.p_validate = 0.1
self.name = name
self.X, self.Y = self.get_data()
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
x = self.X[self.index[idx, 0], self.index[idx, 1]]
y = self.Y[self.index[idx, 0], self.index[idx, 1]]
return x, y
def get_data(self):
# Read data from source
df = pd.read_csv('data/uci_traffic/dataset.csv', index_col=0, infer_datetime_format=True, parse_dates=[[0, 1]])
# Extract missing column
missing = df['missing'].copy()
df = df.drop(columns=['missing'])
# Create index for allowable entries
self.num_series = len(df.columns)
self.num_dates = len(df)
index_col1 = np.repeat(np.arange(0, self.num_series), self.num_dates - self.dim_maxseqlen)
index_col2 = np.tile(np.arange(0, self.num_dates - self.dim_maxseqlen), self.num_series)
index = np.stack((index_col1, index_col2), axis=1)
# Stack (and recreate Dataframe because stupid Pandas creates a Series otherwise)
df = pd.DataFrame(df.stack())
# Add series indicator as integer
df['Series'] = df.index.get_level_values(1)
df['Series'] = df['Series'].str.replace(r'carlane_','').astype('int') - 1
# Add time-based covariates
df['DayOfWeek_sin'] = np.sin(df.index.get_level_values(0).dayofweek * (2 * np.pi / 7))
df['DayOfWeek_cos'] = np.cos(df.index.get_level_values(0).dayofweek * (2 * np.pi / 7))
df['HourOfDay_sin'] = np.sin(df.index.get_level_values(0).hour * (2 * np.pi / 24))
df['HourOfDay_cos'] = np.cos(df.index.get_level_values(0).hour * (2 * np.pi / 24))
df['Available'] = 1
df.loc[(missing[missing == 1].index, slice(None)), 'Available'] = 0
# Rename target column
df.rename(columns={0:'Occupancy_rate'}, inplace=True)
# Add lagged output variable
df['Occupancy_rate_lag'] = df.groupby(level=1)['Occupancy_rate'].shift(1)
# Remove first date (contains NaNs for the lagged column)
df = df.iloc[self.num_series:]
# Sort by series
df.index.names = ['date_time','series']
df = df.sort_index(level=['series','date_time'])
# Create feature matrix X and output vector Y
df_Y = df[['Occupancy_rate']]
df.drop(['Occupancy_rate'], axis=1, inplace=True)
df_X = df
# Convert dataframe to numpy and reshape to [series x dates x features] format
X = df_X.to_numpy(dtype='float32').reshape(self.num_series, -1, len(df_X.columns))
Y = df_Y.to_numpy(dtype='float32').reshape(self.num_series, -1, len(df_Y.columns))
# Input and output dimensions
self.dim_input, self.dim_output = X.shape[-1], Y.shape[-1]
# Convert to torch
X, Y = torch.from_numpy(X), torch.from_numpy(Y)
# Create subsequences by unfolding along date dimension with a sliding window
Xt, Yt = X.unfold(-2, self.dim_maxseqlen, 1).permute(0, 1, 3, 2), Y.unfold(-2, self.dim_maxseqlen, 1).permute(0, 1, 3, 2)
# Create train, validate and test sets
num_dates_train = int(self.p_train * Xt.shape[1])
num_dates_validate = int(self.p_validate * Xt.shape[1])
index = torch.from_numpy(index)
# Get datasets
if self.mode == 'train':
self.index = index[index[:, 1] < num_dates_train]
elif self.mode == 'validate':
self.index = index[(index[:, 1] >= num_dates_train) & (index[:, 1] < num_dates_train + num_dates_validate)]
elif self.mode == 'test':
self.index = index[(index[:, 1] >= num_dates_train + num_dates_validate + self.dim_outputseqlen - 1)]
# Useful for use in algorithms - dimension of lags and dimension of covariates (minus dim of time series ID)
self.d_lag = self.dim_output
self.d_emb = 1
self.d_cov = self.dim_input - self.dim_output - 1
return Xt, Yt[:, :, self.dim_inputseqlen:self.window, :]
#%% Kaggle - Webtraffic
# Source: https://www.kaggle.com/c/web-traffic-time-series-forecasting/data
class kaggle_webtraffic(torchdata.Dataset):
def __init__(self, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen, mode, name):
"""
Load Kaggle Web Traffic dataset in format [samples, seqlen, features]
"""
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.window = dim_inputseqlen + dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
self.mode = mode
self.train_maxdate = '2016-12-31'
self.validate_maxdate = '2017-03-31'
self.name = name
self.X, self.Y = self.get_data()
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
x = self.X[self.index[idx, 0], self.index[idx, 1]]
y = self.Y[self.index[idx, 0], self.index[idx, 1]]
return x, y
def get_data(self):
# Read data from source
df = pd.read_csv('data/kaggle_webtraffic/train_2.csv', index_col=[0]).fillna(0).transpose()
# Set index
df.index = pd.to_datetime(df.index)
# Training, validation, test
num_dates_train = len(df.loc[:self.train_maxdate]) - self.dim_maxseqlen
num_dates_validate = len(df.loc[:self.validate_maxdate]) - self.dim_maxseqlen
# Use only top-10000
pageviews = df.loc[:'2016-12-31'].sum(axis=0)
indices = pageviews.sort_values(ascending=False)[:10000].index
df = df.loc[:, indices]
# Rename pages to simple index - we ignore information in the page name
columns = [str(i) for i in range(len(df.columns))]
df.columns = columns
# Series
self.num_series = len(df.columns)
self.num_dates = len(df) - 1
# Create index for allowable entries
index_col1 = np.repeat(np.arange(0, self.num_series), self.num_dates - self.dim_maxseqlen + 1)
index_col2 = np.tile(np.arange(0, self.num_dates - self.dim_maxseqlen + 1), self.num_series)
index = np.stack((index_col1, index_col2), axis=1)
# Reset index
# Stack (and recreate Dataframe because stupid Pandas creates a Series otherwise)
df = pd.DataFrame(df.stack())
# Add series indicator as integer
df['Series'] = df.index.get_level_values(1)
df['Series'] = df['Series'].astype('int')
# Add time-based covariates
df['DayOfWeek_sin'] = np.sin(df.index.get_level_values(0).dayofweek * (2 * np.pi / 7))
df['DayOfWeek_cos'] = np.cos(df.index.get_level_values(0).dayofweek * (2 * np.pi / 7))
df['DayOfMonth_sin'] = np.sin(df.index.get_level_values(0).day * (2 * np.pi / 31))
df['DayOfMonth_cos'] = np.cos(df.index.get_level_values(0).day * (2 * np.pi / 31))
df['WeekOfYear_sin'] = np.sin(df.index.get_level_values(0).week * (2 * np.pi / 53))
df['WeekOfYear_cos'] = np.cos(df.index.get_level_values(0).week * (2 * np.pi / 53))
df['MonthOfYear_sin'] = np.sin(df.index.get_level_values(0).month * (2 * np.pi / 12))
df['MonthOfYear_cos'] = np.cos(df.index.get_level_values(0).month * (2 * np.pi / 12))
# Rename target column
df.rename(columns={0:'Page_views'}, inplace=True)
# Add lagged output variable
df['Page_views_lag'] = df.groupby(level=1)['Page_views'].shift(1)
# Remove first date (contains NaNs for the lagged column)
df = df.iloc[self.num_series:]
# Sort by series
df.index.names = ['date_time','series']
df = df.sort_index(level=['series','date_time'])
# Create feature matrix X and output vector Y
df_Y = df[['Page_views']]
df.drop(['Page_views'], axis=1, inplace=True)
df_X = df
# Convert dataframe to numpy and reshape to [series x dates x features] format
X = df_X.to_numpy(dtype='float32').reshape(self.num_series, -1, len(df_X.columns))
Y = df_Y.to_numpy(dtype='float32').reshape(self.num_series, -1, len(df_Y.columns))
# Input and output dimensions
self.dim_input, self.dim_output = X.shape[-1], Y.shape[-1]
# Convert to torch
X, Y = torch.from_numpy(X), torch.from_numpy(Y)
# Create subsequences by unfolding along date dimension with a sliding window
Xt, Yt = X.unfold(-2, self.dim_maxseqlen, 1).permute(0, 1, 3, 2), Y.unfold(-2, self.dim_maxseqlen, 1).permute(0, 1, 3, 2)
index = torch.from_numpy(index)
# Get datasets
if self.mode == 'train':
self.index = index[index[:, 1] < num_dates_train]
elif self.mode == 'validate':
self.index = index[(index[:, 1] >= num_dates_train) & (index[:, 1] < num_dates_validate)]
elif self.mode == 'test':
self.index = index[(index[:, 1] >= num_dates_validate)]
# Useful for use in algorithms - dimension of lags and dimension of covariates (minus dim of time series ID)
self.d_lag = self.dim_output
self.d_emb = 1
self.d_cov = self.dim_input - self.dim_output - 1
return Xt, Yt[:, :, self.dim_inputseqlen:self.window, :]
#%% Kaggle - Favorita
# Source: https://www.kaggle.com/c/favorita-grocery-sales-forecasting/data
class kaggle_favorita(torchdata.Dataset):
def __init__(self, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen, mode, name):
"""
Load Favorita dataset in format [samples, seqlen, features]
"""
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.window = dim_inputseqlen + dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
self.mode = mode
self.train_maxdate = '2013-09-02'
self.validate_maxdate = '2013-10-03'
self.name = name
self.X, self.Y = self.get_data()
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
x = self.X[self.index[idx]:self.index[idx] + self.dim_maxseqlen]
y = self.Y[self.index[idx] + self.dim_inputseqlen:self.index[idx] + self.dim_outputseqlen + self.dim_inputseqlen]
return x, y
def get_data(self):
# Read data from source
df = pd.read_hdf('data/kaggle_favorita/df_full_2013.h5', key='favorita')
index = pd.read_hdf('data/kaggle_favorita/df_full_2013.h5', key='index')
df_Y = df[['unit_sales']]
df_X = df.drop(columns=['unit_sales', 'transactions_missing_lagged', 'oil_price_missing_lagged', 'unit_sales_missing_lagged', 'transactions_lagged', 'oil_price_lagged'])
# Convert dataframe to numpy and reshape to [series x dates x features] format
X = df_X.to_numpy(dtype='float32')
Y = df_Y.to_numpy(dtype='float32')
# Input and output dimensions
self.dim_input, self.dim_output = X.shape[-1], Y.shape[-1]
# Convert to torch
X, Y = torch.from_numpy(X), torch.from_numpy(Y)
# Get datasets
if self.mode == 'train':
idx = index[index['date'] <= self.train_maxdate]['index'].to_numpy()
self.index = torch.from_numpy(idx)
elif self.mode == 'validate':
idx = index[(index['date'] <= self.validate_maxdate) & (index['date'] > self.train_maxdate)]['index'].to_numpy()
self.index = torch.from_numpy(idx)
elif self.mode == 'test':
idx = index[index['date'] > self.validate_maxdate]['index'].to_numpy()
self.index = torch.from_numpy(idx)
# Useful for use in algorithms - dimension of lags and dimension of covariates (minus dim of time series ID)
self.d_lag = 1 # number of lags in input
self.d_emb = 2 # number of embedding categoricals in input
self.d_cov = self.dim_input - self.d_lag - self.d_emb # number of covariates input
return X, Y
#%% Kaggle - M5
# Source: https://www.kaggle.com/c/m5-forecasting-accuracy
class kaggle_m5(torchdata.Dataset):
def __init__(self, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen, mode, name):
"""
Load m5 dataset in format [samples, seqlen, features]
"""
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.window = dim_inputseqlen + dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
self.mode = mode
self.train_mindate = '2014-01-01'
self.train_maxdate = '2015-12-31'
self.validate_maxdate = '2016-01-31'
self.name = name
self.X, self.Y = self.get_data()
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
x = self.X[self.index[idx]:self.index[idx] + self.dim_maxseqlen]
y = self.Y[(self.index[idx] + self.dim_inputseqlen):(self.index[idx] + self.dim_outputseqlen + self.dim_inputseqlen)]
return x, y
def get_data(self):
# Read data from source
df = pd.read_feather('data/kaggle_m5/m5_dataset_products.feather')
import datetime as dt
import json
import os
from datetime import datetime
import bs4
import pandas as pd
import requests
import yfinance as yf
from pandas_datareader import data as pdr
yf.pdr_override()
info_types = ["info", "options", "dividends",
"mutualfund_holders", "institutional_holders",
"major_holders", "calendar", "actions", "splits"]
def date_parse(d):
return datetime.strptime(d, "%Y-%m-%d")
def load_price_history(symbol, start_date=dt.datetime(2000, 1, 1), end_date=dt.datetime.now(), market="us",
reload=True):
now = dt.datetime.now()
symbol = symbol.lower().strip()
symbol_filename = "-".join(symbol.split("."))
file_path = f"data/{market}/price_history/{symbol_filename}.csv"
if reload:
if os.path.isfile(file_path): # download only data from one day after latest date in csv
df_old = pd.read_csv(file_path, index_col=0, parse_dates=True)
if len(df_old) == 0:
df = pdr.get_data_yahoo(symbol, start_date, end_date)
df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
return df
oldest_saved_date = df_old.index[0]
latest_saved_date = df_old.index[-1]
try:
if start_date < oldest_saved_date:
df_older = pdr.get_data_yahoo(symbol, start_date, oldest_saved_date - dt.timedelta(days=1))
df_older = df_older[(df_older.index >= start_date) & (df_older.index < oldest_saved_date)]
df_old = pd.concat([df_older, df_old])
df_old = df_old[df_old.index < latest_saved_date]
df_new = pdr.get_data_yahoo(symbol, latest_saved_date, now)
df_new = df_new[df_new.index >= latest_saved_date]
df_new = df_new[~df_new.index.duplicated(keep="first")]
df = pd.concat([df_old, df_new])
df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
return df[(df.index >= start_date) & (df.index <= end_date)]
except TypeError:
df = pdr.get_data_yahoo(symbol, start_date, end_date)
df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
return df
else: # no csv exits
df = pdr.get_data_yahoo(symbol, start_date, end_date)
directory = f"data/{market}/price_history"
if not os.path.exists(directory):
os.makedirs(directory)
print(file_path)
df.reset_index(level=0).to_csv(file_path, index=False, date_format="%Y-%m-%d")
return df
else: # don't reload
df = pd.read_csv(file_path, index_col=0, parse_dates=True)
return df[(df.index >= start_date) & (df.index <= end_date)]
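# Illustrative usage (editor's addition, not part of the original helper). Symbol,
# dates and market are example values; the function caches CSVs under
# data/<market>/price_history/, and reload=False only reads an existing cache.
def _example_load_history():
    df = load_price_history("aapl",
                            start_date=dt.datetime(2015, 1, 1),
                            end_date=dt.datetime(2020, 12, 31),
                            market="us",
                            reload=False)
    return df.tail()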
"""
Helper module for road-network related functions
"""
__version__ = '0.2'
import logging
import numpy as np
import networkx as nx
from pysal.lib.cg import RTree, Rect
from sklearn.neighbors import KDTree
import pandas as pd
from code_libs.euclidean import euchelper
class Network:
"""Class for the Network graph
"""
def __init__(self, lines_arr, max_point_dist=0.1):
self.pt_dict = {}
self.adj_dict = {}
self.rtree = RTree()
# KD Tree related
self.kd_tree = None
self.points_kd = None
self.kd_ptid_to_nodeid_map = {}
self.kd_nodeid_to_ptid_map = {}
self.kd_pt_data_list = []
self.network = None
self.refine_graph(lines_arr, max_point_dist)
self.build_all_elements()
def check_and_add_pt(self, point, min_dist, next_pt_id):
"""Check if a point is present. A point will be added if
there are no other points within the "min_dist" neighborhood
Arguments:
point {[list]} -- [x,y] of the point
min_dist {[float]} -- The minimum neighborhood distance
next_pt_id {[int]} -- The starting id of the point that should
be assigned for the next point
Returns:
[int] -- The next_id to be used for downstream calls
"""
point_id = None
res = [r.leaf_obj()
for r in self.rtree.query_point(point) if r.is_leaf()]
if(res is not None) and res:
#logging.debug("\tREPEAT: point={}, res={}".format(point,res))
point_id = res[0]
else:
pt0 = ((point[0] - float(min_dist / 2.0)),
(point[1] - float(min_dist / 2.0)))
pt1 = ((point[0] + float(min_dist / 2.0)),
(point[1] + float(min_dist / 2.0)))
self.rtree.insert(next_pt_id, Rect(pt0[0], pt0[1], pt1[0],
pt1[1]))
point_id = next_pt_id
return point_id
def refine_graph(self, lines_arr, max_point_dist):
"""
Refine the graph by removing close points
Arguments:
lines_arr {[list]} -- List of lines
max_point_dist {[float]} -- The max distance between two points
"""
# Put all lines in an r-tree
pt_id = 0
add_reverse_edge = False
#index = 0
for line_string in lines_arr:
prev_pt_id = None
for point in line_string:
add_pt_id = self.check_and_add_pt(point, max_point_dist, pt_id)
if add_pt_id == pt_id:
# Point was added
self.pt_dict[add_pt_id] = point
pt_id += 1
else:
# Point was already found
pass
if prev_pt_id is not None:
# Add edge from src->dest
adj_list = self.adj_dict.get(prev_pt_id, None)
if adj_list is None:
adj_list = []
adj_list.append(add_pt_id)
self.adj_dict[prev_pt_id] = adj_list
# Add reverse edge (dst->src)
if add_reverse_edge:
adj_list = self.adj_dict.get(add_pt_id, None)
if adj_list is None:
adj_list = []
adj_list.append(prev_pt_id)
self.adj_dict[add_pt_id] = adj_list
prev_pt_id = add_pt_id
# logging.debug("pt_dict={}".format(self.pt_dict))
def build_all_elements(self):
"""Build the network and create an indexing structure (kdtree)
"""
self.build_network()
self.create_point_kd()
def create_point_kd(self):
"""Create a kdtree of points
"""
index = 0
node_list = self.network.nodes(data=True)
for node, data in node_list:
self.kd_nodeid_to_ptid_map[node] = index
self.kd_ptid_to_nodeid_map[index] = node
self.kd_pt_data_list.append([data["x"], data["y"]])
index += 1
if self.kd_pt_data_list:
self.kd_pt_data_list = np.array(self.kd_pt_data_list)
self.kd_tree = KDTree(self.kd_pt_data_list)
def get_nearest_point_id(self, thept):
"""Get the first nearest point on the graph for a given "pt"
Arguments:
thept {[list]} -- [x,y] point
Returns:
[list] -- Nearest point
"""
pt_id = None
if self.kd_tree is not None:
dist_arr, pt_ind_arr = self.kd_tree.query([thept], k=1)
dist = dist_arr[0][0]
pt_ind = pt_ind_arr[0][0]
if self.kd_ptid_to_nodeid_map.get(pt_ind, None) is not None:
pt_id = self.kd_ptid_to_nodeid_map[pt_ind]
else:
logging.error("ERROR: Nearest point for %s (ind=%d, pt=%s, "
"dist=%f) is not a part of kdtree",
str(thept), pt_ind,
str(self.kd_pt_data_list[pt_ind]), dist)
return pt_id
def build_network(self):
"""Build the road-network
"""
self.network = nx.Graph()
for pt_id in self.pt_dict:
self.network.add_node(pt_id, x=self.pt_dict[pt_id][0],
y=self.pt_dict[pt_id][1],
pos=(self.pt_dict[pt_id][0],
self.pt_dict[pt_id][1]))
for src in self.adj_dict:
for dst in self.adj_dict[src]:
src_pt = np.array(self.pt_dict[src])
dst_pt = np.array(self.pt_dict[dst])
edge_len = np.linalg.norm(src_pt-dst_pt)
self.network.add_edge(src, dst, weight=edge_len)
def get_shortest_path_bw_id(self, start_point_id, dst_point_id):
"""Get the shortest path between given source and dest
Arguments:
start_point_id {[list]} -- [x,y] of the source point
dst_point_id {[type]} -- [x,y] of the destination point
Returns:
[list] -- list of node ids along the shortest path
"""
retval = []
try:
retval = nx.shortest_path(
self.network, source=start_point_id, target=dst_point_id,
weight='weight')
except nx.NetworkXNoPath:
# No path found between two points
retval = None
return retval
def get_xy(self, node_id):
"""
Get the (x,y) position of a given node with id = "node_id"
Arguments:
node_id {[int]} -- The node id
Returns:
[list] -- The [x,y] of the node
"""
if node_id in self.network:
return (self.network.node[node_id]['x'],
self.network.node[node_id]['y'])
else:
return None
def get_interpolated_path(self, st_pt, end_pt, ts_arr,
interpolate_time_sec=0.5, id_str=None):
"""
Interpolate a path between st_pt and end_pt on the graph
Arguments:
st_pt {int} -- The id of the starting point
end_pt {int} -- The id of the end point
ts_arr {list} -- [list of timestamps for each visit]
Keyword Arguments:
interpolate_time_sec {float} -- The interpolation time
(default: {0.5})
id_str {string} -- The string id provided as a suffix to file
(default: {None})
"""
retval = []
path = self.get_shortest_path_bw_id(st_pt, end_pt)
if path is None:
retval = None
elif path:
last_ts = ts_arr[len(ts_arr) - 1]
start_ts = ts_arr[0]
total_time_sec = (last_ts - start_ts) / np.timedelta64(1, 's')
path_lth = 0
path_pts = []
prev_pt = None
# Compute path length
for node in path:
thept = self.get_xy(node)
if thept is not None:
thept = np.array(thept)
path_pts.append(thept)
if prev_pt is not None:
dist = np.linalg.norm(thept-prev_pt)
path_lth += dist
prev_pt = thept
# Chop the distance into points based on speed and time
time_diff = (ts_arr[len(ts_arr) - 1] -
ts_arr[0]) / np.timedelta64(1, 's')
dist_km_per_hop = (path_lth *
interpolate_time_sec / float(total_time_sec))
path_list = []
prev_node = None
prev_node_pt = None
remaining_dist_on_this_hop = 0.0
curr_ts = start_ts
for node in path:
thept = np.array(self.get_xy(node))
if prev_node_pt is not None:
# Now interpolate with the distance
edge_len_km = np.linalg.norm(thept - prev_node_pt)
traversed_dist_on_this_hop = abs(
remaining_dist_on_this_hop)
if(remaining_dist_on_this_hop < 0 and
abs(remaining_dist_on_this_hop) < edge_len_km):
proportion = (
traversed_dist_on_this_hop / float(edge_len_km))
new_pt = euchelper.interpolate_line(
prev_node_pt, thept, proportion)
curr_lth = abs(remaining_dist_on_this_hop)
time_delta_this_hop_sec = (curr_lth *
time_diff / float(path_lth))
curr_ts = (curr_ts +
pd.Timedelta(time_delta_this_hop_sec, unit='s'))
path_list.append({"ts": curr_ts, "edgept1": prev_node,
"edgept2": node,
"edgeX0": prev_node_pt[0],
"edgeY0": prev_node_pt[1],
"edgeX1": thept[0], "edgeY1": thept[1],
"edgeLth": edge_len_km,
"x": new_pt[0], "y": new_pt[1],
"proportion": proportion,
"currLth": curr_lth,
"timeDelta": time_delta_this_hop_sec,
"distOnHop": traversed_dist_on_this_hop,
"remLth": remaining_dist_on_this_hop}
)
retval.append(
[curr_ts, new_pt[0], new_pt[1], curr_lth])
remaining_dist_on_this_hop = edge_len_km + remaining_dist_on_this_hop
if remaining_dist_on_this_hop <= 0:
# Add entire edge
new_pt = thept
curr_lth = edge_len_km
remaining_dist_on_this_hop -= curr_lth
time_delta_this_hop_sec = curr_lth * \
time_diff / float(path_lth)
curr_ts = curr_ts + \
pd.Timedelta(time_delta_this_hop_sec, unit='s')
retval.append(
[curr_ts, new_pt[0], new_pt[1], curr_lth])
path_list.append({"ts": curr_ts,
"edgeX0": prev_node_pt[0],
"edgeY0": prev_node_pt[1],
"edgeX1": thept[0],
"edgeY1": thept[1],
"x": new_pt[0],
"y": new_pt[1],
"edgeLth": edge_len_km,
"proportion": 1.0,
"currLth": curr_lth,
"timeDelta": time_delta_this_hop_sec,
"distOnHop": curr_lth,
"remLth": remaining_dist_on_this_hop}
)
else:
while remaining_dist_on_this_hop >= 0.0:
# Now keep interpolating
curr_lth = dist_km_per_hop # about 0.7
traversed_dist_on_this_hop += curr_lth
remaining_dist_on_this_hop = edge_len_km - traversed_dist_on_this_hop
if traversed_dist_on_this_hop > edge_len_km:
curr_lth = (edge_len_km -
(traversed_dist_on_this_hop - curr_lth))
traversed_dist_on_this_hop = edge_len_km
proportion = (traversed_dist_on_this_hop /
float(edge_len_km))
new_pt = euchelper.interpolate_line(
prev_node_pt, thept, proportion)
time_delta_this_hop_sec = (curr_lth *
time_diff / float(path_lth))
curr_ts = (curr_ts +
pd.Timedelta(time_delta_this_hop_sec, unit='s'))
path_list.append({"ts": curr_ts,
"edgept1": prev_node,
"edgept2": node,
"edgeX0": prev_node_pt[0],
"edgeY0": prev_node_pt[1],
"edgeX1": thept[0],
"edgeY1": thept[1],
"edgeLth": edge_len_km,
"x": new_pt[0],
"y": new_pt[1],
"proportion": proportion,
"currLth": curr_lth,
"timeDelta": time_delta_this_hop_sec,
"distOnHop": traversed_dist_on_this_hop,
"remLth": remaining_dist_on_this_hop})
retval.append(
[curr_ts, new_pt[0], new_pt[1], curr_lth])
else:
# First point
retval.append([curr_ts, thept[0], thept[1], 0])
path_list.append({"ts": curr_ts, "edgept1": prev_node,
"edgept2": node, "edgeX0": None,
"edgeY0": None,
"edgeX1": thept[0],
"edgeY1": thept[1],
"x": thept[0],
"y": thept[1],
"edgeLth": None,
"proportion": None,
"currLth": None,
"timeDelta": None,
"distOnHop": None,
"remLth": remaining_dist_on_this_hop})
prev_node = node
prev_node_pt = thept
pd.DataFrame(path_list).to_csv(
"pathBreaks_{}.csv".format(id_str), index=False)
if(retval is not None) and (len(retval) > 2):
path_list = []
index = 0
curr_ts = start_ts
lth_vec = [ele[3] for ele in retval]
total_lth = float(np.sum(lth_vec))
curr_ts = start_ts
for ent in retval:
ent[0] = curr_ts
time_inc = ent[3] * total_time_sec / float(total_lth)
curr_ts = curr_ts + pd.Timedelta(time_inc, unit='s')
index += 1
path_list.append(
{"ts": ent[0], "edgeX0": ent[1], "edgeY0": ent[2]})
pd.DataFrame(path_list)
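# A small usage sketch added for illustration; the coordinates below are invented.
# Each element of lines_arr is one polyline given as a list of [x, y] points.
def _example_network_query():
    lines_arr = [
        [[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]],
        [[1.0, 0.0], [1.0, 1.0]],
    ]
    net = Network(lines_arr, max_point_dist=0.1)
    # snap two query points to the nearest graph nodes, then route between them
    src = net.get_nearest_point_id([0.05, 0.0])
    dst = net.get_nearest_point_id([1.0, 0.95])
    return net.get_shortest_path_bw_id(src, dst)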
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pylab as pl
import itertools
sns.set(color_codes = True)
rel_dir = './data/measurements/'
figure_directory = './data/figures/'
trajectories = ['segment', 'pentagon', 'M']
mirrors = ['', '_mirror']
m_to_mirror_dict = {'':'', '_m':'_mirror'}
cm_to_inches = 2.54
figure_size = (40/cm_to_inches, 50/cm_to_inches)
trajektorijų_kilmininkas = {'pentagon':'penkiakampio', 'M':'M', 'segment':'atkarpos'}
show_figures = False
write_figures = True
controllers = ['main', '16', '91', '127_stiff']
follow_writer = pd.ExcelWriter('data/figures/trajektoriju_sekimo_matavimai.xlsx')
for c in controllers:
column_names = ['mean line dist', 'mean spline dist', 'mean line angle', 'mean spline angle']
trajectory_deviation_table = pd.DataFrame(np.NaN, index=trajectories, columns=column_names)
for m in ['', '_m']:
for t in trajectories:
short_c_name = c+m
long_c_name = c+m_to_mirror_dict[m]
follow_data = pd.read_csv(rel_dir+short_c_name+'_follow_'+t+'.csv', skipinitialspace=True)
mean_dist_from_line = np.mean(follow_data['distance_from_segment'])
mean_dist_from_spline = np.mean(follow_data['distance_from_spline'])
abs_angle_from_line = np.abs(follow_data['signed_angle_from_segment'])
abs_angle_from_spline = np.abs(follow_data['signed_angle_from_spline'])
mean_angle_from_line = np.mean(abs_angle_from_line)
mean_angle_from_spline = np.mean(abs_angle_from_spline)
trajectory_deviation_table[column_names[0]][t] = mean_dist_from_line
trajectory_deviation_table[column_names[1]][t] = mean_dist_from_spline
trajectory_deviation_table[column_names[2]][t] = mean_angle_from_line
trajectory_deviation_table[column_names[3]][t] = mean_angle_from_spline
plt.figure(figsize=figure_size)
plt.suptitle('Valdiklio \"' + short_c_name + '\" ' + trajektorijų_kilmininkas[t] + ' trajektorijos sekimo paklaidos');
plt.subplot(2, 2, 1)
plt.title("Atstumas iki atkarpos")
plt.plot(follow_data['t'], follow_data['distance_from_segment'], label='momentinis atstumas')
plt.plot(follow_data['t'], np.repeat(mean_dist_from_line, follow_data.index.size), label='vidutinis atstumas')
plt.xlabel("Laikas (s)")
plt.ylabel("Atstumas (m)")
plt.legend(loc=1)
plt.subplot(2, 2, 2)
plt.title("Atstumas iki Catmull-Rom kreivės")
plt.plot(follow_data['t'], follow_data['distance_from_spline'], label='momentinis atstumas')
plt.plot(follow_data['t'], np.repeat(mean_dist_from_spline, follow_data.index.size), label='vidutinis atstumas')
plt.xlabel("Laikas (s)")
plt.ylabel("Atstumas (m)")
plt.legend(loc=1)
plt.subplot(2, 2, 3)
plt.title("Absoliutus kampas su atkarpa")
#plt.plot(follow_data['t'], follow_data['signed_angle_from_segment'])
plt.plot(follow_data['t'], abs_angle_from_line, label='momentinis kampas')
plt.plot(follow_data['t'], np.repeat(mean_angle_from_line, follow_data.index.size), label='vidutinis kampas')
plt.xlabel("Laikas (s)")
plt.ylabel("Nuokrypio kampas ("+u'\N{DEGREE SIGN}'+")")
plt.legend(loc=1)
plt.subplot(2, 2, 4)
plt.title("Absoliutus kampas su Catmull-Rom kreivės liestine")
#plt.plot(follow_data['t'], follow_data['signed_angle_from_spline'])
plt.plot(follow_data['t'], abs_angle_from_spline, label='momentinis kampas')
plt.plot(follow_data['t'], np.repeat(mean_angle_from_spline, follow_data.index.size), label='vidutinis kampas')
plt.ylabel("Nuokrypio kampas ("+u'\N{DEGREE SIGN}'+")")
plt.legend(loc=1)
if write_figures:
plt.savefig(figure_directory+short_c_name+'_nuokrypis.png');
if show_figures:
plt.show()
plt.close('all')
trajectory_deviation_table.round(3).to_excel(follow_writer, short_c_name)
follow_writer.save()
follow_writer.close()
controllers = ['main', '16', '91', '127_stiff']
column_names = controllers + [i+'_m' for i in controllers]
plt.figure(figsize=figure_size)
#facing_change_time_table = pd.DataFrame(np.NaN, index=np.arange(2*len(controllers)), columns=['min reach time', 'mean time to reach', 'max reach time'])
facing_change_time_table = pd.DataFrame(np.NaN, index=column_names, columns=['min reach time', 'mean time to reach', 'max reach time'])
for i, c in enumerate(controllers):
plt.subplot(len(controllers), 1, i+1)
plt.title('Valdiklio \"'+c+'\" žiūrėjimo tiklso kampo pasiekimo laikas')
for j, m in enumerate(['', '_m']):
short_c_name = c+m
facing_data = pd.read_csv(rel_dir+short_c_name+'_facing.csv', skipinitialspace=True)
index = short_c_name #i*len(['','_m'])+j
plt.scatter(facing_data['angle'], facing_data['turn_time'], label=['be veidrodinių animacijų', 'su veidrodinėmis animacijomis'][j])
plt.xlabel("Testuojamas nuokrypio kampas ("+u'\N{DEGREE SIGN}'+")")
plt.ylabel("Konvergavimas iki "+str(int(facing_data['angle_threshold'][0]))+" " +u'\N{DEGREE SIGN}'+" (s)")
facing_change_time_table['min reach time'][index] = np.min(facing_data['turn_time'])
facing_change_time_table['mean time to reach'][index] = np.mean(facing_data['turn_time'])
facing_change_time_table['max reach time'][index] = np.max(facing_data['turn_time'])
plt.legend(loc=1);
if write_figures:
plt.savefig(figure_directory+c+'_kampas.png');
if show_figures:
plt.show()
plt.close('all')
print(facing_change_time_table)
facing_writer = pd.ExcelWriter('data/figures/atsisukimo_laiko_matavimai.xlsx')
facing_change_time_table.round(3).to_excel(facing_writer)
facing_writer.save()
facing_writer.close()
def measure_foot_skate(foot_skate, min_h, max_h, foot_side):
h_diff = max_h-min_h
foot_skate['speed'] = np.sqrt(np.square(foot_skate[foot_side+'_vel_x'])+np.square(foot_skate[foot_side+'_vel_z']))
foot_skate['position_differences'] = foot_skate['dt'] * foot_skate['speed']
foot_skate['height_exponent'] = (foot_skate[foot_side+'_h'] - min_h)/h_diff
foot_skate['clamped_height_exponent'] = np.clip(foot_skate['height_exponent'], 0, 1 )
foot_skate['height_weights'] = 2-np.power(2,foot_skate['clamped_height_exponent'])
#mean_pos_difference = np.sum(foot_skate['position_differences'])/foot_skate['t'].tail(1)
#print(foot_skate["t"].tail(1))
return float(np.sum(foot_skate['position_differences']*foot_skate['height_weights'])/foot_skate["t"].tail(1))
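# Editor's sketch of how measure_foot_skate can be exercised on a tiny synthetic
# frame; every number below is made up, only the column names match the real logs.
def _example_measure_foot_skate():
    demo = pd.DataFrame({
        't':       [0.0, 0.1, 0.2],
        'dt':      [0.1, 0.1, 0.1],
        'l_vel_x': [0.0, 0.5, 0.0],
        'l_vel_z': [0.0, 0.0, 0.0],
        'l_h':     [0.050, 0.046, 0.070],
    })
    # the 0.045/0.06 m thresholds mirror the min_h/max_h values used further down
    return measure_foot_skate(demo, 0.045, 0.06, 'l')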
min_h = 0.045 #np.min(foot_skate.l_h)
max_h = 0.06
controllers = ['main', '16', '91', '127_stiff']
controllers = controllers + [i+'_m' for i in controllers]
#FOOT SKATE TESTING
foot_skate_table = pd.DataFrame(np.NaN, index=controllers, columns=['l_controller_skate', 'l_anim_skate', 'l_worse_frac', 'r_controller_skate', 'r_anim_skate', 'r_worse_frac'])
for c in controllers:
anim_names = pd.read_csv(rel_dir+c+'.ctrl_anims', skipinitialspace=True, header=None)
anim_names = anim_names[0]
foot_side = 'l'
for foot_side in ['l', 'r']:
plt.figure(figsize=figure_size)
plt.subplots_adjust(hspace=0.5)
plt.suptitle('\"' + c + '\" valdiklio animacijų rinkinio pėdų slidinjimas')
anim_skate_amounts = pd.DataFrame(np.NaN, columns=['foot_skate', 'count', 'total_time'],index=anim_names);
#ANIMATION FOOT SKATE
for ia, a in enumerate(anim_names):
plt.subplot(len(anim_names), 1, ia+1)
anim_skate_data = pd.read_csv(rel_dir+anim_names[ia].split('.')[0]+'_anim_foot_skate.csv', skipinitialspace=True)
anim_skate_data = anim_skate_data[anim_skate_data['t'] > 0.03]
a_skate_amount = measure_foot_skate(anim_skate_data, min_h, max_h, foot_side)
print(a + " " + str(a_skate_amount) + 'm/s')
anim_skate_amounts['foot_skate'][a] = a_skate_amount
plt.title("Animacijos "+a+" pėdų slydimo kiekis = " + str(a_skate_amount))
plt.xlabel('laikas (s)')
plt.ylabel('Atstumas (m)')
plt.plot(anim_skate_data['t'], anim_skate_data[foot_side+'_h'], label='kairės pėdos aukštis virš žemės')
plt.plot(anim_skate_data['t'], anim_skate_data['height_weights'], label='greičio daugiklis')
plt.legend(loc=1)
#plt.plot(anim_skate_data['t'], anim_skate_data['speed'])
#plt.plot(anim_skate_data['t'], anim_skate_data['position_differences'])
#plt.plot(anim_skate_data['t'], anim_skate_data['height_exponent'])
#plt.plot(anim_skate_data['t'], anim_skate_data['clamped_height_exponent'])
print(a + " " + str(a_skate_amount) + 'm/s')
if write_figures:
            plt.savefig(figure_directory+c+'_'+foot_side+'_animacijų_kojų_slydimas.png');
if show_figures:
plt.show()
plt.close('all')
#CONTROLLER FOOT SKATE
short_c_name = c
c_skate_data = pd.read_csv(rel_dir+short_c_name+'_ctrl_skate.csv', skipinitialspace=True)
c_skate_amount = measure_foot_skate(c_skate_data, min_h, max_h, foot_side)
print(c + ' skate: ' + str(c_skate_amount) + 'm/s');
plt.figure(figsize=figure_size)
plt.subplots_adjust(hspace=0.5)
plt.suptitle('Valdiklio \"' + short_c_name +'\" animacijų naudojimas')
for ia, a in enumerate(anim_names):
plt.subplot(len(anim_names), 1, ia+1)
data = [c_skate_data[(c_skate_data['anim_index']==ia) & (c_skate_data['anim_is_mirrored']==0)]['anim_local_time'],
c_skate_data[(c_skate_data['anim_index']==ia) & (c_skate_data['anim_is_mirrored']==1)]['anim_local_time']]
plt.axvspan(0, np.max(c_skate_data[c_skate_data['anim_index']==ia]['anim_local_time']), facecolor='green', alpha=0.4)
plt.hist(data, bins=80, stacked=True)
ax = plt.gca()
ax.set_xlim([0, np.max(c_skate_data['anim_local_time'])])
#ax.set_xlim([0, np.max(c_skate_data[c_skate_data['anim_index']==ia]['anim_local_time'])])
plt.xlabel("Momentas animacijoje "+a+" (s)");
plt.ylabel("Grojimų skaičius");
legend_labels = ['naudota animacijos dalis','originalioji animacija']
legend_labels = legend_labels + ['veidrodinė animacija']
plt.legend(loc=1, labels=legend_labels)
if write_figures:
            plt.savefig(figure_directory+c+'_'+foot_side+'_naudojimo_histogramos.png');
if show_figures:
plt.show()
plt.close('all')
plt.figure(figsize=figure_size)
plt.suptitle('Valdiklio \"' + short_c_name +'\" animacijų naudojimas')
plt.subplot(2, 1, 1)
est_percentage = lambda x : 100*(len(x) / len(c_skate_data))
sns.barplot(x='anim_count', y='anim_count', orient='v', data=c_skate_data, estimator=est_percentage)
plt.xlabel('Maišomų animacijų kiekis')
plt.ylabel("Dalis viso testo laiko (%)")
plt.subplot(2, 1, 2)
map_name = lambda x: anim_names[x]
c_skate_data['anim_names'] = c_skate_data['anim_index'].map(map_name)
sns.barplot(x='anim_names', y='anim_names', data=c_skate_data, orient='v', estimator=est_percentage)
plt.xlabel('Grojama animacija')
plt.ylabel("Dalis viso testo laiko (%)")
used_anim_names = c_skate_data['anim_names'].unique()
print(c+' used anim names:\n')
print(used_anim_names)
print(c+' anim names:\n')
print(anim_names)
for ia, a in enumerate(used_anim_names):
anim_entries = c_skate_data[c_skate_data['anim_names']==a]
anim_skate_amounts['count'][a] = len(anim_entries)
anim_skate_amounts['total_time'][a] = np.sum(anim_entries['dt'])
total_time = np.sum(anim_skate_amounts['total_time'])
anim_skate_amounts['fraction'] = anim_skate_amounts['total_time']/total_time
anim_skate_amounts['weighted_anim_skate'] = anim_skate_amounts['fraction']*anim_skate_amounts['foot_skate']
print(anim_skate_amounts)
weighted_anim_skate_amount = np.sum(anim_skate_amounts.weighted_anim_skate)
        worse_by_fraction = c_skate_amount/weighted_anim_skate_amount - 1
print('\nweighted anim skate: ' + str(weighted_anim_skate_amount))
print('controller skate: ' + str(c_skate_amount))
        print('worse by: '+ str(worse_by_fraction))
foot_skate_table[foot_side+'_controller_skate'][c] = c_skate_amount
foot_skate_table[foot_side+'_anim_skate'][c] = weighted_anim_skate_amount
foot_skate_table[foot_side+'_worse_frac'][c] = worse_by_fraction
if write_figures:
            plt.savefig(figure_directory+c+'_'+foot_side+'_naudojimo_stulpelinės.png');
if show_figures:
plt.show()
plt.close('all')
foot_skate_table['avg_controller_skate'] = (foot_skate_table['l_controller_skate'] + foot_skate_table['r_controller_skate'])/2
foot_skate_table['avg_anim_skate'] = (foot_skate_table['l_anim_skate'] + foot_skate_table['r_anim_skate'])/2
foot_skate_writer = | pd.ExcelWriter('data/figures/slydimo_matavimai.xlsx') | pandas.ExcelWriter |
import pandas as pd
from datetime import datetime, timedelta
from matplotlib import pyplot as plt
from matplotlib import dates as mpl_dates
plt.style.use('seaborn')
data = pd.read_csv('timedata.csv')
data['Date'] = | pd.to_datetime(data['Date']) | pandas.to_datetime |
import bs4 as bs
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
import os
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
import pickle
import requests
from collections import Counter
from sklearn import svm, model_selection, neighbors
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
style.use('ggplot')
def save_sp500_tickers():
resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = bs.BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class': 'wikitable sortable'})
tickers = []
for row in table.findAll('tr')[1:]:
ticker = row.findAll('td')[0].text
tickers.append(ticker)
with open("sp500tickers.pickle", "wb") as f:
pickle.dump(tickers, f)
return tickers
# save_sp500_tickers()
def get_data_from_yahoo(reload_sp500=False):
if reload_sp500:
tickers = save_sp500_tickers()
else:
with open("sp500tickers.pickle", "rb") as f:
tickers = pickle.load(f)
if not os.path.exists('stock_dfs'):
os.makedirs('stock_dfs')
start = dt.datetime(2010, 1, 1)
end = dt.datetime.now()
for ticker in tickers:
if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
df = web.DataReader(ticker, 'morningstar', start, end)
df.reset_index(inplace=True)
df.set_index("Date", inplace=True)
df = df.drop("Symbol", axis=1)
df.to_csv('stock_dfs/{}.csv'.format(ticker))
else:
print('Already have {}'.format(ticker))
def compile_data():
with open("sp500tickers.pickle", "rb") as f:
tickers = pickle.load(f)
main_df = pd.DataFrame()
for count, ticker in enumerate(tickers):
df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))
df.set_index('Date', inplace=True)
df.rename(columns={'Adj Close': ticker}, inplace=True)
        df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], axis=1, inplace=True)
if main_df.empty:
main_df = df
else:
main_df = main_df.join(df, how='outer')
if count % 10 == 0:
print(count)
print(main_df.head())
main_df.to_csv('sp500_joined_closes.csv')
def visualize_data():
df = | pd.read_csv('sp500_joined_closes.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import argparse
import time
import sys
kinetics_filename=sys.argv[1]
time_lag_file=sys.argv[2]
output_filename=sys.argv[3]
final_output_filename=sys.argv[4]
# print(final_output_filename)
df= | pd.read_csv(kinetics_filename) | pandas.read_csv |
import os
import sys
import argparse
import logging
import pandas as pd
import numpy as np
from datasets.SCONES import SCONESDataset
from model.dataset import PreprocessedDataset
from model.scones import SCONESNetwork
def get_ensemble_predictions(opt, model_paths, dataset_path, results_path):
dataset = SCONESDataset()
dataset.deserialize(dataset_path)
pp_dataset = PreprocessedDataset(radius_cutoff=opt["config"]["radius_cutoff"], device=opt["device"])
pp_dataset.add_dataset(dataset, training_filters=True)
pp_dataset.summary()
predictions = {}
for sample in pp_dataset:
predictions[sample["id"]] = []
for path in model_paths:
print(path)
logging.basicConfig(
format="%(asctime)s %(message)s",
datefmt="[%H:%M:%S]",
level=logging.INFO,
)
logger = logging.getLogger('scones')
model = SCONESNetwork(opt, logger)
model.init_eval()
model.set_criterion()
model.load_model(os.path.join(path, "best_val_model.pth"))
model.eval()
for sample in pp_dataset:
sample_id = sample["id"]
if sample_id not in predictions:
continue
_, _, prediction = model.predict(sample)
predictions[sample_id].append(prediction.item())
predictions = [{ "id" : key, "ddG_SCONES" : round(np.mean(preds), 2), "ddG_SCONES_stddev" : round(np.std(preds), 2) } for key, preds in predictions.items()]
df = | pd.DataFrame(predictions) | pandas.DataFrame |
__all__ = ["fasta_file_validator",
"sequence_extractor",
"Data_frame",
"predict_sequence",
"Predict_frame",]
import os
import pandas as pd
def fasta_file_validator(file):
    if os.path.isfile(file):
        return True
    raise FileNotFoundError("File doesn't exist.")
def sequenceRepair(seq_list):
repaired = []
for index,seq in enumerate(seq_list):
if ("X" in seq):
seq = seq.replace("X","")
if ("B" in seq):
seq = seq.replace("B","")
if ("Z" in seq):
seq = seq.replace("Z","")
repaired.insert(index,seq)
return repaired
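# Context note: "X", "B" and "Z" are ambiguous one-letter amino-acid codes
# (unknown, Asp/Asn and Glu/Gln respectively); sequenceRepair strips them so the
# downstream descriptor calculations only see the 20 standard residues, e.g.
#   sequenceRepair(["MKXVB"]) -> ["MKV"]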
def predict_sequence(file):
try:
data = pd.read_csv(file)
except:
        raise FileNotFoundError("File handling error occurred")
sequence = sequenceRepair(data["sequence"].to_list())
return sequence
def sequence_extractor(file):
try:
data = pd.read_csv(file)
except:
        raise FileNotFoundError("File handling error occurred")
sequence = sequenceRepair(data["sequence"].to_list())
Target = data["mean_growth_PH"]
return [sequence,Target.to_list()]
def Predict_frame(data,):
descriptor_data = pd.DataFrame(data)
return descriptor_data
def Data_frame(data,target):
descriptor_data = | pd.DataFrame(data) | pandas.DataFrame |
import tkinter as tk
from tkinter import ttk
import pandas as pd
import utils
import psutil
import os
import threading
import time
from math import ceil
from tqdm import tqdm
import random
import numpy as np
import user_agents as ua
from graph import *
from subprocess import run
class Model:
""" Model class handing data loading, modifications and extraction """
def __init__(self, iface):
self.iface = iface
self.file_loaded = False
self.tk_gui= hasattr(self.iface, 'root') and isinstance(self.iface.root, tk.Tk)
self.filename = None
self.data = pd.DataFrame()
try:
self.country_codes = pd.read_csv(utils.COUNTRY_CODES_FILE, na_filter = False)
except FileNotFoundError:
raise utils.MissingFileError("Country file {0} not found".format(utils.COUNTRY_CODES_FILE))
def ram_usage(self):
""" Return the current process RAM usage in MB"""
proc = psutil.Process(os.getpid())
return "Ram : {0} MB |".format(round(proc.memory_info()[0]/(1024*1024), 1))
def check_file_validity(self, filename):
""" Description : Check if a given file is a valid Issuu JSON file
Parameters : Filename to check"""
try:
data = next(pd.read_json(filename, lines=True, chunksize=1))
except ValueError:
return False
if set(['visitor_uuid','visitor_country', 'visitor_useragent', 'subject_doc_id']) <= set(data.keys()):
self.filename = filename
return True
else:
self.filename = None
return False
def check_user_validity(self, user):
""" Description : Check if a given user exists in the currently loading file
Parameters : User to check"""
return user in self.data['visitor_uuid'].tolist()
def check_doc_validity(self, doc):
""" Description : Check if a given user document in the currently loading file
Parameters : Document to check"""
return doc in self.data['subject_doc_id'].tolist()
def get_rnd_doc(self):
""" Description : Returns a random document id from the currently loaded file
Parameters : Random document id from the currently loaded file"""
rnd = random.randrange(0,len(self.data))
return self.data.iloc[rnd]['subject_doc_id']
def get_rnd_user(self):
""" Description : Returns a random user id from the currently loaded file
Parameters : Random user id from the currently loaded file"""
rnd = random.randrange(0,len(self.data))
return self.data.iloc[rnd]['visitor_uuid']
def load_main_file_async(self, file_dict, callback=None, pg_val=None):
""" Description : Asynchronous wrapper around the load_main_file method
Parameters : A dictionnary containing the filename and number of lines in this file [file_dict]. The tkinter progress bar StringVar variable which will be modified to represent progress [pg_val]"""
thread = threading.Thread(target=self.load_main_file, args=(file_dict, callback, pg_val))
thread.daemon = True
thread.start()
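    # Hypothetical usage from the GUI thread, assuming file_dict = {'filename': ..., 'linecount': ...}:
    #   model.load_main_file_async(file_dict, callback=self.on_file_loaded, pg_val=self.pg_val)
    # The daemon thread runs load_main_file() below and reports progress through pg_val.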
def load_main_file(self, file_dict, callback = None, pg_val=None):
""" Description : Main file loading method for issuu data file. Loads the file in chunk into a instance variable
Parameters : A dictionnary containing the filename and number of lines in this file [file_dict]. The tkinter progress bar StringVar variable which will be modified to represent progress [pg_val]"""
utils.logger.info("Started loading file")
start_time = time.time()
tmp = []
pd_reader = pd.read_json(file_dict['filename'], lines=True, chunksize=utils.CHUNK_SIZE)
loop_count = ceil(file_dict['linecount']/utils.CHUNK_SIZE)
for i, df in utils.cli_pg_bar(enumerate(pd_reader), total=loop_count):
df = df.loc[df['event_type'].isin(['read'])]#, 'pageread'])] # Since we are working with a random sample, some read documents don't have the "read" record but have "pageread" records
tmp.append(df[utils.ISSUU_FIELDS])
if self.tk_gui and pg_val is not None:
pg_val.set(i+1)
self.data = pd.concat(tmp, axis=0)
self.data.drop_duplicates(['visitor_uuid', 'subject_doc_id'], inplace=True)
if callback is not None:
callback()
self.file_loaded = True
self.file_size = utils.get_file_size(file_dict['linecount'])
utils.logger.info("Loading done")
df_mem_usage = self.data.memory_usage().sum()/(1024*1024)
utils.logger.debug("Loaded in {0} seconds - dataframe using {1} MB".format(round(time.time() - start_time, 2), round(df_mem_usage, 2)))
def country_to_continent(self, country):
""" Description : Return the corresponding continent a given country belongs to
Parameters : The country in ISO 3166 Country Code format (string) [country]
Returns : The continent on which the country is in. If multiple, returns first, if none, returns 'None'"""
continent_list = self.country_codes.loc[self.country_codes['a-2'] == country]['CC'].values
if len(continent_list):
return continent_list[0]
else:
return 'None'
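    # Example (values depend on the country-codes CSV, which is assumed to map
    # ISO alpha-2 codes in column 'a-2' to continent codes in column 'CC'):
    #   country_to_continent('FR')  ->  the 'CC' entry stored for 'FR'
    #   country_to_continent('??')  ->  'None'   (no match found)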
def get_plot_data(self, var, doc_id = None, callback=None): # From gui -> Running in seperate thread, get data back with callback. From Cli, just run without callback
""" Description : Return the necessary data to plot a given variable for a given document (or all)
Parameters : The variable name (column) to plot [doc_id]. The document id to restrict the plot to [doc_id]. The callback to .. call back when the method is done [callback] (asynchronous mode)
Returns : The plot data in case of a synchronous call
"""
if var not in self.data.columns:
self.preprocess(var)
data = self.data.copy()
if doc_id is not None: # If we are making a plot for a given document or a on the whole dataset
data = data.loc[data['subject_doc_id'] == doc_id]#data.iloc[doc_id]['subject_doc_id']] #'130705172251-3a2a725b2bbd5aa3f2af810acf0aeabb'] '130705172251-3a2a725b2bbd5aa3f2af810acf0aeabb']
data.drop_duplicates(['visitor_uuid'], inplace=True)
# Get sum on the given column and sort in descending order
data = data.groupby([var], as_index=False).size().reset_index(name='counts')
data = data.sort_values('counts', ascending=False)
if callback is not None:
callback(data)
else:
return data
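    # The frame returned above has one row per distinct value of `var`, already
    # sorted so the caller can plot the top-N directly, e.g. (hypothetical numbers
    # for var='visitor_country'):
    #       visitor_country  counts
    #   0   US               1203
    #   1   MX                451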
def preprocess(self, var):
""" Description : Preprocesses (add/modify) the required variable (column)
Parameters : The variable name (column) to preprocess [var]
"""
utils.logger.info("Starting preprocessing for {0}".format(var))
start_time = time.time()
if self.tk_gui and hasattr(self.iface, 'progressbar'):
self.iface.progressbar["maximum"] = 100
split = np.array_split(self.data, 100) # Data is not copied, no ram increase !
self.data[var] = np.nan
for i, df in utils.cli_pg_bar(enumerate(split), total=100):
if var == 'visitor_browser':
self._preprocess_browser(df)
elif var == 'visitor_platform':
self._preprocess_platform(df)
elif var == 'visitor_continent':
self._preprocess_continent(df)
if self.tk_gui:
self.iface.pg_val.set(i+1)
self.data = pd.concat(split)
utils.logger.info("Done preprocessing - {0} sec".format(time.time()- start_time))
def _preprocess_continent(self, df):
""" Description : Preprocesses dataframe in place in order to add a new column containing the continent of the readers
Parameters : The dataframe to in-place modify [df]
"""
df['visitor_continent'] = df['visitor_country'].apply(lambda x: self.country_to_continent(x))
def _preprocess_browser(self, df):
""" Description : Preprocesses dataframe in place in order to add a new column containing the browser of the readers
Parameters : The dataframe to in-place modify [df]
"""
df['visitor_browser'] = df['visitor_useragent'].apply(lambda x: ua.parse(x).browser.family)
def _preprocess_platform(self, df):
""" Description : Preprocesses dataframe in place in order to add a new column containing the platforme type of readers
Parameters : The dataframe to in-place modify [df]
"""
df['visitor_platform'] = df['visitor_useragent'].apply(lambda x: 'Mobile' if ua.parse(x).is_mobile else 'Desktop')
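    # Both helpers lean on the user_agents package: ua.parse(ua_string) returns an
    # object whose .browser.family gives the browser name (e.g. 'Chrome') and whose
    # .is_mobile flag marks phones/tablets; the example values here are illustrative.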
def get_document_readers(self, doc_id):
""" Description : Returns the list of visitor having read the given document
Parameters : The document_id we want to know the readers of [doc_id]
Returns : List of visitor having read the input document
"""
data = self.data.loc[self.data['subject_doc_id'] == doc_id]['visitor_uuid'].unique()
return data.tolist()
def get_visitor_read(self, vis_id):
""" Description : Returns the list of documents read by a given visitor
Parameters : The visitor we want to know the read documents of [vis_id]
Returns : List of documents a visitor has read
"""
data = self.data.loc[self.data['visitor_uuid'] == vis_id]['subject_doc_id'].unique() # /!\ np.array not pandas.Series
data = data[~ | pd.isnull(data) | pandas.isnull |
from time import perf_counter
from os import chdir, getcwd
import numpy as np
import pandas as pd
from plot import *
class del_then_inter:
def __init__(self, infile: str, has_imu: bool, conv_time: bool, plot_choice):
self.df, _ = setup(infile, has_imu, conv_time)
self.plot_choice = plot_choice
# Delete unimportant columns
#self.df.drop(self.df.loc[:,'IMU_AngVelX':'IMU_LinearAccZ'].columns, inplace=True, axis=1)
def delete_vals(self) -> None:
print('\n\tdeleting bad values...\n')
self.df = self.df.reset_index()
for i in range(len(self.df.GPS_Long)):
if self.df.SDn[i] > 0.005:
self.df.loc[i,'GPS_Long':'GPS_Alt'] = pd.NA
def interpolate(self) -> None:
print('\tinterpolating...\n')
# Force columns into numeric data types
self.df['GPS_Long'] = pd.to_numeric(self.df['GPS_Long'], errors='coerce')
self.df['GPS_Lat'] = pd.to_numeric(self.df['GPS_Lat'], errors='coerce')
self.df['GPS_Alt'] = pd.to_numeric(self.df['GPS_Alt'], errors='coerce')
# Interpolate all GNSS values in the df as floats
self.df.loc[:, 'GPS_Long':'GPS_Alt'] = self.df.loc[:, 'GPS_Long':'GPS_Alt'].interpolate(method='linear')
self.df['GPS_Status'] = self.df['GPS_Status'].interpolate(method='ffill')
# Remove previously deleted values
self.df = self.df[self.df['GPS_Long'].notna()]
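    # Worked illustration of the linear fill: if three consecutive fixes were nulled
    # between longitudes 10.0 and 10.4, interpolate(method='linear') writes 10.1,
    # 10.2 and 10.3 back in; GPS_Status is forward-filled instead because it is a
    # discrete flag, and rows before the first valid fix (which linear interpolation
    # cannot fill) are dropped by the notna() filter above.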
def write_to_file(self, name: str):
# Change 'time' back to rospy.time[]
self.df.Time = self.df.Time.apply(lambda x: f'rospy.Time[{x:19d}]')
self.df.drop('index', axis=1, inplace=True)
# Save the file
self.df.to_csv(f'.\\results\\{name}.csv', index=False)
print(f'\nSaved new file to .\\results\\{name}.csv')
# Plot the desired plot
if self.plot_choice:
print(f'\nPlotting...\n')
choose_plot(self.df, self.plot_choice)
class fix_from_vel:
def __init__(self, infile, has_imu, conv_time):
self.gnss_df, _ = setup(infile, has_imu, conv_time)
self.gnss_df.loc[:, 'GPS_Long':'GPS_Lat'] = [geodetic_to_geocentric(*a) for a in tuple(zip(self.gnss_df['GPS_Long'], self.gnss_df['GPS_Lat'], self.gnss_df['GPS_Alt']))]
self.gnss_df = add_vectors(self.gnss_df)
def rem_vel_outlier(df) -> None:
'''
Status:
nulling values based on Std works, but not based on absolute velocity change
Values are still strecthed when compared to GPS_Long, GPS_Lat
This notably wasnt the case before I force converted the merged df to numeric
'''
df['Rolling_X'] = df.VelX.rolling(5).mean()
df['Rolling_Y'] = df.VelY.rolling(5).mean()
df['Rolling_Z'] = df.VelZ.rolling(5).mean()
#df.loc[df[df.SDn > 10].index, 'VelX':'VelZ'] = pd.NA
        # Null X-velocities that deviate from the 5-sample rolling mean by more
        # than 100% of that mean (assumed outlier threshold).
        df.loc[(df.VelX - df.Rolling_X).abs() / df.Rolling_X.abs() > 1, 'VelX'] = pd.NA
class fix_from_imu:
def __init__(self, infile, has_imu):
self.gnss_df, self.imu_df = setup(infile, has_imu)
self.gnss_df.loc[:, 'GPS_Long':'GPS_Lat'] = [geodetic_to_geocentric(*a) for a in tuple(zip(self.gnss_df['GPS_Long'], self.gnss_df['GPS_Lat'], self.gnss_df['GPS_Alt']))]
self.gnss_df = add_vectors(self.gnss_df)
self.gnss_df = trim_df_vel(self.gnss_df, 10, 1)
self.df = merge_dfs(self.gnss_df, self.imu_df)
### PLOT FUNCTIONS
def choose_plot(df, plot_choice):
if plot_choice == 1:
plot_track(df)
elif plot_choice == 2:
pass
### MAIN FUNCTIONS
def setup(infile: str, has_imu: bool, conv_time: bool) -> pd.DataFrame:
t1 = perf_counter()
print('\n' + '#'*80 + '\n')
def ingest_file(infile) -> pd.DataFrame:
# Set and Get directory
chdir(r'C:\Users\mikeh\OneDrive\Documents\GitHub\ouster_localization')
dir = getcwd()
print(f'\ndirectory: {dir}\n\n')
print('\treading file...\n')
# Import the comma delimited .txt file as a pandas dataframe
df = pd.read_csv(f'{dir}\\{infile}', delimiter=',')
return df
def edit_dtypes(df) -> pd.DataFrame:
print('\tediting data types...\n')
# Extract only the numbers from the 'Time' column using a reg ex, convert to long integer
        df['Time'] = df['Time'].str.extract(r'(\d+)', expand=False).astype(np.int64)
#gnss_df.Time = gnss_df.Time.map(lambda x: datetime.fromtimestamp(x))
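        # e.g. the string 'rospy.Time[1612345678901234567]' becomes the integer
        # 1612345678901234567 (nanoseconds); del_then_inter.write_to_file() later
        # re-wraps the value in the same 'rospy.Time[...]' form before saving.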
if conv_time:
# Convert Time into seconds from onset
t0 = df['Time'][0]
df['Time'] = (df['Time']-t0)/10**9
# Forcing proper data types for each column
df = df.apply(lambda x: | pd.to_numeric(x, errors='coerce') | pandas.to_numeric |
import os
import pandas as pd
import argparse
from sklearn.cluster import KMeans
parser = argparse.ArgumentParser()
parser.add_argument("--dir", required=True)
parser.add_argument("--n", type=int, default=10)
parser.add_argument("--score", default="min_dist")
def main():
args = parser.parse_args()
df = pd.read_csv(os.path.join(args.dir, "data.csv"))
X = df[[args.score]]
kmeans = KMeans(n_clusters=args.n, random_state=0).fit(X)
df['label'] = kmeans.labels_
avg_configs = []
for i in range(args.n):
data = df[df.label == i]
avg_configs.append(data.mean(axis=0))
avg_df = | pd.DataFrame(avg_configs) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
"""Create scatterplots of stability versus microstructure characteristics."""
df = pd.read_csv('/Users/colinswaney/Desktop/net/data/stability.txt', index_col=0)
# 1. Stability vs. CRSP
crsp = pd.read_csv('/Users/colinswaney/Desktop/net/data/crsp.txt')
crsp = crsp[crsp['date'] == '07/24/2013']
crsp=crsp.drop(['PERMNO', 'NUMTRD', 'SHROUT', 'COMNAM', 'date'], axis=1)
crsp.columns = ['name', 'exchange', 'price', 'volume']
combined = pd.merge(df, crsp)
# ... vs. price
plt.subplot(121)
plt.scatter(np.log(combined['price']), combined['eig'])
# ... vs. volume **
plt.scatter(np.log(combined['volume']), combined['eig'])
plt.subplot(122)
plt.tight_layout()
# 2. Stability vs. ITCH
volumes = | pd.read_csv('/Users/colinswaney/Desktop/net/data/volumes.txt', index_col=0) | pandas.read_csv |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: | pd.Timestamp("2012-07-25 00:00:00") | pandas.Timestamp |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
import pandas as pd
import glob
import re
from itertools import combinations
import matplotlib
matplotlib.rcParams['text.usetex'] = True
def plot_probabilities(X, probabilities, titles, suptitle):
norm = plt.Normalize(0, 1)
n = len(titles)
nrows = int(np.ceil(n / 2))
sns.set_context('paper')
cmap = sns.cubehelix_palette(rot=-.5,light=1.5,dark=-.5,as_cmap=True)
f, axarr = plt.subplots(nrows, min(n,2))
if n < 2:
axarr.scatter(X[:, 0], X[:, 1], c=probabilities[0],
cmap=cmap, norm=norm, edgecolor='k',s=60)
axarr.set_title(titles[0])
#f.set_size_inches(8, 8)
else:
i = j = 0
for idx, t in enumerate(titles):
axarr[i, j].scatter(X[:, 0], X[:, 1], c=probabilities[idx],
cmap=cmap, norm=norm, edgecolor='k')
axarr[i, j].set_title(t)
j += 1
if j == 2:
j = 0
i += 1
if n % 2 != 0:
axarr[-1, -1].axis('off')
f.set_size_inches(10, 30)
f.suptitle(suptitle)
f.subplots_adjust(hspace=0.7)
return f
def plot_parameters(X, delta, a):
sns.set_context('paper')
cmap1 = sns.cubehelix_palette(rot=-.5,light=1.5,dark=-.5,as_cmap=True)
gs = gridspec.GridSpec(2, 2, height_ratios=[4, 2])
f = plt.figure(figsize=(12,6))
axarr = np.array([[None]*2]*2)
for i in range(2):
for j in range(2):
axarr[i,j] = plt.subplot(gs[i*2+j])
axarr[0, 0].scatter(X[:, 0], X[:, 1], c=delta, cmap=cmap1,
edgecolor='k',s=40)
axarr[0, 0].set_title('$\mathbf{\delta}$ (Difficulty)',fontsize=16)
axarr[0, 1].scatter(X[:, 0], X[:, 1], c=a, cmap=cmap1,
edgecolor='k',s=40)
axarr[0, 1].set_title('$\mathbf{a}$ (Discrimination)',fontsize=16)
#axarr[1, 0].hist(delta,bins=100)
sns.distplot(delta,bins=100,ax=axarr[1,0])
axarr[1, 0].set_title('Histogram of $\mathbf{\delta}$',fontsize=16)
#axarr[1, 1].hist(a,bins=100)
sns.distplot(a,bins=100,ax=axarr[1,1])
axarr[1, 1].set_title('Histogram of $\mathbf{a}$',fontsize=16)
f.suptitle('IRT item parameters')
#f.set_size_inches(20, 20)
f.subplots_adjust(hspace=0.3)
return f
def plot_noisy_points(xtest, disc=None):
sns.set_context('paper')
cls = sns.color_palette("BuGn_r")
lgd = []
f = plt.figure()
plt.scatter(xtest.x[xtest.noise==0],xtest.y[xtest.noise==0],facecolors='none',edgecolors='k',s=60)
lgd.append('non-noise item')
plt.scatter(xtest.x[xtest.noise>0],xtest.y[xtest.noise>0],c=cls[3],s=60)
lgd.append('noise item')
if not disc is None:
plt.scatter(xtest.x[disc<0],xtest.y[disc<0],c=cls[0],marker='+',facecolors='none')
lgd.append('detected noise item')
plt.title('True and detected noise items')
l = plt.legend(lgd,frameon=True,fontsize=12)
l.get_frame().set_edgecolor('g')
return f
def plot_item_parameters_corr(irt_prob_avg,difficulty,noise,disc=None):
sns.set_context('paper')
cls = sns.color_palette("BuGn_r")
lgd = []
f = plt.figure()
plt.xlim([0.,1.])
plt.ylim([0.,1.])
plt.scatter(irt_prob_avg[noise>0],difficulty[noise>0],c=cls[3],s=60)
lgd.append('noise item')
if not disc is None:
plt.scatter(irt_prob_avg[disc<0],difficulty[disc<0],c=cls[0],marker='+',facecolors='none')
lgd.append('detected noise item')
plt.scatter(irt_prob_avg[noise==0],difficulty[noise==0],facecolors='none',edgecolors='k',s=60)
lgd.append('non-noise item')
plt.title('Correlation between difficulty and response')
plt.xlabel('Average response',fontsize=14)
plt.ylabel('Difficulty',fontsize=14)
l=plt.legend(lgd,frameon=True,fontsize=12)
l.get_frame().set_edgecolor('g')
return f
def vis_performance(gather_prec,gather_recal,path,asd='as1@5',vtype='nfrac'):
fig = plt.figure()
plt.plot(gather_recal.index, gather_recal.mean(axis=1),marker='o')
plt.plot(gather_prec.index, gather_prec.mean(axis=1),marker='^')
plt.errorbar(gather_recal.index, gather_recal.mean(axis=1), gather_recal.std(axis=1), linestyle='None')
plt.errorbar(gather_prec.index, gather_prec.mean(axis=1), gather_prec.std(axis=1), linestyle='None')
if vtype=='nfrac':
plt.title('Precision and recall under different noise fractions')
plt.xlabel('Noise fraction (percentile)')
plt.ylim(-0.05,1.1)
plt.yticks(np.arange(0,1.2,0.2))
plt.legend(['Recall','Precision'],loc=0)
plt.savefig(path+'gathered_dnoise_performance_nfrac_'+asd+'.pdf')
elif vtype=='astd':
plt.title('Precision and recall under different prior SD')
plt.xlabel('Prior standard deviation of discrimination')
plt.xlim(0.5,3.25)
plt.ylim(-0.05,1.1)
plt.yticks(np.arange(0,1.2,0.2))
plt.legend(['Recall','Precision'],loc=0)
plt.savefig(path+'gathered_dnoise_performance_asd_nfrac20.pdf')
plt.close(fig)
def gather_vary_nfrac(path,dataset,a_prior_std=1.5,clcomb='79',mcomb='m10',idx = [2,5,10,20,30,40,50,55]):
prefix = path+'dnoise_performance_'+dataset+'_s400_'
files = glob.glob(prefix+'*.txt')
#print(len(files))
asd = 'as'+str(a_prior_std).replace('.','@')
files = filter(lambda f: '_'+mcomb+'_' in f and asd in f and 'cl'+clcomb in f , files)
gather_prec = pd.DataFrame(index=idx)
gather_recal = pd.DataFrame(index=idx)
pfix1 = 'precision = '
pfix2 = 'recall = '
err_files = []
for f in files:
parse = re.split('_|\.',f[len(prefix)+1:])
#print(parse)
frac = int(parse[0])
#print(frac)
if frac not in idx:
continue
seed = parse[1]
with open(f,'r') as fr:
l = fr.readlines()
gather_prec.loc[frac,seed] = float(l[0][len(pfix1):])
gather_recal.loc[frac,seed] = float(l[1][len(pfix2):])
if np.isnan(gather_prec.loc[frac,seed]) or \
np.isnan(gather_recal.loc[frac,seed]):
print('find nan:',parse)
err_files.append('./test_data/noise_test/'+dataset+'/bc4/'+mcomb+'/'+parse[2]+'/irt_data_'+dataset+'_s400_f'+parse[0]+'_'+parse[1]+'_'+parse[2]+'_'+mcomb+'.csv')
return gather_prec,gather_recal,err_files
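# Reading guide for gather_vary_nfrac(): each result file name is assumed to encode
# the noise fraction and the random seed as the first two '_'-separated fields of the
# remainder after the common prefix, and each file body is expected to contain two
# lines of the form 'precision = <float>' and 'recall = <float>'; those values are
# collected into DataFrames indexed by noise fraction with one column per seed.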
def vis_avg_all_clscombs_perform(dataset='mnist',a_prior_std=1.5,mcomb='m10',rpath='./results/bc4/mnist/m10/'):
errs = []
gather_precs=None
gather_recals=None
gather_prec_allcl = pd.DataFrame()
gather_recal_allcl = pd.DataFrame()
asd = 'as'+str(a_prior_std).replace('.','@')
for i,cls in enumerate(combinations(np.arange(10),2)):
#print(i)
cl1, cl2 = cls[0],cls[1]
comb = str(cl1)+str(cl2)
path = rpath+'cl'+comb+'/'
gather_prec,gather_recal, err = gather_vary_nfrac(path,dataset,a_prior_std,clcomb=comb,mcomb=mcomb)
if len(err)==0:
vis_performance(gather_prec,gather_recal,path,asd=asd)
errs+=err
if gather_precs is None:
gather_precs = gather_prec
gather_recals = gather_recal
gather_prec_allcl = | pd.DataFrame(index=gather_prec.index) | pandas.DataFrame |
#!/usr/bin/env python3
# @Author: LiuXing <EMAIL>
# @Date: 2020-12-03 11:00:53
# @Last Modified by: LiuXing
# @Last Modified time: 2020-12-03 14:27:11
import os, sys
import pandas as pd
import numpy as np
from pandas.core.reshape.merge import merge
import scipy
from scipy import ndimage
import matplotlib as mpl
import matplotlib.pyplot as plt
import json
from scipy import sparse
import logging
import numexpr
from multiprocessing import Pool
from optparse import OptionParser
import time
import h5py
import math
import csv
LOG_FORMAT="%(asctime)s %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
TIME_FORMAT = "%y-%m-%d %H:%M:%S"
def main():
actions = ["tsv2h5ad", "dnbMatting", "cellCluster", "visualization", "convertBinData", "saturationPlot", "mergeGem"]
"""
%prog action [options]
"""
parser = OptionParser(main.__doc__)
parser.add_option("-i", "--in", action = "store", type = "str", dest = "inFile", help = "input gene expression matrix file path.")
parser.add_option("-o", "--out", action = "store", type = "str", dest = "out", help = "output file or directory path.")
    parser.add_option("-s", "--binSize", action = "store", type = "int", dest = "binSize", default = 50, help = "The bin size or max bin size used to combine the dnbs. default=50")
parser.add_option("-m", "--mask" , action = "store", type = "str", dest = "mask", help = "input gene expression matrix file generated from lesso tool of the stereomap system.")
parser.add_option("--geneNumber", action = "store", type = "int", dest = "geneNumber", default = 2000, help = "number of genes will be used to cluster bins.")
    parser.add_option("-t", "--thread", action = "store", type = "int", dest = "thread", default = 8, help = "number of threads that will be used to run this program. default=8")
    parser.add_option("-w", "--progress", action = "store", type = "int", dest = "progress", default = 1, help = "number of worker processes that will be used to run this program, only useful for visualization. default=1")
opts, args = parser.parse_args()
if (len(args) < 1):
print("please give the correct action, choose one from: " + ",".join(actions))
sys.exit(not parser.print_help())
if(opts.inFile == None):
sys.exit(not parser.print_help())
numexpr.set_num_threads(opts.thread)
action = args[0].upper()
if (action == "TSV2H5AD"):
slideBin = SlideBin(opts.inFile, opts.out, opts.binSize)
slideBin.process()
elif (action == "CELLCLUSTER"):
cellCluster = CellCluster(opts.inFile, opts.out, opts.binSize)
cellCluster.scanpyCluster()
elif (action == "DNBMATTING"):
dnbMatting = DnbMatting(opts.inFile, opts.out, opts.binSize)
dnbMatting.dnb_matting()
elif (action == "VISUALIZATION"):
visualization = Visualization(opts.inFile, opts.out, opts.binSize, opts.progress)
visualization.process()
elif (action == "CONVERTBINDATA"):
convertBinData = ConvertBinData(opts.mask, opts.inFile, opts.out, opts.binSize)
convertBinData.ConvertData()
elif (action == "SATURATIONPLOT"):
saturationPlot = SaturationPlot(opts.inFile, opts.out)
saturationPlot.process()
elif (action == "MERGEGEM"):
mergeGem = MergeGem(opts.inFile, opts.out)
mergeGem.mergeGem()
else:
        raise Exception("invalid action", 3)
class SlideBin():
def __init__(self, geneExpFile, outdir, binSize):
self.geneExpFile = geneExpFile
self.outdir = outdir
self.binSize = binSize
os.makedirs(self.outdir, exist_ok=True)
def bin_stat(self):
import anndata
import scanpy as sc
df = pd.read_csv(self.geneExpFile, sep="\t", quoting=csv.QUOTE_NONE, comment="#")
if "MIDCounts" in df.columns:
df.rename(columns={"MIDCounts": "UMICount"}, inplace=True)
elif 'values' in df.columns:
df.rename(columns={"values": "UMICount"}, inplace=True)
elif 'MIDCount' in df.columns:
df.rename(columns={'MIDCount': 'UMICount'}, inplace=True)
df['x'] = (df['x']/self.binSize).astype(np.uint32)*self.binSize
df['y'] = (df['y']/self.binSize).astype(np.uint32)*self.binSize
df['cell'] = df['x'].astype(str) + "-" + df['y'].astype(str)
bindf = df['UMICount'].groupby([df['cell'], df['geneID']]).sum()
cells = set(str(x[0]) for x in bindf.index)
genes = set(str(x[1]) for x in bindf.index)
cellsdic = dict(zip(cells, range(0, len(cells))))
genesdic = dict(zip(genes, range(0, len(genes))))
rows = [cellsdic[x[0]] for x in bindf.index]
cols = [genesdic[x[1]] for x in bindf.index]
expMtx = sparse.csr_matrix((bindf.values, (rows, cols)))
obs = pd.DataFrame(index = cells)
var = pd.DataFrame(index = genes)
adata = anndata.AnnData(X = expMtx, obs = obs, var = var)
logging.info("anndata generate finished...")
return adata, genesdic
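    # How the sparse matrix above is assembled: after snapping x/y to the bin grid,
    # MID counts are summed per (bin, gene) pair, the bin labels become CSR rows and
    # the gene IDs become CSR columns, and the resulting cells-by-genes matrix is
    # wrapped in an AnnData object whose obs/var indexes carry the bin and gene names.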
def process(self):
adata, genesdic = self.bin_stat()
resultFile = os.path.join(self.outdir, "{0}x{0}_adata.h5ad".format(self.binSize))
adata.write(resultFile)
class CellCluster():
def __init__(self, geneExpFile, outFile, binSize):
self.geneExpFile = geneExpFile
self.outFile = outFile
self.binSize = binSize
def scanpyCluster(self):
import scanpy as sc
import anndata
if (self.geneExpFile.endswith(".h5ad")):
adata = sc.read_h5ad(self.geneExpFile)
else:
df = pd.read_csv(self.geneExpFile, sep="\t", quoting=csv.QUOTE_NONE, comment="#")
if "MIDCounts" in df.columns:
df.rename(columns={"MIDCounts": "UMICount"}, inplace=True)
elif 'values' in df.columns:
df.rename(columns={"values": "UMICount"}, inplace=True)
elif 'MIDCount' in df.columns:
df.rename(columns={"MIDCount": "UMICount"}, inplace=True)
if 'label' not in df.columns:
df['x'] = (df['x']/self.binSize).astype(np.uint32)*self.binSize
df['y'] = (df['y']/self.binSize).astype(np.uint32)*self.binSize
df['label'] = df['x'].astype(str) + "-" + df['y'].astype(str)
else:
labelFile = os.path.join(os.path.dirname(self.geneExpFile), "merge_GetExp_gene_labeled_stat.txt")
labeldf = pd.read_csv(labelFile, sep="\t")
labeldict=dict(zip(labeldf['label'], labeldf['x'].astype(str)+"_"+labeldf['y'].astype(str)))
df.replace({'label': labeldict}, inplace=True)
bindf = df['UMICount'].groupby([df['label'], df['geneID']]).sum()
cells = set(x[0] for x in bindf.index)
genes = set(x[1] for x in bindf.index)
cellsdic = dict(zip(cells, range(0, len(cells))))
genesdic = dict(zip(genes, range(0, len(genes))))
rows = [cellsdic[x[0]] for x in bindf.index]
cols = [genesdic[x[1]] for x in bindf.index]
expMtx = sparse.csr_matrix((bindf.values, (rows, cols)))
obs = pd.DataFrame(index = cells)
var = pd.DataFrame(index = genes)
adata = anndata.AnnData(X = expMtx, obs = obs, var = var)
adata.write(self.outFile)
del(df)
del(bindf)
adata = sc.read_h5ad(self.outFile)
adata.layers['raw_data'] = adata.X
sc.pp.filter_cells(adata, min_genes=0)
sc.pp.filter_genes(adata, min_cells=0)
adata.var['mt'] = adata.var_names.str.decode('utf-8').str.startswith(('mt-', 'MT-'))
sc.pp.calculate_qc_metrics(adata, qc_vars=['mt'], percent_top=None, log1p=False, inplace=True)
adata.raw = adata
sc.pp.normalize_total(adata, target_sum=1e4)
sc.pp.log1p(adata)
if adata.var.shape[0] < 2000:
return 0
sc.pp.highly_variable_genes(adata, flavor="seurat", n_top_genes=2000)
adata = adata[:, adata.var.highly_variable]
sc.pp.scale(adata, max_value=10)
sc.tl.pca(adata, svd_solver="arpack")
sc.pp.neighbors(adata, n_neighbors=10, n_pcs=10)
sc.tl.tsne(adata)
sc.tl.umap(adata)
sc.tl.leiden(adata)
adata.obs['louvain'] = adata.obs['leiden']
sc.tl.rank_genes_groups(adata, 'leiden', method='wilcoxon', use_raw=False, n_genes=300, pts=True, layer='raw_data')
adata.write(self.outFile)
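    # Pipeline summary: total-count normalisation -> log1p -> top-2000 highly variable
    # genes -> scaling -> PCA -> kNN graph -> t-SNE/UMAP embeddings -> Leiden clustering
    # -> Wilcoxon rank_genes_groups on the raw-counts layer, all persisted to outFile.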
class DnbMatting():
def __init__(self, geneExpFile, outdir, binSize=50):
self.geneExpFile = geneExpFile
self.outdir = outdir
self.binSize = binSize
os.makedirs(self.outdir, exist_ok=True)
def dnb_matting(self):
import cv2
expColumnTypes = {"barcode": object, "geneID": 'category', "MIDCount": np.uint32}
geneDf = pd.read_csv(self.geneExpFile, header=None, names=["barcode", "geneID", "MIDCount"], sep="\t", dtype=expColumnTypes, quoting=csv.QUOTE_NONE)
geneDf['x'] = geneDf['barcode'].apply(lambda x: int(x.split("_")[-2]))
geneDf['y'] = geneDf['barcode'].apply(lambda x: int(x.split("_")[-1]))
geneDf.drop(['barcode'], axis=1, inplace=True)
#generate bin image
tempDf = geneDf[['x', 'y', 'MIDCount']].copy()
tempDf['x'] = tempDf['x'].map(lambda x: int(x/self.binSize))
tempDf['y'] = tempDf['y'].map(lambda x: int(x/self.binSize))
binDf = tempDf['MIDCount'].groupby([tempDf['x'], tempDf['y']]).sum().reset_index()
x1, x2, y1, y2 = binDf['x'].min(), binDf['x'].max(), binDf['y'].min(), binDf['y'].max()
binDf['x'] = binDf['x'] - x1
binDf['y'] = binDf['y'] - y1
sparseMt = sparse.csr_matrix((binDf['MIDCount'].astype(np.uint8), (binDf['y'], binDf['x'])))
img = sparseMt.toarray()
#median filter by kenel of size 3
median_filtered = ndimage.median_filter(img, size=3)
#normalize
Imin, Imax = median_filtered.min(), median_filtered.max()
Omin, Omax = 0, 255
a = float(Omax-Omin)/(Imax-Imin)
b = Omin - a*Imin
normalize = a*median_filtered + b
#process image
gradx = cv2.Sobel(normalize, ddepth=-1, dx=1, dy=0, ksize=-1)
grady = cv2.Sobel(normalize, ddepth=-1, dx=0, dy=1, ksize=-1)
gradient = cv2.subtract(gradx, grady)
gradient = cv2.convertScaleAbs(gradient)
blurred = cv2.blur(gradient, (3, 3))
(_, thresh) = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)
(cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
imageFile = os.path.join(self.outdir, "{0}x{0}_image.png".format(self.binSize))
if(len(cnts)<1):
filterGene = geneDf
cv2.imwrite(imageFile, img)
else:
c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect).astype(int)
Xs = box[...,0]
Ys = box[...,1]
bx1, bx2, by1, by2 = (min(Xs)+x1-0.5)*self.binSize, (max(Xs)+x1+0.5)*self.binSize, (min(Ys)+y1-0.5)*self.binSize, (max(Ys)+y1+0.5)*self.binSize
filterGene = geneDf.loc[(geneDf['x']>=bx1)&(geneDf['x']<=bx2)&(geneDf['y']>=by1)&(geneDf['y']<=by2)]
cv2.drawContours(img, [box], -1, (255, 0, 0), 1)
cv2.imwrite(imageFile, img)
filterGenefile = os.path.join(self.outdir, "merge_GetExp_gene.txt")
filterGene[['geneID', 'x', 'y', 'MIDCount']].to_csv(filterGenefile, index=None, sep="\t")
class Visualization():
def __init__(self, geneExpFile, outdir, maxBinSize=200, progress=4, img_out=True):
self.geneExpFile = geneExpFile
self.outdir = outdir
self.maxBinSize = maxBinSize
self.progress = progress
self.img_out = img_out
self.geneDf = pd.read_csv(self.geneExpFile, sep="\t", quoting=csv.QUOTE_NONE)
if "MIDCounts" in self.geneDf.columns:
self.geneDf.rename(columns={"MIDCounts": "values"}, inplace=True)
elif 'UMICount' in self.geneDf.columns:
self.geneDf.rename(columns = {"UMICount": "values"}, inplace=True)
elif 'MIDCount' in self.geneDf.columns:
self.geneDf.rename(columns={"MIDCount": "values"}, inplace=True)
dtypes = {"x": np.uint32, "y": np.uint32, "geneID": object, "values": np.uint32}
self.geneDf = self.geneDf.astype(dtypes, copy=False)
self.geneDf['geneID'] = self.geneDf['geneID'].apply(lambda x: str(x).replace("/", "_"))
os.makedirs(self.outdir, exist_ok=True)
self.dnbOutdir = os.path.join(outdir, "dnb_merge")
os.makedirs(self.dnbOutdir, exist_ok=True)
def write_h5_total(self):
total_outf = os.path.join(self.outdir, 'stereomics_total.h5')
hdf5_fh = h5py.File(total_outf, "w")
hdf5_dnb_group = hdf5_fh.create_group('dnb_merge')
hdf5_gene_group = hdf5_fh.create_group('gene_merge')
# save gene table
gene_table_df = self.geneDf['values'].groupby(self.geneDf['geneID']).sum().reset_index()
gene_table_df = gene_table_df.sort_values(by=['values'], ascending=False)
gene_table_group = hdf5_gene_group.create_group('gene_table')
gene_list = gene_table_df['geneID'].astype('S')
gene_table_group['Gene'] = gene_list
gene_table_group.create_dataset('MIDCounts', data=gene_table_df['values'], dtype='int32')
# save dnb range
dnb_range_dict = {'min_x': int(self.geneDf['x'].min()), 'max_x':int(self.geneDf['x'].max()), 'min_y':int(self.geneDf['y'].min()), 'max_y':int(self.geneDf['y'].max())}
dt = h5py.special_dtype(vlen=str)
dnb_range_arr = hdf5_dnb_group.create_dataset('dnb_range', (1,), dtype=dt)
dnb_range_arr[0] = json.dumps(dnb_range_dict)
hdf5_fh.close()
def write_bin_h5(self, geneBinDf, bin_size, img_out):
bin_file_name = 'stereomics_' + str(bin_size) + '.h5'
bin_outf = os.path.join(self.outdir, bin_file_name)
hdf5_fh_bin = h5py.File(bin_outf, "w")
hdf5_dnb_group_bin = hdf5_fh_bin.create_group('dnb_merge')
hdf5_gene_group_bin = hdf5_fh_bin.create_group('gene_merge')
##gene
merge_gene_dff = self.merge_gene_v2(geneBinDf, bin_size, bin_size)
h5_gene_bin_group = hdf5_gene_group_bin.create_group(f'bin{bin_size}')
for gene, value in merge_gene_dff.groupby(merge_gene_dff.geneID):
h5_gene_bin_group.create_dataset(gene, data=value[['x','y','values']], dtype='int32')
#dnb
merge_dnb_dff = self.merge_dnb_v2(merge_gene_dff, bin_size, bin_size)
if bin_size==200 and img_out:
if not os.path.exists(self.dnbOutdir):
os.makedirs(self.dnbOutdir)
self.getFig(merge_dnb_dff, os.path.join(self.dnbOutdir, 'bin200.png'), scale=20, dpi=72)
hdf5_dnb_group_bin.create_dataset(f'bin{bin_size}', data=merge_dnb_dff, dtype='int32')
hdf5_fh_bin.close()
def merge_gene_v2(self, gene_df, dot_x, dot_y):
#gene merge
gene_dff = gene_df.copy()
if (dot_x > 1 or dot_y > 1):
gene_dff['x'] = (gene_dff['x']/dot_x).astype('int')*dot_x
gene_dff['y'] = (gene_dff['y']/dot_y).astype('int')*dot_y
gene_dff = gene_dff['values'].groupby([gene_dff['x'], gene_dff['y'], gene_dff['geneID']]).sum().reset_index()
return gene_dff
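# Illustrative note (not part of the original pipeline): the merge above floors
# each coordinate to the lower-left corner of its bin, e.g. with
# dot_x = dot_y = 50 a spot at (123, 77) is assigned to bin (100, 50) because
# int(123/50)*50 == 100 and int(77/50)*50 == 50; 'values' are then summed per
# (x, y, geneID) within that bin.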
def merge_dnb_v2(self, dnb_df, dot_x, dot_y):
dnb_dff = dnb_df.copy()
#dnb_dff['x'] = (dnb_dff['x']/dot_x).astype('int')*dot_x
#dnb_dff['y'] = (dnb_dff['y']/dot_y).astype('int')*dot_y
dnb_dff_tmp = dnb_dff['values'].groupby([dnb_dff['x'], dnb_dff['y']]).sum().reset_index()
gene_count = dnb_dff['geneID'].groupby([dnb_dff['x'], dnb_dff['y']]).nunique().reset_index()
dnb_dff_tmp['gene_counts'] = gene_count['geneID']
return dnb_dff_tmp
def getFig(self, data, outfile, scale=1, dpi=72):
try:
cmap = mpl.colors.ListedColormap(['#0C3383', '#0A88BA', '#F2D338', '#F28F38', '#D91E1E'])
x_range=max(data['x']) - min(data['x'])
y_range=max(data['y']) - min(data['y'])
x_num = len(data['x'].drop_duplicates())
plt.figure(figsize=(1*scale,y_range/x_range*scale), facecolor='#262B3D', edgecolor='black')  ## set the figure size in inches
## remove the blank margins around the figure
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.gca().xaxis.set_ticks_position('top')
plt.gca().invert_yaxis()
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
## add a title (optional)
#plt.title('Interesting Graph',loc ='center')
x=data['x']
y=data['y']
color=data['values']
#factor = math.ceil(scale/5)
#dot_size = math.ceil((dpi * scale)*factor/x_num)
r = scale*72/(x_range/200)
dot_size = r**2
plt.scatter(x, y, c=color, s=dot_size, cmap=cmap)
plt.axis('off')
plt.savefig(outfile,facecolor='#262B3D', dpi=dpi, pad_inches = 0)
return True
except Exception as e:
print(e)
return False
def copy(self, h5_in, h5_out):
if 'gene_merge' in h5_in.keys():
for i in h5_in['gene_merge']:
if i not in h5_out['gene_merge'].keys():
h5_in.copy('gene_merge/' + i, h5_out['gene_merge'])
if 'dnb_merge' in h5_in.keys():
for i in h5_in['dnb_merge']:
if i not in h5_out['dnb_merge'].keys():
h5_in.copy('dnb_merge/' + i, h5_out['dnb_merge'])
def h5_join(self):
d_names = os.listdir(self.outdir)
final_outf = os.path.join(self.outdir, 'stereomics.h5')
h5_out = h5py.File(final_outf, "w")
h5_out.create_group('gene_merge')
h5_out.create_group('dnb_merge')
for h5name in d_names:
if h5name.endswith('h5') and h5name != 'stereomics.h5':
full_file_name = os.path.join(self.outdir, h5name)
h5_in = h5py.File(full_file_name, mode = 'r')
self.copy(h5_in, h5_out)
h5_in.close()
os.remove(full_file_name)
h5_out.close()
def process(self):
binSizeList = [1,2,5,10,15,20,50,80,100,150,200]
binSizeList = filter(lambda x: x<=self.maxBinSize, binSizeList)
self.write_h5_total()
if (self.progress == 1):
for binSize in binSizeList:
self.write_bin_h5(self.geneDf, binSize, self.img_out)
else:
pool = Pool(self.progress)
for binSize in binSizeList:
pool.apply_async(self.write_bin_h5, (self.geneDf, binSize, self.img_out,))
pool.close()
pool.join()
self.h5_join()
class ConvertBinData():
"""
input: The lasso bin gene expression matrix; The complete gene expression matrix
return: Binsize=1 gene expression matrix.
"""
def __init__(self, partfile, genefile, outfile, binSize):
self.typeColumn = {"geneID": 'str', "x": np.uint32, \
"y": np.uint32, "values": np.uint32, 'MIDCount':np.uint32, \
"MIDCounts":np.uint32, "UMICount": np.uint32}
self.partfile = partfile
self.genefile = genefile
self.outfile = outfile
self.binSize = binSize
def __Dumpresult(self, mask, genedf):
dst = np.where(mask > 0)
dstx = dst[1]
dsty = dst[0]
tissue = pd.DataFrame()
tissue['x'] = [ii + self.Xmin for ii in dstx]
tissue['y'] = [ij + self.Ymin for ij in dsty]
mergedf = pd.merge(genedf, tissue, how='inner', on=['x', 'y'])
return mergedf
def __CreateImg(self, df):
bindf = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
from xgboost import cv
import xgboost as xgb
import joblib
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
import seaborn as sns
def plot_roc(fpr, tpr, roc_auc):
""" Plot ROC curve. """
#fig = plt.figure()
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve (area = %0.6f)' % roc_auc)
plt.legend(loc="lower right")
plt.show()
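# Usage sketch (y_test / y_score are hypothetical arrays): the ROC inputs can
# be built with the sklearn helpers imported above, e.g.
# fpr, tpr, _ = roc_curve(y_test, y_score)
# plot_roc(fpr, tpr, auc(fpr, tpr))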
def randomised_gt(Y):
""" Get a random Y as a sanity check. """
Y = pd.DataFrame(np.random.randint(0, 2, Y.shape[0]), columns=['STK11'])
return Y
def get_train_test_data(X, df_gt, column_name, test_size, randomise_gt=False):
""" Split the data into training and test"""
Y = df_gt[column_name]
if randomise_gt:
Y = randomised_gt(Y)
X_train, X_test, y_train, y_test = train_test_split(X, Y,
test_size=test_size,
random_state=42,
stratify=Y)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
print('total train samples:', y_train.sum())
print('total test samples', y_test.sum())
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
return dtrain, dtest, y_train, y_test
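# Usage sketch (X and df_gt are hypothetical DataFrames sharing an index):
# dtrain, dtest, y_train, y_test = get_train_test_data(X, df_gt, 'STK11', test_size=0.2)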
def get_params():
""" All of xgboost parameters for training. """
params = {
'learning_rate': 0.01,
'n_estimators': 1000,
'verbose': 1,
'min_child_weight': 4,
'gamma': 0.6,
'subsample': 0.8,
'colsample_bytree': 0.8,
'reg_alpha': 5e-05,
'max_depth': 10,
'objective': 'binary:logistic',
'nthread': 20,
# 'scale_pos_weight': w,
'seed': 42}
return params
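# Usage sketch: the dict above is intended for xgboost's native training API,
# e.g. booster = xgb.train(get_params(), dtrain, num_boost_round=500)
# (the number of boosting rounds here is an arbitrary example value).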
def plot_corr(df_rna, df_gt, column_name):
""" Plot correlation matrices. """
rs = np.random.RandomState(0)
df = pd.concat([df_rna, df_gt[column_name]], axis=1)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
print(corr)
sns.heatmap(corr,
xticklabels=corr.columns,
yticklabels=corr.columns)
plt.show()
def run_cv(dtrain):
""" Run cross validaiton. Important: make sure that your model does not overfit."""
xgb_cv = cv(dtrain=dtrain,
params=get_params(),
nfold=10,
num_boost_round=5,
metrics="auc",
as_pandas=True,
seed=42)
print('Cross validation results: \n', xgb_cv)
def run_ml(path, training_data_filename, training_gt_data_filename, column_name, genes_subset=None, test_size=0.2, save_model=False):
""" Main function to train an xgboost classifier, save it, evaluate, plot importance of its features."""
df_rna = | pd.read_csv(path + training_data_filename) | pandas.read_csv |
from analytic_types.segment import Segment
import utils
import unittest
import numpy as np
import pandas as pd
import math
import random
RELATIVE_TOLERANCE = 1e-1
class TestUtils(unittest.TestCase):
# Example test to verify the test workflow itself
def test_segment_parsing(self):
self.assertTrue(True)
def test_confidence_all_normal_value(self):
segment = [1, 2, 0, 6, 8, 5, 3]
utils_result = utils.find_confidence(segment)[0]
result = 4.0
self.assertTrue(math.isclose(utils_result, result, rel_tol = RELATIVE_TOLERANCE))
def test_confidence_all_nan_value(self):
segment = [np.nan, np.nan, np.nan, np.nan]
self.assertEqual(utils.find_confidence(segment)[0], 0)
def test_confidence_with_nan_value(self):
data = [np.nan, np.nan, 0, 8]
utils_result = utils.find_confidence(data)[0]
result = 4.0
self.assertTrue(math.isclose(utils_result, result, rel_tol = RELATIVE_TOLERANCE))
def test_interval_all_normal_value(self):
data = [1, 2, 1, 2, 4, 1, 2, 4, 5, 6]
data = | pd.Series(data) | pandas.Series |
import os
import pandas
import pathlib
from scipy.stats import stats
class Output:
def __init__(self, rwr_df, multiplexall, top: int):
self.rwr_result_list = rwr_df
self.multiplexall = multiplexall
self._df = rwr_df
multiplex_node_prob_zero_df = self._df.loc[self._df.score == 0][['multiplex', 'node']].drop_duplicates()
multiplex_node_prob_zero_df['score'] = 0
self._df = (self._df.loc[self._df.score > 0]).groupby(['multiplex', 'node']).agg({'score': stats.gmean}).reset_index()
self._df = pandas.concat([multiplex_node_prob_zero_df, self._df], axis=0)
self._df = self._df.drop_duplicates(['multiplex', 'node'], keep='first')
self._df.sort_values('score', ascending=False, inplace=True)
#######################################################################
#
# Keep only top k nodes
#
#######################################################################
if not (top is None):
self._df = self._df.groupby('multiplex').head(top)
def to_sif(self, bipartiteall, path: str):
pathlib.Path(os.path.dirname(path)).mkdir(exist_ok=True, parents=True)
out_lst = [] # list of edges with relation type to write
selected_nodes = self._df.node.tolist()
# Multiplex undirected edges
for u, v, edgeidx in self.multiplexall.multigraph.edges:
if (u in selected_nodes) or (v in selected_nodes):
edge_data_dic = self.multiplexall.multigraph.get_edge_data(u, v, edgeidx)
out_lst.append((u, edge_data_dic['network_key'], v))
# Multiplex directed edges
for u, v, edgeidx in self.multiplexall.multidigraph.edges:
if u in selected_nodes or v in selected_nodes:
edge_data_dic = self.multiplexall.multidigraph.get_edge_data(u, v, edgeidx)
out_lst.append((u, edge_data_dic['network_key'], v))
# Bipartite undirected edges
for u, v in bipartiteall.graph.edges:
if u in selected_nodes or v in selected_nodes:
edge_data_dic = bipartiteall.graph.get_edge_data(u, v)
out_lst.append((u, edge_data_dic['network_key'], v))
# Bipartite directed edges
for u, v in bipartiteall.digraph.edges:
if u in selected_nodes or v in selected_nodes:
edge_data_dic = bipartiteall.digraph.get_edge_data(u, v)
out_lst.append((u, edge_data_dic['network_key'], v))
out_df = pandas.DataFrame(out_lst, columns=['node1', 'relationship_type', 'node2'], dtype=str)
out_df.to_csv(path, sep="\t", header=False, index=False)
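# Note: the resulting SIF-style file is tab-separated with one edge per line,
# node1 <tab> relationship_type <tab> node2, where relationship_type is the
# 'network_key' attribute stored on the edge.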
def to_tsv(self, outdir: str, degree: bool):
pathlib.Path(outdir).mkdir(exist_ok=True, parents=True)
out_df = self._df
#######################################################################
#
# Annotate nodes with layers and degrees
#
#######################################################################
if degree:
undirdegree_df = pandas.DataFrame(columns=['multiplex', 'layer', 'node', 'degree'])
inoutdegree_df = pandas.DataFrame(columns=['multiplex', 'layer', 'node', 'indegree', 'outdegree'])
for multiplex in self.multiplexall.multiplex_tuple:
for layer in multiplex.layer_tuple:
if layer.graph_type[0] == '0': # undirected graph
degree_layer_df = pandas.DataFrame(layer.networkx.degree, columns=['node', 'degree'])
degree_layer_df['multiplex'] = multiplex.key
degree_layer_df['layer'] = layer.key
undirdegree_df = pandas.concat([undirdegree_df, degree_layer_df], axis=0)
if layer.graph_type[0] == '1': # directed graph
indegree_layer_df = pandas.DataFrame(layer.networkx.in_degree, columns=['node', 'indegree'])
outdegree_layer_df = | pandas.DataFrame(layer.networkx.out_degree, columns=['node', 'outdegree']) | pandas.DataFrame |
# Importing needed libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
:param messages_filepath: the location of the file of messages csv
:param categories_filepath: the location of the file of categories csv
:return: a dataframe containing a merge of the messages and the respective categories
'''
# reading with pandas
messages = pd.read_csv(messages_filepath)
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
import pandas as pd
import numpy as np
import csv
PREG_ID= 'PREG_ID'
Sentrix= 'IID'
def format_df(df):
df[['chr', 'pos', 'ref', 'eff']]= df['index'].str.split(':', expand= True)
cols = list(df.columns.values)
cols= cols[-4:] + cols[:-4]
df= df[cols]
df.drop(['index'], axis= 1, inplace= True)
return df
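# Illustrative note: format_df expects an 'index' column of the form
# "chr:pos:ref:eff"; e.g. a hypothetical value "1:12345:A:G" becomes
# chr='1', pos='12345', ref='A', eff='G', and these four columns are moved to
# the front of the frame.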
d= pd.read_csv(snakemake.input[2], sep= '\t', header= 0)
d.dropna(axis= 0, inplace= True)
with open(snakemake.input[0], 'r') as infile:
reader= csv.DictReader(infile, delimiter= '\t')
fets_cols= reader.fieldnames
with open(snakemake.input[1], 'r') as infile:
reader= csv.DictReader(infile, delimiter= '\t')
moms_cols= reader.fieldnames
with open(snakemake.input[3], 'r') as infile:
reader= csv.DictReader(infile, delimiter= '\t')
dads_cols= reader.fieldnames
d= d.loc[d.Child.isin(fets_cols), :]
d= d.loc[d.Mother.isin(moms_cols), :]
d= d.loc[d.Father.isin(dads_cols), :]
fets_snp_list= list()
h1_df_list= list()
h3_df_list= list()
for fets in pd.read_csv(snakemake.input[0], sep='\t', header= 0, chunksize= 500):
fets_snp_list.append(fets.chr.apply(str) + ':' + fets.pos.apply(str) + ':' + fets.ref + ':' + fets.eff)
fets= fets[d.Child]
fets= fets.astype(str)
fets= np.where(fets== '0', '0|0', np.where(fets== '1', '1|1', fets))
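# The two np.where calls below split each phased genotype string 'a|b' into
# per-haplotype allele dosages: h3 takes the allele left of '|' and h1 the
# allele right of '|' (which parent each side corresponds to is not stated in
# this script).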
h3= np.where((fets== '0|0') | (fets== '0|1'), 0, np.where((fets== '1|0') | (fets== '1|1'), 1,np.nan))
h1= np.where((fets== '0|0') | (fets== '1|0'), 0, np.where((fets== '0|1') | (fets== '1|1'), 1, np.nan))
h1_df_list.append(h1)
h3_df_list.append(h3)
varnames= | pd.concat(fets_snp_list) | pandas.concat |
"""
Functions to validate the input files prior to database insert / upload.
"""
import time
import numpy as np
import pandas as pd
import IEDC_paths, IEDC_pass
from IEDC_tools import dbio, file_io, __version__
def check_datasets_entry(file_meta, create=True, crash_on_exist=True, update=True, replace=False):
"""
Creates an entry in the `datasets` table.
:param file_meta: data file metadata
:param crash_on_exist: if True: function terminates with assertion error if dataset/version already exists
:param update: if True: function updates dataset entry if dataset/version already exists
:param create: if True: function creates dataset entry for dataset/version
:param replace: if True: delete existing entry in dataset table and create new one with current data
"""
db_datasets = dbio.get_sql_table_as_df('datasets')
dataset_info = file_meta['dataset_info']
# Check if entry already exists
dataset_name_ver = [i[0] for i in dataset_info.loc[['dataset_name', 'dataset_version']]
.where((pd.notnull(dataset_info.loc[['dataset_name', 'dataset_version']])), None).values]
if dataset_name_ver[1] in ['NULL']:
dataset_name_ver[1] = None
# If exists already
if dataset_name_ver in db_datasets[['dataset_name', 'dataset_version']].values.tolist(): # dataset name + verion already exists in dataset catalog
if crash_on_exist:
raise AssertionError("Database already contains the following dataset (dataset_name, dataset_version):\n %s"
% dataset_name_ver)
elif update:
update_dataset_entry(file_meta)
elif replace:
# get id
if dataset_name_ver[1] == None:
db_id = db_datasets.loc[(db_datasets['dataset_name'] == dataset_name_ver[0]) &
pd.isna(db_datasets['dataset_version'])].index[0]
else:
db_id = db_datasets.loc[(db_datasets['dataset_name'] == dataset_name_ver[0]) &
(db_datasets['dataset_version'] == dataset_name_ver[1])].index[0]
dbio.run_this_command("DELETE FROM %s.datasets WHERE id = %s;" % (IEDC_pass.IEDC_database, db_id))
# add new one
create_dataset_entry(file_meta)
else:
# do nothing
print("Database already contains the following dataset (dataset_name, dataset_version):\n %s"
% dataset_name_ver)
return True
# if it doesn't exist yet
else:
if create:
create_dataset_entry(file_meta)
else: # i.e. crash_on_not_exist
raise AssertionError("Database does not contain the following dataset (dataset_name, dataset_version):\n %s"
% dataset_name_ver)
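# Usage sketch: with the default flags the function creates the entry when it
# is missing and raises when it already exists; to drop and re-create an
# existing dataset/version one would call e.g.
# check_datasets_entry(file_meta, crash_on_exist=False, update=False, replace=True)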
def create_dataset_entry(file_meta):
dataset_info = file_meta['dataset_info']
dataset_info = dataset_info.replace([np.nan], [None])
dataset_info = dataset_info.replace({'na': None, 'nan': None, 'none': None,
'NULL': None})
dataset_info = dataset_info.to_dict()['Dataset entries']
assert dataset_info['dataset_id'] == 'auto', \
"Was hoping 'dataset_id' in the file template had the value 'auto'. Not sure what to do now..."
# Clean up dict
dataset_info.pop('dataset_id')
if pd.isna(dataset_info['reserve5']):
dataset_info['reserve5'] = 'Created by IEDC_tools v%s' % __version__
# Look up stuff
data_types = dbio.get_sql_table_as_df('types')
dataset_info['data_type'] = data_types.loc[data_types['name'] == dataset_info['data_type']].index[0]
data_layers = dbio.get_sql_table_as_df('layers')
dataset_info['data_layer'] = data_layers.loc[data_layers['name'] == dataset_info['data_layer']].index[0]
data_provenance = dbio.get_sql_table_as_df('provenance')
dataset_info['data_provenance'] = data_provenance.loc[data_provenance['name'] ==
dataset_info['data_provenance']].index[0]
aspects = dbio.get_sql_table_as_df('aspects')
class_defs = dbio.get_sql_table_as_df('classification_definition')
for aspect in [i for i in dataset_info.keys() if i.startswith('aspect_')]:
if dataset_info[aspect] is None or aspect.endswith('classification'):
continue
if dataset_info[aspect+'_classification'] == 'custom':
aspect_class_name = str(dataset_info[aspect]) + '__' + dataset_info['dataset_name']
dataset_info[aspect+'_classification'] = \
class_defs[class_defs['classification_name'] == aspect_class_name].index[0]
dataset_info[aspect] = aspects[aspects['aspect'] == dataset_info[aspect]].index[0]
source_type = dbio.get_sql_table_as_df('source_type')
dataset_info['type_of_source'] = source_type.loc[source_type['name'] == dataset_info['type_of_source']].index[0]
licenses = dbio.get_sql_table_as_df('licences')
dataset_info['project_license'] = licenses.loc[licenses['name'] == dataset_info['project_license']].index[0]
users = dbio.get_sql_table_as_df('users')
dataset_info['submitting_user'] = users.loc[users['name'] == dataset_info['submitting_user']].index[0]
# fix some more
for k in dataset_info:
# not sure why but pymysql doesn't like np.int64
if type(dataset_info[k]) == np.int64:
dataset_info[k] = int(dataset_info[k])
dbio.dict_sql_insert('datasets', dataset_info)
print("Created entry for %s in 'datasets' table." % [dataset_info[k] for k in ['dataset_name', 'dataset_version']])
return None
def update_dataset_entry(file_meta):
raise NotImplementedError
def create_aspects_table(file_meta):
"""
Pulls the info on classification and attributes together, i.e. make sense of the messy attributes in an actual
table... More of a convenience function for tired programmers.
See sheet 'Cover' in template file.
:param file: Filename, string
:return: Dataframe table with name, classification_id, and attribute_no
"""
# Read the file and put metadata and row_classifications in two variables
dataset_info = file_meta['dataset_info']
row_classifications = file_meta['row_classifications']
col_classifications = file_meta['col_classifications']
# Filter relevant rows from the metadata table, i.e. the ones containing 'aspect'
custom_aspects = dataset_info[dataset_info.index.str.startswith('aspect_')]
custom_aspects = custom_aspects[custom_aspects.index.str.endswith('_classification')]
# Get rid of the empty ones
custom_aspects = custom_aspects[custom_aspects['Dataset entries'] != 'none']
# Here comes the fun... Let's put everything into a dict, because that is easily converted to a dataframe
d = {'classification_id': custom_aspects['Dataset entries'].values,
'index': [i.replace('_classification', '') for i in custom_aspects.index],
'name': dataset_info.loc[[i.replace('_classification', '')
for i in custom_aspects.index]]['Dataset entries'].values}
if file_meta['data_type'] == 'LIST':
d['attribute_no'] = row_classifications.reindex(d['name'])['Aspects_Attribute_No'].values
d['position'] = 'row?'
elif file_meta['data_type'] == 'TABLE':
d['attribute_no'] = row_classifications \
.reindex(d['name'])['Row_Aspects_Attribute_No'] \
.fillna(col_classifications.Col_Aspects_Attribute_No).values
# The table format file has no info on the position of aspects. Need to find that.
d['position'] = []
for n in d['name']:
if n in row_classifications.index:
d['position'].append('row' + str(row_classifications.index.get_loc(n)))
if n in col_classifications.index:
d['position'].append('col' + str(col_classifications.index.get_loc(n)))
assert not any([i is None for i in d['attribute_no']]) # 'not any' means 'none'
# Convert to df and get rid of the redundant 'index' column
aspect_table = pd.DataFrame(d, index=d['index']).drop('index', axis=1)
return aspect_table
def get_class_names(file_meta, aspect_table):
"""
Creates and looks up names for classification, i.e. classifications that are not found in the database (custom)
will be generated and existing ones (non-custom) looked up in the classification_definitions table.
The name is generated as a combination of the dataset name and the classification name, e.g.
"1_F_steel_SankeyFlows_2008_Global_origin_process".
The function extends the table created in create_aspects_table() and returns it.
:param file: Filename, string
:return: Dataframe table with name, classification_id, attribute_no, and custom_name
"""
dataset_info = file_meta['dataset_info']
db_classdef = dbio.get_sql_table_as_df('classification_definition')
r = []
for aspect in aspect_table.index:
if aspect_table.loc[aspect, 'classification_id'] == 'custom':
r.append(aspect_table.loc[aspect, 'name'] + '__' + dataset_info.loc['dataset_name', 'Dataset entries'])
else:
r.append(db_classdef.loc[aspect_table.loc[aspect, 'classification_id'], 'classification_name'])
aspect_table['custom_name'] = r
return aspect_table
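# Illustrative note (hypothetical values): a custom aspect named
# 'origin_process' in a dataset 'steel_SankeyFlows_2008_Global' gets the
# generated custom_name 'origin_process__steel_SankeyFlows_2008_Global',
# while non-custom aspects keep the classification_name looked up in the
# database.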
def check_classification_definition(class_names, crash=True, warn=True,
custom_only=False, exclude_custom=False):
"""
Checks if classifications exist in the database, i.e. in classification_definition.
:param class_names: List of classification names
:param crash: Strongly recommended -- will cause the script to stop if the classification already exists. Otherwise
there could be ambiguous classifications with multiple IDs.
:param warn: Allows to suppress the warning message
:param custom_only: Check only custom classifications
:param exclude_custom: Exclude custom classifications
:return: True or False
"""
db_classdef = dbio.get_sql_table_as_df('classification_definition')
exists = []
for aspect in class_names.index:
attrib_no = class_names.loc[aspect, 'attribute_no']
if attrib_no != 'custom' and custom_only:
continue # skip already existing classifications
if attrib_no == 'custom' and exclude_custom:
continue # skip custom classifications
if class_names.loc[aspect, 'custom_name'] in db_classdef['classification_name'].values:
exists.append(True)
if crash:
raise AssertionError("""Classification '%s' already exists in the DB classification table (ID: %s).
Aspect '%s' cannot be processed.""" %
(class_names.loc[aspect, 'custom_name'],
db_classdef[db_classdef['classification_name']
== 'general_product_categories'].index[0], aspect))
elif warn:
print("WARNING: '%s' already exists in the DB classification table. "
"Adding it again may fail or create ambiguous values." %
class_names.loc[aspect, 'custom_name'])
else:
exists.append(False)
return exists
def check_classification_items(class_names, file_meta, file_data, crash=True, warn=True,
custom_only=False, exclude_custom=False):
"""
Checks in classification_items if a. all classification_ids exists and b. all attributes exist
:param class_names: List of classification names
:param file_data: Dataframe of Excel file, sheet `Data`
:param crash: Strongly recommended -- will cause the script to stop if the classification_id already exists in
classification_items. Otherwise there could be ambiguous values with multiple IDs.
:param custom_only: Check only custom classifications
:param exclude_custom: Exclude custom classifications
:param warn: Allows to suppress the warning message
:return:
"""
db_classdef = dbio.get_sql_table_as_df('classification_definition')
db_classitems = dbio.get_sql_table_as_df('classification_items')
exists = [] # True / False switch
for aspect in class_names.index:
attrib_no = class_names.loc[aspect, 'attribute_no']
# remove garbage from string
try:
attrib_no = attrib_no.strip(' ')
except:
pass
if attrib_no != 'custom' and custom_only:
continue # skip already existing classifications
if attrib_no == 'custom' and exclude_custom:
continue # skip custom classifications
# make sure classification id exists -- must pass, otherwise the next command will fail
assert class_names.loc[aspect, 'custom_name'] in db_classdef['classification_name'].values, \
"Classification '%s' does not exist in table 'classification_definiton'" % \
class_names.loc[aspect, 'custom_name']
# get classification_id
class_id = db_classdef.loc[db_classdef['classification_name'] ==
class_names.loc[aspect, 'custom_name']].index[0]
# Check if the classification_id already exists in classification_items
if class_id in db_classitems['classification_id'].unique():
exists.append(True)
if crash:
raise AssertionError("classification_id '%s' already exists in the table classification_items." %
class_id)
elif warn:
print("WARNING: classification_id '%s' already exists in the table classification_items. "
"Adding its attributes again may fail or create ambiguous values." %
class_id)
else:
exists.append(False)
print(aspect, class_id, 'not in classification_items')
# Next check if all attributes exist
if attrib_no == 'custom':
attrib_no = 'attribute1_oto'
else:
attrib_no = 'attribute' + str(int(attrib_no)) + '_oto'
checkme = db_classitems.loc[db_classitems['classification_id'] == class_id][attrib_no].values
if file_meta['data_type'] == 'LIST':
attributes = file_data[class_names.loc[aspect, 'name']].unique()
elif file_meta['data_type'] == 'TABLE':
if class_names.loc[aspect, 'position'][:3] == 'row':
if len(file_meta['row_classifications'].values) == 1:
attributes = file_data.index.values
else:
attributes = file_data.index.levels[int(class_names.loc[aspect, 'position'][-1])]
elif class_names.loc[aspect, 'position'][:3] == 'col':
if len(file_meta['col_classifications'].values) == 1:
# That means there is only one column level defined, i.e. no MultiIndex
attributes = file_data.columns.values
else:
attributes = file_data.columns.levels[int(class_names.loc[aspect, 'position'][-1])]
for attribute in attributes:
if str(attribute) in checkme:
exists.append(True)
if crash:
raise AssertionError("'%s' already in %s" % (attribute, checkme))
elif warn:
print("WARNING: '%s' already in classification_items" % attribute)
else:
exists.append(False)
print(aspect, attribute, class_id, 'not in classification_items')
return exists
def create_db_class_defs(file_meta, aspect_table):
"""
Writes the custom classification to the table classification_definition.
:param file: The data file to read.
"""
class_names = get_class_names(file_meta, aspect_table)
db_aspects = dbio.get_sql_table_as_df('aspects', index='aspect')
check_classification_definition(class_names, custom_only=True)
for aspect in class_names.index:
if class_names.loc[aspect, 'classification_id'] != 'custom':
continue # skip already existing classifications
d = {'classification_name': str(class_names.loc[aspect, 'custom_name']),
'dimension': str(db_aspects.loc[class_names.loc[aspect, 'name'], 'dimension']),
'description': 'Custom classification, generated by IEDC_tools v%s' % __version__,
'mutually_exclusive': True,
'collectively_exhaustive': False,
'created_from_dataset': True, # signifies that this is a custom classification
'general': False,
'meaning_attribute1': "'%s' aspect of dataset" % aspect # cannot be NULL???
}
dbio.dict_sql_insert('classification_definition', d)
print("Wrote custom classification '%s' to classification_definitions" %
class_names.loc[aspect, 'custom_name'])
def create_db_class_items(file_meta, aspects_table, file_data):
"""
Writes the unique database items / attributes of a custom classification to the database.
:param file: Data file to read
"""
class_names = get_class_names(file_meta, aspects_table)
db_classdef = dbio.get_sql_table_as_df('classification_definition')
check_classification_items(class_names, file_meta, file_data, custom_only=True, crash=True)
for aspect in class_names.index:
if class_names.loc[aspect, 'classification_id'] != 'custom':
continue # skip already existing classifications
# get classification_id
class_id = db_classdef.loc[db_classdef['classification_name'] ==
class_names.loc[aspect, 'custom_name']].index[0]
d = {'classification_id': class_id,
'description': 'Custom classification, generated by IEDC_tools v%s' % __version__,
'reference': class_names.loc[aspect, 'custom_name'].split('__')[1]}
if file_meta['data_type'] == 'LIST':
attributes = sorted(file_data[class_names.loc[aspect, 'name']].apply(str).unique())
elif file_meta['data_type'] == 'TABLE':
if class_names.loc[aspect, 'position'][:-1] == 'col':
if len(file_meta['col_classifications'].values) == 1:
# That means there is only one column level defined, i.e. no MultiIndex
attributes = [str(i) for i in file_data.columns]
else:
attributes = sorted(
[str(i) for i in file_data.columns.levels[int(class_names.loc[aspect, 'position'][-1])]])
elif class_names.loc[aspect, 'position'][:-1] == 'row':
if len(file_meta['row_classifications'].values) == 1:
attributes = [str(i) for i in file_data.index]
else:
attributes = sorted(
[str(i) for i in file_data.index.levels[int(class_names.loc[aspect, 'position'][-1])]])
df = pd.DataFrame({'classification_id': [d['classification_id']] * len(attributes),
'description': [d['description']] * len(attributes),
'reference': [d['reference']] * len(attributes),
'attribute1_oto': attributes})
columns = ('classification_id', 'description', 'reference', 'attribute1_oto')
dbio.bulk_sql_insert('classification_items', columns, df.values.tolist())
print("Wrote attributes for custom classification '%s' to classification_items: %s" % (class_id, attributes))
def add_user(file_meta, quiet=False):
dataset_info = file_meta['dataset_info']
db_user = dbio.get_sql_table_as_df('users')
realname = dataset_info.loc['submitting_user'].values[0]
if realname in db_user['name'].values:
if not quiet:
print("User '%s' already exists in db table users" % realname)
else:
d = {'name': realname,
'username': (realname.split(' ')[0][0] + realname.split(' ')[1]).lower(),
'start_date': time.strftime('%Y-%m-%d %H:%M:%S')
}
dbio.dict_sql_insert('users', d)
print("User '%s' written to db table users" % d['username'])
def add_license(file_meta, quiet=False):
dataset_info = file_meta['dataset_info']
db_licenses = dbio.get_sql_table_as_df('licences')
file_licence = dataset_info.loc['project_license'].values[0]
if file_licence in db_licenses['name'].values:
if not quiet:
print("Licence '%s' already exists in db table 'licences'" % file_licence)
else:
d = {'name': file_licence,
'description': 'n/a, generated by IEDC_tools v%s' % __version__}
dbio.dict_sql_insert('licences', d)
print("Licence '%s' written to db table 'licences'" % file_licence)
def parse_stats_array_list(stats_array_strings):
"""
Parses the 'stats_array string' from the Excel template. E.g. "3;10;3.0;none;" should fill the respective columns
in the data table as follows: stats_array_1 = 3, stats_array_2 = 10, stats_array_3 = 3.0, stats_array_4 = none
More info: https://github.com/IndEcol/IE_data_commons/issues/14
:param stats_array_strings:
:return:
"""
temp_list = []
for sa_string in stats_array_strings:
if sa_string == 'none':
temp_list.append([None] * 4)
else:
parts = sa_string.rstrip(';').split(';')
assert len(parts) == 4, "The 'stats_array string' is not well formatted: %s" % sa_string
temp_list.append(parts)
return_df = | pd.DataFrame(temp_list) | pandas.DataFrame |
import logging
log = logging.getLogger(__name__)
import itertools
import importlib
from functools import partial
from collections import defaultdict
import numpy as np
import pandas as pd
import pyqtgraph as pg
from atom.api import (Str, Float, Tuple, Int, Typed, Property, Atom,
Bool, Enum, List, Dict, Callable, Value, observe)
from enaml.application import deferred_call, timed_call
from enaml.colors import parse_color
from enaml.core.api import Looper, Declarative, d_, d_func
from enaml.qt.QtGui import QColor
from psi.util import octave_space, SignalBuffer, ConfigurationException
from psi.core.enaml.api import load_manifests, PSIContribution
from psi.controller.calibration import util
from psi.context.context_item import ContextMeta
################################################################################
# Utility functions
################################################################################
def get_x_fft(fs, duration):
n_time = int(fs * duration)
freq = np.fft.rfftfreq(n_time, fs**-1)
return np.log10(freq)
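# Illustrative note: for fs=1000 Hz and duration=1 s this returns log10 of the
# 501 one-sided FFT bin frequencies 0, 1, ..., 500 Hz (log10 of the DC bin is
# -inf).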
def get_color_cycle(name):
module_name, cmap_name = name.rsplit('.', 1)
module = importlib.import_module(module_name)
cmap = getattr(module, cmap_name)
return itertools.cycle(cmap.colors)
def make_color(color):
if isinstance(color, tuple):
return QColor(*color)
elif isinstance(color, str):
return QColor(color)
else:
raise ValueError('Unknown color %r' % color)
################################################################################
# Style mixins
################################################################################
class ColorCycleMixin(Declarative):
#: Define the pen color cycle. Can be a list of colors or a string
#: indicating the color palette to use in palettable.
pen_color_cycle = d_(Typed(object))
_plot_colors = Typed(dict)
def _default_pen_color_cycle(self):
return ['k']
def _make_plot_cycle(self):
if isinstance(self.pen_color_cycle, str):
cycle = get_color_cycle(self.pen_color_cycle)
else:
cycle = itertools.cycle(self.pen_color_cycle)
return defaultdict(lambda: next(cycle))
@d_func
def get_pen_color(self, key):
if self._plot_colors is None:
self._plot_colors = self._make_plot_cycle()
color = self._plot_colors[key]
if not isinstance(color, str):
return QColor(*color)
else:
return QColor(color)
def _observe_pen_color_cycle(self, event):
self._plot_colors = self._make_plot_cycle()
self._reset_plots()
def _reset_plots(self):
raise NotImplementedError
################################################################################
# Supporting classes
################################################################################
class BaseDataRange(Atom):
container = Typed(object)
# Size of display window
span = Float(1)
# Delay before clearing window once data has "scrolled off" the window.
delay = Float(0)
# Current visible data range
current_range = Tuple(Float(), Float())
def add_source(self, source):
cb = partial(self.source_added, source=source)
source.add_callback(cb)
def _default_current_range(self):
return 0, self.span
def _observe_delay(self, event):
self._update_range()
def _observe_span(self, event):
self._update_range()
def _update_range(self):
raise NotImplementedError
class EpochDataRange(BaseDataRange):
max_duration = Float()
def source_added(self, data, source):
n = [len(d['signal']) for d in data]
max_duration = max(n) / source.fs
self.max_duration = max(max_duration, self.max_duration)
def _observe_max_duration(self, event):
self._update_range()
def _update_range(self):
self.current_range = 0, self.max_duration
class ChannelDataRange(BaseDataRange):
# Automatically updated. Indicates last "seen" time based on all data
# sources reporting to this range.
current_time = Float(0)
current_samples = Typed(defaultdict, (int,))
current_times = Typed(defaultdict, (float,))
def _observe_current_time(self, event):
self._update_range()
def _update_range(self):
low_value = (self.current_time//self.span)*self.span - self.delay
high_value = low_value+self.span
self.current_range = low_value, high_value
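# Illustrative note: with span=5 s, delay=1 s and current_time=12.3 s this
# gives low = (12.3 // 5) * 5 - 1 = 9 and high = 14, i.e. the visible window
# only advances in whole-span steps rather than scrolling continuously.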
def add_event_source(self, source):
cb = partial(self.event_source_added, source=source)
source.add_callback(cb)
def source_added(self, data, source):
self.current_samples[source] += data.shape[-1]
self.current_times[source] = self.current_samples[source]/source.fs
self.current_time = max(self.current_times.values())
def event_source_added(self, data, source):
self.current_times[source] = data[-1][1]
self.current_time = max(self.current_times.values())
def create_container(children, x_axis=None):
log.debug('Creating graphics layout')
container = pg.GraphicsLayout()
container.setSpacing(10)
# Add the x and y axes to the layout, along with the viewbox.
for i, child in enumerate(children):
log.debug('... child %d with viewbox %r', i, child.viewbox)
container.addItem(child.y_axis, i, 0)
container.addItem(child.viewbox, i, 1)
try:
container.addItem(child.viewbox_norm, i, 1)
except AttributeError:
pass
if x_axis is not None:
container.addItem(x_axis, i+1, 1)
# Link the child viewboxes together
for child in children[1:]:
child.viewbox.setXLink(children[0].viewbox)
return container
################################################################################
# Containers (defines a shared set of containers across axes)
################################################################################
class BasePlotContainer(PSIContribution):
label = d_(Str())
container = Typed(pg.GraphicsWidget)
x_axis = Typed(pg.AxisItem)
base_viewbox = Property()
legend = Typed(pg.LegendItem)
x_transform = Callable()
buttons = d_(List())
current_button = d_(Value())
allow_auto_select = d_(Bool(True))
auto_select = d_(Bool(True))
@d_func
def fmt_button(self, key):
return str(key)
def _observe_buttons(self, event):
if not self.buttons:
return
if self.current_button not in self.buttons:
self.current_button = self.buttons[0]
def _observe_allow_auto_select(self, event):
if not self.allow_auto_select:
self.auto_select = False
def _default_x_transform(self):
return lambda x: x
def _default_container(self):
container = pg.GraphicsLayout()
container.setSpacing(10)
# Add the x and y axes to the layout, along with the viewbox.
for i, child in enumerate(self.children):
container.addItem(child.y_axis, i, 0)
container.addItem(child.viewbox, i, 1)
try:
container.addItem(child.viewbox_norm, i, 1)
except AttributeError:
pass
child._configure_viewbox()
if self.x_axis is not None:
container.addItem(self.x_axis, i+1, 1)
# Link the child viewboxes together
for child in self.children[1:]:
child.viewbox.setXLink(self.children[0].viewbox)
return container
def add_legend_item(self, plot, label):
self.legend.addItem(plot, label)
def _default_legend(self):
legend = pg.LegendItem()
legend.setParentItem(self.container)
return legend
def _get_base_viewbox(self):
return self.children[0].viewbox
def _default_x_axis(self):
x_axis = pg.AxisItem('bottom')
x_axis.setGrid(64)
x_axis.linkToView(self.children[0].viewbox)
return x_axis
def update(self, event=None):
pass
def find(self, name):
for child in self.children:
if child.name == name:
return child
def format_container(self):
pass
def _reset_plots(self):
pass
class PlotContainer(BasePlotContainer):
x_min = d_(Float(0))
x_max = d_(Float(0))
@observe('x_min', 'x_max')
def format_container(self, event=None):
# If we want to specify values relative to a psi context variable, we
# cannot do it when initializing the plots.
if (self.x_min != 0) or (self.x_max != 0):
self.base_viewbox.setXRange(self.x_min, self.x_max, padding=0)
def update(self, event=None):
deferred_call(self.format_container)
class BaseTimeContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same time-based X-axis
'''
data_range = Typed(BaseDataRange)
span = d_(Float(1))
delay = d_(Float(0.25))
def _default_container(self):
container = super()._default_container()
# Ensure that the x axis shows the planned range
self.base_viewbox.setXRange(0, self.span, padding=0)
self.data_range.observe('current_range', self.update)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Time', unitPrefix='sec.')
return x_axis
def update(self, event=None):
low, high = self.data_range.current_range
deferred_call(self.base_viewbox.setXRange, low, high, padding=0)
super().update()
class TimeContainer(BaseTimeContainer):
def _default_data_range(self):
return ChannelDataRange(container=self, span=self.span,
delay=self.delay)
def update(self, event=None):
for child in self.children:
child.update()
super().update()
class EpochTimeContainer(BaseTimeContainer):
def _default_data_range(self):
return EpochDataRange(container=self, span=self.span, delay=self.delay)
def format_log_ticks(values, scale, spacing):
values = 10**np.array(values).astype(np.float)
return ['{:.1f}'.format(v * 1e-3) for v in values]
class FFTContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same frequency-based X-axis
'''
freq_lb = d_(Float(500))
freq_ub = d_(Float(50000))
octave_spacing = d_(Bool(True))
def _default_x_transform(self):
return np.log10
@observe('container', 'freq_lb', 'freq_ub')
def _update_x_limits(self, event):
self.base_viewbox.setXRange(np.log10(self.freq_lb),
np.log10(self.freq_ub),
padding=0)
if self.octave_spacing:
major_ticks = octave_space(self.freq_lb / 1e3, self.freq_ub / 1e3, 1.0)
major_ticklabs = [str(t) for t in major_ticks]
major_ticklocs = np.log10(major_ticks * 1e3)
minor_ticks = octave_space(self.freq_lb / 1e3, self.freq_ub / 1e3, 0.125)
minor_ticklabs = [str(t) for t in minor_ticks]
minor_ticklocs = np.log10(minor_ticks * 1e3)
ticks = [
list(zip(major_ticklocs, major_ticklabs)),
list(zip(minor_ticklocs, minor_ticklabs)),
]
self.x_axis.setTicks(ticks)
else:
self.x_axis.setTicks()
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Frequency (kHz)')
x_axis.logTickStrings = format_log_ticks
x_axis.setLogMode(True)
return x_axis
################################################################################
# ViewBox
################################################################################
class ViewBox(PSIContribution):
# Make this weak-referenceable so we can bind methods to Qt slots.
__slots__ = '__weakref__'
viewbox = Typed(pg.ViewBox)
viewbox_norm = Typed(pg.ViewBox)
y_axis = Typed(pg.AxisItem)
y_min = d_(Float(0))
y_max = d_(Float(0))
y_mode = d_(Enum('mouse', 'fixed'))
data_range = Property()
save_limits = d_(Bool(False))
@observe('y_min', 'y_max')
def _update_limits(self, event=None):
self.viewbox.setYRange(self.y_min, self.y_max, padding=0)
def _default_name(self):
return self.label
def _get_data_range(self):
return self.parent.data_range
def _default_y_axis(self):
y_axis = pg.AxisItem('left')
y_axis.setLabel(self.label)
y_axis.setGrid(64)
return y_axis
def _sync_limits(self, vb=None):
with self.suppress_notifications():
box = self.viewbox.viewRange()
self.y_min = float(box[1][0])
self.y_max = float(box[1][1])
def _default_viewbox(self):
return pg.ViewBox(enableMenu=False)
def _configure_viewbox(self):
viewbox = self.viewbox
viewbox.setMouseEnabled(
x=False,
y=self.y_mode == 'mouse'
)
viewbox.disableAutoRange()
viewbox.setBackgroundColor('w')
self.y_axis.linkToView(viewbox)
viewbox.setYRange(self.y_min, self.y_max, padding=0)
for child in self.children:
plots = child.get_plots()
if isinstance(plots, dict):
for label, plot in plots.items():
deferred_call(self.add_plot, plot, label)
else:
for plot in plots:
deferred_call(self.add_plot, plot)
viewbox.sigRangeChanged.connect(self._sync_limits)
return viewbox
def _default_viewbox_norm(self):
viewbox = pg.ViewBox(enableMenu=False)
viewbox.setMouseEnabled(x=False, y=False)
viewbox.disableAutoRange()
return viewbox
def update(self, event=None):
for child in self.children:
child.update()
def add_plot(self, plot, label=None):
self.viewbox.addItem(plot)
if label:
self.parent.legend.addItem(plot, label)
def plot(self, x, y, color='k', log_x=False, log_y=False, label=None,
kind='line'):
'''
Convenience function used by plugins
This is typically used in post-processing routines to add static plots
to existing view boxes.
'''
if log_x:
x = np.log10(x)
if log_y:
y = np.log10(y)
x = np.asarray(x)
y = np.asarray(y)
m = np.isfinite(x) & np.isfinite(y)
x = x[m]
y = y[m]
if kind == 'line':
item = pg.PlotCurveItem(pen=pg.mkPen(color))
elif kind == 'scatter':
item = pg.ScatterPlotItem(pen=pg.mkPen(color))
item.setData(x, y)
self.add_plot(item)
if label is not None:
self.parent.legend.addItem(item, label)
################################################################################
# Plots
################################################################################
class BasePlot(PSIContribution):
# Make this weak-referenceable so we can bind methods to Qt slots.
__slots__ = '__weakref__'
source_name = d_(Str())
source = Typed(object)
label = d_(Str())
def update(self, event=None):
pass
def _reset_plots(self):
pass
################################################################################
# Single plots
################################################################################
class SinglePlot(BasePlot):
pen_color = d_(Typed(object))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
label = d_(Str())
pen = Typed(object)
plot = Typed(object)
def get_plots(self):
return [self.plot]
def _default_pen_color(self):
return 'black'
def _default_pen(self):
color = make_color(self.pen_color)
return pg.mkPen(color, width=self.pen_width)
def _default_name(self):
return self.source_name + '_plot'
class ChannelPlot(SinglePlot):
downsample = Int(0)
decimate_mode = d_(Enum('extremes', 'mean'))
_cached_time = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_channel_plot'
def _default_plot(self):
return pg.PlotCurveItem(pen=self.pen, antialias=self.antialias)
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_source(self.source)
self.parent.data_range.observe('span', self._update_time)
self.source.add_callback(self._append_data)
self.parent.viewbox.sigResized.connect(self._update_decimation)
self._update_time(None)
self._update_decimation(self.parent.viewbox)
def _update_time(self, event):
# Precompute the time array since this can be the "slow" point
# sometimes in computations
n = round(self.parent.data_range.span*self.source.fs)
self._cached_time = np.arange(n)/self.source.fs
self._update_decimation()
self._update_buffer()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs,
self.parent.data_range.span*2)
def _update_decimation(self, viewbox=None):
try:
width, _ = self.parent.viewbox.viewPixelSize()
dt = self.source.fs**-1
self.downsample = round(width/dt/2)
except Exception as e:
pass
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def update(self, event=None):
low, high = self.parent.data_range.current_range
data = self._buffer.get_range_filled(low, high, np.nan)
t = self._cached_time[:len(data)] + low
if self.downsample > 1:
t = t[::self.downsample]
if self.decimate_mode == 'extremes':
d_min, d_max = decimate_extremes(data, self.downsample)
t = t[:len(d_min)]
x = np.c_[t, t].ravel()
y = np.c_[d_min, d_max].ravel()
if x.shape == y.shape:
deferred_call(self.plot.setData, x, y, connect='pairs')
elif self.decimate_mode == 'mean':
d = decimate_mean(data, self.downsample)
t = t[:len(d)]
if t.shape == d.shape:
deferred_call(self.plot.setData, t, d)
else:
t = t[:len(data)]
deferred_call(self.plot.setData, t, data)
def _reshape_for_decimate(data, downsample):
# Determine the "fragment" size that we are unable to decimate. A
# downsampling factor of 5 means that we perform the operation in chunks of
# 5 samples. If we have only 13 samples of data, then we cannot decimate
# the last 3 samples and will simply discard them.
last_dim = data.ndim
offset = data.shape[-1] % downsample
if offset > 0:
data = data[..., :-offset]
shape = (len(data), -1, downsample) if data.ndim == 2 else (-1, downsample)
return data.reshape(shape)
def decimate_mean(data, downsample):
# If data is empty, return immediately
if data.size == 0:
return np.array([])
data = _reshape_for_decimate(data, downsample).copy()
return data.mean(axis=-1)
def decimate_extremes(data, downsample):
# If data is empty, return imediately
if data.size == 0:
return np.array([]), np.array([])
# Force a copy to be made, which speeds up min()/max(). Apparently min/max
# make a copy of a reshaped array before performing the operation, so we
# force it now so the copy only occurs once.
data = _reshape_for_decimate(data, downsample).copy()
return data.min(axis=-1), data.max(axis=-1)
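# Worked example (toy data): with downsample=4, an input of np.arange(10) is
# truncated to 8 samples and reshaped to [[0, 1, 2, 3], [4, 5, 6, 7]], so
# decimate_extremes returns mins [0, 4] and maxes [3, 7]; ChannelPlot.update
# interleaves these into a min/max envelope drawn with connect='pairs'.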
class FFTChannelPlot(ChannelPlot):
time_span = d_(Float(1))
window = d_(Enum('hamming', 'flattop'))
_x = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_fft_plot'
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._append_data)
self.source.observe('fs', self._cache_x)
self._update_buffer()
self._cache_x()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs, self.time_span)
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def _cache_x(self, event=None):
if self.source.fs:
self._x = get_x_fft(self.source.fs, self.time_span)
def update(self, event=None):
if self._buffer.get_time_ub() >= self.time_span:
log.debug('Time span %f to %f', -self.time_span, 0)
data = self._buffer.get_latest(-self.time_span, 0)
psd = util.psd(data, self.source.fs, self.window)
spl = self.source.calibration.get_spl(self._x, psd)
deferred_call(self.plot.setData, self._x, spl)
class BaseTimeseriesPlot(SinglePlot):
rect_center = d_(Float(0.5))
rect_height = d_(Float(1))
fill_color = d_(Typed(object))
brush = Typed(object)
_rising = Typed(list, ())
_falling = Typed(list, ())
def _default_brush(self):
return pg.mkBrush(self.fill_color)
def _default_plot(self):
plot = pg.QtGui.QGraphicsPathItem()
plot.setPen(self.pen)
plot.setBrush(self.brush)
return plot
def update(self, event=None):
lb, ub = self.parent.data_range.current_range
current_time = self.parent.data_range.current_time
starts = self._rising
ends = self._falling
if len(starts) == 0 and len(ends) == 1:
starts = [0]
elif len(starts) == 1 and len(ends) == 0:
ends = [current_time]
elif len(starts) > 0 and len(ends) > 0:
if starts[0] > ends[0]:
starts = np.r_[0, starts]
if starts[-1] > ends[-1]:
ends = np.r_[ends, current_time]
try:
epochs = np.c_[starts, ends]
except ValueError as e:
log.exception(e)
log.warning('Unable to update %r, starts shape %r, ends shape %r',
self, starts, ends)
return
m = ((epochs >= lb) & (epochs < ub)) | np.isnan(epochs)
epochs = epochs[m.any(axis=-1)]
path = pg.QtGui.QPainterPath()
y_start = self.rect_center - self.rect_height*0.5
for x_start, x_end in epochs:
x_width = x_end-x_start
r = pg.QtCore.QRectF(x_start, y_start, x_width, self.rect_height)
path.addRect(r)
deferred_call(self.plot.setPath, path)
class EventPlot(BaseTimeseriesPlot):
event = d_(Str())
def _observe_event(self, event):
if self.event is not None:
self.parent.data_range.observe('current_time', self.update)
def _default_name(self):
return self.event + '_timeseries'
def _append_data(self, bound, timestamp):
if bound == 'start':
self._rising.append(timestamp)
elif bound == 'end':
self._falling.append(timestamp)
self.update()
class TimeseriesPlot(BaseTimeseriesPlot):
source_name = d_(Str())
source = Typed(object)
def _default_name(self):
return self.source_name + '_timeseries'
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_event_source(self.source)
self.parent.data_range.observe('current_time', self.update)
self.source.add_callback(self._append_data)
def _append_data(self, data):
for (etype, value) in data:
if etype == 'rising':
self._rising.append(value)
elif etype == 'falling':
self._falling.append(value)
################################################################################
# Group plots
################################################################################
class FixedTextItem(pg.TextItem):
def updateTransform(self, force=False):
p = self.parentItem()
if p is None:
pt = pg.QtGui.QTransform()
else:
pt = p.sceneTransform()
if not force and pt == self._lastTransform:
return
t = pt.inverted()[0]
# reset translation
t.setMatrix(1, t.m12(), t.m13(), t.m21(), 1, t.m23(), 0, 0, t.m33())
# apply rotation
angle = -self.angle
if self.rotateAxis is not None:
d = pt.map(self.rotateAxis) - pt.map(pg.Point(0, 0))
a = np.arctan2(d.y(), d.x()) * 180 / np.pi
angle += a
t.rotate(angle)
self.setTransform(t)
self._lastTransform = pt
self.updateTextPos()
class GroupMixin(ColorCycleMixin):
source = Typed(object)
pen_width = d_(Int(0))
antialias = d_(Bool(False))
plots = Dict()
labels = Dict()
_data_cache = Typed(object)
_data_count = Typed(object)
_data_updated = Typed(object)
_data_n_samples = Typed(object)
_pen_color_cycle = Typed(object)
_plot_colors = Typed(object)
_x = Typed(np.ndarray)
n_update = d_(Int(1))
#: List of attributes that define the tab groups
tab_grouping = d_(List())
#: List of attributes that define the plot groups
plot_grouping = d_(List())
#: List of existing tab keys
tab_keys = d_(List())
#: List of existing plot keys
plot_keys = d_(List())
#: Which tab is currently selected?
selected_tab = d_(Value())
#: Should we auto-select the tab based on the most recently acquired data?
auto_select = d_(Bool(False))
#: What was the most recent tab key seen?
last_seen_key = Value()
#: Function that takes the epoch metadata and returns a key that is used to
#: assign the epoch to a group. Return None to exclude the epoch from the
#: group criteria.
@d_func
def group_key(self, md):
plot_key = tuple(md[a] for a in self.plot_grouping)
tab_key = tuple(md[a] for a in self.tab_grouping)
return tab_key, plot_key
@d_func
def fmt_plot_label(self, key):
return None
def _observe_allow_auto_select(self, event):
if not self.allow_auto_select:
self.auto_select = False
def _default_selected_tab(self):
return ()
def _observe_selected_tab(self, event):
self.update(tab_changed=True)
@observe('last_seen_key', 'auto_select')
def _update_selected_tab(self, event):
if not self.auto_select:
return
if self.last_seen_key is None:
return
if self.last_seen_key[0] != self.selected_tab:
self.selected_tab = self.last_seen_key[0]
def _reset_plots(self):
# Clear any existing plots and reset color cycle
        for plot in self.plots.values():
            self.parent.viewbox.removeItem(plot)
        for label in self.labels.values():
            self.parent.viewbox_norm.removeItem(label)
self.plots = {}
self._data_cache = defaultdict(list)
self._data_count = defaultdict(int)
self._data_updated = defaultdict(int)
self._data_n_samples = defaultdict(int)
def get_plots(self):
return []
def _make_new_plot(self, key):
try:
pen_color = self.get_pen_color(key)
pen = pg.mkPen(pen_color, width=self.pen_width)
plot = pg.PlotCurveItem(pen=pen, antialias=self.antialias)
self.plots[key] = plot
deferred_call(self.parent.viewbox.addItem, plot)
label = self.fmt_plot_label(key)
if label is not None:
text = pg.TextItem(label, color=pen_color,
border=pg.mkPen(pen_color),
fill=pg.mkBrush('w'))
deferred_call(self.parent.viewbox_norm.addItem, text)
self.labels[key] = text
except KeyError as key_error:
key = key_error.args[0]
m = f'Cannot update plot since a field, {key}, ' \
'required by the plot is missing.'
raise ConfigurationException(m) from key_error
def get_plot(self, key):
if key not in self.plots:
self._make_new_plot(key)
return self.plots[key]
class EpochGroupMixin(GroupMixin):
duration = Float()
def _y(self, epoch):
return np.mean(epoch, axis=0) if len(epoch) \
else np.full_like(self._x, np.nan)
def _update_duration(self, event=None):
self.duration = self.source.duration
def _epochs_acquired(self, epochs):
for d in epochs:
key = self.group_key(d['info']['metadata'])
if key is not None:
signal = d['signal']
self._data_cache[key].append(signal)
self._data_count[key] += 1
# Track number of samples
n = max(self._data_n_samples[key], len(signal))
self._data_n_samples[key] = n
self.last_seen_key = key
# Does at least one epoch need to be updated?
self._check_selected_tab_count()
def _get_selected_tab_keys(self):
return [k for k in self._data_count if k[0] == self.selected_tab]
def _check_selected_tab_count(self):
for key in self._get_selected_tab_keys():
current_n = self._data_count[key]
last_n = self._data_updated[key]
if current_n >= (last_n + self.n_update):
n = max(self._data_n_samples.values())
self.duration = n / self.source.fs
self.update()
break
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._epochs_acquired)
self.source.observe('duration', self._update_duration)
self.source.observe('fs', self._cache_x)
self.observe('duration', self._cache_x)
self._reset_plots()
self._cache_x()
def _observe_selected_tab(self, event):
self.update(tab_changed=True)
def update(self, event=None, tab_changed=False):
todo = []
if self._x is None:
return
for pk in self.plot_keys:
plot = self.get_plot(pk)
key = (self.selected_tab, pk)
try:
last_n = self._data_updated[key]
current_n = self._data_count[key]
needs_update = current_n >= (last_n + self.n_update)
if tab_changed or needs_update:
data = self._data_cache[key]
self._data_updated[key] = len(data)
if data:
x = self._x
y = self._y(data)
else:
x = y = np.array([])
todo.append((plot.setData, x, y))
except KeyError:
if tab_changed:
x = y = np.array([])
todo.append((plot.setData, x, y))
def update():
for setter, x, y in todo:
setter(x, y)
deferred_call(update)
class GroupedEpochAveragePlot(EpochGroupMixin, BasePlot):
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.duration:
n_time = round(self.source.fs * self.duration)
self._x = np.arange(n_time)/self.source.fs
def _default_name(self):
return self.source_name + '_grouped_epoch_average_plot'
def _observe_source(self, event):
super()._observe_source(event)
if self.source is not None:
self.parent.data_range.add_source(self.source)
class GroupedEpochFFTPlot(EpochGroupMixin, BasePlot):
def _default_name(self):
return self.source_name + '_grouped_epoch_fft_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return self.source.calibration.get_spl(self._x, util.psd(y, self.source.fs))
class GroupedEpochPhasePlot(EpochGroupMixin, BasePlot):
unwrap = d_(Bool(True))
def _default_name(self):
return self.source_name + '_grouped_epoch_phase_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return util.phase(y, self.source.fs, unwrap=self.unwrap)
class StackedEpochAveragePlot(EpochGroupMixin, BasePlot):
_offset_update_needed = Bool(False)
def _make_new_plot(self, key):
super()._make_new_plot(key)
self._offset_update_needed = True
def _update_offsets(self, vb=None):
vb = self.parent.viewbox
height = vb.height()
n = len(self.plots)
plot_items = sorted(self.plots.items(), reverse=True)
for i, (key, plot) in enumerate(plot_items):
offset = (i+1) * height / (n+1)
point = self.parent.viewbox.mapToView(pg.Point(0, offset))
plot.setPos(0, point.y())
labels = sorted(self.labels.items(), reverse=True)
for i, (key, label) in enumerate(labels):
offset = (i+1) * height / (n+1)
point = self.parent.viewbox_norm.mapToView(pg.Point(0, offset))
label.setPos(0.8, point.y())
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.source.duration:
n_time = round(self.source.fs * self.source.duration)
self._x = np.arange(n_time)/self.source.fs
def update(self, *args, **kwargs):
super().update(*args, **kwargs)
if self._offset_update_needed:
deferred_call(self._update_offsets)
self._offset_update_needed = False
def _reset_plots(self):
super()._reset_plots()
self.parent.viewbox \
.sigRangeChanged.connect(self._update_offsets)
self.parent.viewbox \
.sigRangeChangedManually.connect(self._update_offsets)
################################################################################
# Simple plotters
################################################################################
class ResultPlot(GroupMixin, SinglePlot):
x_column = d_(Str())
y_column = d_(Str())
average = d_(Bool())
SYMBOL_MAP = {
'circle': 'o',
'square': 's',
'triangle': 't',
'diamond': 'd',
}
symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
symbol_size = d_(Float(10))
symbol_size_unit = d_(Enum('screen', 'data'))
def get_plots(self):
return {self.label: self.plot}
def _default_name(self):
return '.'.join((self.parent.name, self.source_name, 'result_plot',
self.x_column, self.y_column))
def _observe_source(self, event):
if self.source is not None:
self._data_cache = {}
self.source.add_callback(self._data_acquired)
def _data_acquired(self, data):
for d in data:
key = self.group_key(d)
if key is not None:
cache = self._data_cache.setdefault(key, {'x': [], 'y': []})
cache['x'].append(d[self.x_column])
cache['y'].append(d[self.y_column])
self.last_seen_key = key
self.update()
def update(self, event=None, tab_changed=False):
default = {'x': [], 'y': []}
key = (self.selected_tab, ())
data = self._data_cache.get(key, default)
x = np.array(data['x'])
y = np.array(data['y'])
if self.average:
            d = pd.DataFrame({'x': x, 'y': y})
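def _average_xy_sketch(x, y):
    """ Hedged sketch (not part of the original module) of the averaging step that
    ResultPlot.update presumably performs when `average` is True: collapse repeated x values
    by taking the mean of their y values. Assumes the module's existing pandas import. """
    d = pd.DataFrame({'x': x, 'y': y}).groupby('x')['y'].mean()
    return d.index.values, d.values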
# -*- coding: utf-8 -*-
"""
Created on Sat May 9 17:44:14 2020
@author: GE702PL
"""
import xlwings as xw
from functools import wraps
import datetime
import pandas as pd
from calendar import monthrange
# LOGS
def log(*m):
print(" ".join(map(str, m)))
def black(s):
return '\033[1;30m%s\033[m' % s
def green(s):
return '\033[1;32m%s\033[m' % s
def red(s):
return '\033[1;31m%s\033[m' % s
def yellow(s):
return '\033[1;33m%s\033[m' % s
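# Usage sketch for the log helpers above (illustrative only):
#   log(green('OK'), 'loaded', 3, 'sheets')   -> prints "OK loaded 3 sheets" with a green OK
#   log(red('FAIL'), 'missing column')        -> prints the message with a red FAIL marker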
def to_dates(df, cols):
""" Changes column format to datetime.
Parameters:
----------
df : dataframe
Dataframe with columns which are falsely not recognised as datetime.
cols : list
list of columns, formats of which need to be corrected.
Returns
----------
df : dataframe with corrected column formats
"""
for col in cols:
        df[col] = pd.to_datetime(df[col])
    return df
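if __name__ == '__main__':
    # Hedged usage sketch for to_dates(); the column names below are invented for illustration.
    _df = pd.DataFrame({'start': ['2020-05-01', '2020-05-09'], 'value': [1, 2]})
    _df = to_dates(_df, ['start'])
    log(green('OK'), 'dtypes:', dict(_df.dtypes))  # 'start' becomes datetime64[ns]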
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
        assert_frame_equal(panelc[2], panel[2])
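    def test_assert_frame_equal_sketch(self):
        # Hedged illustrative addition, not part of the original suite: assert_frame_equal
        # passes silently when two frames match and raises AssertionError with a diff otherwise.
        # (Panel itself was deprecated in pandas 0.20 and removed in 1.0, so this file targets
        # the older pandas versions it was written for.)
        df = DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
        assert_frame_equal(df, df.copy())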
import numpy as np
import pandas as pd
from pathlib import Path
def all_cells_same_opacity(modelpath, ngrid):
cell_opacities = np.array([0.1] * ngrid)
with open(Path(modelpath) / 'opacity.txt', 'w') as fopacity:
fopacity.write(f'{ngrid}\n')
for cellid, opacity in enumerate(cell_opacities):
fopacity.write(f'{cellid+1} {opacity}\n')
def opacity_by_Ye(outputfilepath, griddata):
"""Opacities from Table 1 Tanaka 2020"""
    griddata = pd.DataFrame(griddata)
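def opacity_by_ye_bins_sketch(modelpath, cell_ye):
    """ Hedged sketch of applying a Ye-dependent opacity per cell, mirroring
    all_cells_same_opacity() above. The bin edges and kappa values below are placeholders,
    not the actual Table 1 values from Tanaka et al. 2020. """
    ye_bins = np.array([0.1, 0.2, 0.3, 0.4])      # placeholder Ye bin edges
    kappas = np.array([30., 10., 3., 1., 0.5])    # placeholder opacities (cm^2/g), one per bin
    cell_opacities = kappas[np.digitize(cell_ye, ye_bins)]
    with open(Path(modelpath) / 'opacity.txt', 'w') as fopacity:
        fopacity.write(f'{len(cell_opacities)}\n')
        for cellid, opacity in enumerate(cell_opacities):
            fopacity.write(f'{cellid+1} {opacity}\n')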
# -*- coding: utf-8 -*-
# @Time : 2018/10/3 下午2:36
# @Author : yidxue
import pandas as pd
from common.util_function import *
df1 = pd.DataFrame(data={'name': ['a', 'b', 'c', 'd'], 'gender': ['male', 'male', 'female', 'female']})
df2 = pd.DataFrame(data={'name': ['a', 'b', 'c', 'e'], 'age': [21, 22, 23, 20]})
print_line("inner join")
print_br(pd.merge(df1, df2, on=['name'], how='inner'))
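# Illustrative extension of the example above with the other common join types
# (same df1/df2 and print helpers as defined earlier in this script):
print_line("left join")
print_br(pd.merge(df1, df2, on=['name'], how='left'))    # keeps 'd', its age is NaN
print_line("outer join")
print_br(pd.merge(df1, df2, on=['name'], how='outer'))   # keeps both 'd' and 'e'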
import copy
import itertools
import os
import time
import numpy as np
import pandas as pd
import scipy.io as scio
import yaml
import factorAnalysisIOTools as IOTools
from factorAnalysisCalTools import prepare_RET_dict, time_horizon_dict
def save_df(this: pd.DataFrame,config: dict,file_name):
file_path = os.path.join(config['utility_data_dir'],config[file_name]+".pkl")
if os.path.exists(file_path):
that = pd.read_pickle(file_path)
this = that.append(this)
this.drop_duplicates(subset=['date','code'],inplace=True,keep='last')
this.to_pickle(file_path)
def loadmat2df_interday(path):
temp = scio.loadmat(path)
this = pd.DataFrame(temp['SCORE'],
index=temp['TRADE_DT'].squeeze().tolist(),
columns=[x[0] for x in temp['STOCK_CODE'].squeeze()])
return this
class LoadStockInfo():
def __init__(self,config):
self.config = config
temp = scio.loadmat(os.path.join(config['utility_data_dir'],config['SIZE_file_name']+".mat"))
self.all_date_mat = temp['TRADE_DT'].squeeze()
self.all_code_mat = [x[0] for x in temp['STOCK_CODE'].squeeze()]
self.start_date = self.all_date_mat[self.all_date_mat>=config['start_date']][0]
self.end_date = self.all_date_mat[self.all_date_mat<=config['end_date']][-1]
self.start_indexer = np.where(self.all_date_mat==self.start_date)[0][0]
self.end_indexer = np.where(self.all_date_mat==self.end_date)[0][0]
self.date_list = self.all_date_mat[self.start_indexer:self.end_indexer+1]
self.date_code_product = np.array(list(itertools.product(self.date_list,self.all_code_mat)))
# self.date_code_product = np.array(list(itertools.product(self.date_list,self.all_code_mat)),dtype=[('date','int'),('code','object')])
def load_arr(self,file_name,start_back_days=0,end_forward_days=0):
path = os.path.join(
self.config['utility_data_dir'],
self.config[file_name]+".mat"
)
        # log--20210804: for ZX_1 data, the order and count of the STOCK_CODE entries may differ from the other files.
        # Find where each code of self.all_code_mat sits in stkcd and pick those positions out of score, i.e. a reindex.
        # update: read a DataFrame directly, reindex it, then take .values.
score = loadmat2df_interday(path).reindex(index=self.all_date_mat,columns=self.all_code_mat).values
print(file_name,score.shape)
start = max(self.start_indexer-start_back_days,0)
end = min(self.end_indexer+end_forward_days+1,len(score))
data = score[start:end]
pad_width_axis0 = (
max(start_back_days-self.start_indexer,0),
max(self.end_indexer+end_forward_days+1-len(score),0)
)
if pad_width_axis0 != (0,0):
data = np.pad(data, pad_width=(pad_width_axis0,(0,0)),mode='edge')
return data
def update_limit_300_688(self,start_back_days=0,end_forward_days=0):
code_300 = np.array([x.startswith("300") for x in self.all_code_mat])
code_688 = np.array([x.startswith("688") for x in self.all_code_mat])
chg_date_300 = np.array([x>=20200824 for x in self.all_date_mat])
code_300_score = np.tile(code_300,(len(chg_date_300),1))
code_688_score = np.tile(code_688,(len(chg_date_300),1))
chg_date_300_score = np.tile(chg_date_300.reshape(-1,1),(1,len(code_300)))
assert code_300_score.shape == (len(self.all_date_mat),len(self.all_code_mat)), "code_300 shape wrong"
start = max(self.start_indexer-start_back_days,0)
end = min(self.end_indexer+end_forward_days+1,len(code_300_score))
code_300_data = code_300_score[start:end]
code_688_data = code_688_score[start:end]
chg_date_data = chg_date_300_score[start:end]
return code_300_data,code_688_data,chg_date_data
class UpdateStockPool():
def __init__(self,config_path):
with open(config_path) as f:
self.config = yaml.load(f.read(),Loader=yaml.FullLoader)
self.config['STOCK_INFO_file_path'] = os.path.join(self.config['utility_data_dir'],self.config['STOCK_INFO_file_name']+".pkl")
def update_stock_info(self):
config = copy.deepcopy(self.config)
stock_info_loader = LoadStockInfo(config)
t = time.time()
# ADJFACTOR_2d_forward1 = stock_info_loader.load_arr('ADJFACTOR_file_name',end_forward_days=1)[1:]
# OPEN_2d_forward1 = stock_info_loader.load_arr('ADJFACTOR_file_name',end_forward_days=1)[1:]
# ADJOPEN_2d_forward1 = ADJFACTOR_2d_forward1*OPEN_2d_forward1
FF_CAPITAL_2d = stock_info_loader.load_arr('FF_CAPITAL_file_name')
PRECLOSE_2d_forward1 = stock_info_loader.load_arr('PRECLOSE_file_name',end_forward_days=1)[1:]
# RETURN_TWAP_Q1_2d = stock_info_loader.load_arr('RETURN_TWAP_Q1_file_name',end_forward_days=1)
FCT_HF_TWAP_H1_forward1 = stock_info_loader.load_arr('FCT_HF_TWAP_H1_file_name',end_forward_days=1)[1:]
RETURN_2d = stock_info_loader.load_arr('RETURN_file_name',end_forward_days=1)
        # 20210917: updated the limit-up/down calculation; from 20200824 the ChiNext daily limit was widened to 20%.
        # ChiNext stocks are identified by a stock_code prefix of 300 and STAR Market stocks by 688; the STAR Market
        # has used a 20% limit since listing, while ChiNext switched to 20% on 20200824.
code_300_data,code_688_data,chg_date_data = stock_info_loader.update_limit_300_688()
lmt1_t0 = (abs(RETURN_2d)>0.195)[:-1]
lmt2_t0 = (abs(RETURN_2d)>0.095)[:-1] & (code_688_data==False) & (chg_date_data==False)
lmt3_t0 = (abs(RETURN_2d)>0.095)[:-1] & (code_688_data==False) & (code_300_data==False)
LIMIT_UD_t0_1d = (lmt1_t0 | lmt2_t0 | lmt3_t0).reshape(-1,)
# 20211007: ADJOPEN*limit_ratio*5/6=LIMIT_PRICE_THRESHOLD_H1
lmt1_t1 = (abs(FCT_HF_TWAP_H1_forward1/PRECLOSE_2d_forward1-1)>0.195*5/6)
lmt2_t1 = (abs(FCT_HF_TWAP_H1_forward1/PRECLOSE_2d_forward1-1)>0.095*5/6) & (code_688_data==False) & (chg_date_data==False)
lmt3_t1 = (abs(FCT_HF_TWAP_H1_forward1/PRECLOSE_2d_forward1-1)>0.095*5/6) & (code_688_data==False) & (code_300_data==False)
LIMIT_UD_t1_1d = (lmt1_t1 | lmt2_t1 | lmt3_t1).reshape(-1,)
LIMIT_UD_filter_t0_t1_1d = ((LIMIT_UD_t0_1d + LIMIT_UD_t1_1d) == 0)
# LIMIT_UD_t0_1d = ((abs(RETURN_TWAP_Q1_2d)>0.095)[:-1]).reshape(-1,)
# LIMIT_UD_t1_1d = ((abs(RETURN_TWAP_Q1_2d)>0.095)[1:]).reshape(-1,)
# LIMIT_UD_filter_t0_t1_1d = (LIMIT_UD_t0_1d + LIMIT_UD_t1_1d) == 0
ADJFACTOR_2d = stock_info_loader.load_arr('ADJFACTOR_file_name')
VWAP_2d = stock_info_loader.load_arr('VWAP_file_name')
CLOSE_2d = stock_info_loader.load_arr('CLOSE_file_name')
AMOUNT_1d = stock_info_loader.load_arr('AMOUNT_file_name').reshape(-1,)
ADJVWAP_1d = (ADJFACTOR_2d*VWAP_2d).reshape(-1,)
ADJCLOSE_1d = (ADJFACTOR_2d*CLOSE_2d).reshape(-1,)
CLOSE_1d = CLOSE_2d.reshape(-1,)
FF_SIZE_1d = (FF_CAPITAL_2d*VWAP_2d).reshape(-1,)
ADJFACTOR_2d_back19 = stock_info_loader.load_arr('ADJFACTOR_file_name',start_back_days=19)
VOLUME_2d_back19 = stock_info_loader.load_arr('VOLUME_file_name',start_back_days=19)
ADJVOLUME_2d_back19 = VOLUME_2d_back19/ADJFACTOR_2d_back19
ADJVOLUME_ma20_2d = pd.DataFrame(ADJVOLUME_2d_back19).rolling(20).mean().values[19:]
ADJVOLUME_ma20_1d = ADJVOLUME_ma20_2d.reshape(-1,)
ADJVOLUME_ma20_q20_1d = np.nanquantile(ADJVOLUME_ma20_2d, q=0.2,axis=1,keepdims=False)
ADJVOLUME_ma20_q20_1d = np.repeat(ADJVOLUME_ma20_q20_1d, repeats=ADJVOLUME_ma20_2d.shape[1])
FF_CAPITAL_ma20_2d = stock_info_loader.load_arr('FF_CAPITAL_file_name',start_back_days=19)
FF_CAPITAL_ma20_2d = pd.DataFrame(FF_CAPITAL_ma20_2d).rolling(20).mean().values[19:]
FF_CAPITAL_ma20_1d = FF_CAPITAL_ma20_2d.reshape(-1,)
FF_CAPITAL_ma20_q20_1d = np.nanquantile(FF_CAPITAL_ma20_2d, q=0.2,axis=1,keepdims=False)
FF_CAPITAL_ma20_q20_1d = np.repeat(FF_CAPITAL_ma20_q20_1d, repeats=FF_CAPITAL_ma20_2d.shape[1])
TOTAL_TRADEDAYS_1d = stock_info_loader.load_arr('TOTAL_TRADEDAYS_file_name').reshape(-1,)
HS300_member_1d = stock_info_loader.load_arr('HS300_member_file_name').reshape(-1,)
ZZ500_member_1d = stock_info_loader.load_arr('ZZ500_member_file_name').reshape(-1,)
ISST_1d = stock_info_loader.load_arr('ISST_file_name').reshape(-1,)
ISTRADEDAY_1d = stock_info_loader.load_arr('ISTRADEDAY_file_name').reshape(-1,)
ZX_1_1d = stock_info_loader.load_arr('ZX_1_file_name').reshape(-1,)
SIZE_1d = stock_info_loader.load_arr('SIZE_file_name').reshape(-1,)
LIQUIDTY_1d = stock_info_loader.load_arr('LIQUIDTY_file_name').reshape(-1,)
MOMENTUM_1d = stock_info_loader.load_arr('MOMENTUM_file_name').reshape(-1,)
RESVOL_1d = stock_info_loader.load_arr('RESVOL_file_name').reshape(-1,)
SIZENL_1d = stock_info_loader.load_arr('SIZENL_file_name').reshape(-1,)
SRISK_1d = stock_info_loader.load_arr('SRISK_file_name').reshape(-1,)
ADP_1d = stock_info_loader.load_arr('ADP_file_name').reshape(-1,)
BETA_1d = stock_info_loader.load_arr('BETA_file_name').reshape(-1,)
BTOP_1d = stock_info_loader.load_arr('BTOP_file_name').reshape(-1,)
EARNYILD_1d = stock_info_loader.load_arr('EARNYILD_file_name').reshape(-1,)
GROWTH_1d = stock_info_loader.load_arr('GROWTH_file_name').reshape(-1,)
LEVERAGE_1d = stock_info_loader.load_arr('LEVERAGE_file_name').reshape(-1,)
print("IO time",time.time()-t)
t = time.time()
STOCK_INFO_2d = np.stack(
[
SIZE_1d,ZX_1_1d,ADJVWAP_1d,ADJCLOSE_1d,CLOSE_1d,AMOUNT_1d,FF_SIZE_1d,
HS300_member_1d,ZZ500_member_1d,ISST_1d,
ISTRADEDAY_1d,TOTAL_TRADEDAYS_1d,LIMIT_UD_t0_1d,LIMIT_UD_t1_1d,LIMIT_UD_filter_t0_t1_1d,
ADJVOLUME_ma20_1d,ADJVOLUME_ma20_q20_1d,FF_CAPITAL_ma20_1d,FF_CAPITAL_ma20_q20_1d,
LIQUIDTY_1d,MOMENTUM_1d,RESVOL_1d,SIZENL_1d,SRISK_1d,
ADP_1d,BETA_1d,BTOP_1d,EARNYILD_1d,GROWTH_1d,LEVERAGE_1d
],
axis=1
)
print("concate time",time.time()-t)
t = time.time()
date_code_product_2d = stock_info_loader.date_code_product
STOCK_INFO_cols_name = [
'SIZE','ZX_1','ADJVWAP','ADJCLOSE','CLOSE','AMOUNT','FF_SIZE','HS300_member','ZZ500_member',
'ISST','ISTRADEDAY','TOTAL_TRADEDAYS','LIMIT_UD_t0','LIMIT_UD_t1','LIMIT_UD_filter_t0_t1_1d',
'ADJVOLUME_ma20','ADJVOLUME_ma20_q20','FF_CAPITAL_ma20','FF_CAPITAL_ma20_q20',
'LIQUIDTY','MOMENTUM','RESVOL','SIZENL','SRISK',
'ADP','BETA','BTOP','EARNYILD','GROWTH','LEVERAGE',
'date','code'
]
STOCK_INFO_df = pd.DataFrame(STOCK_INFO_2d.astype('float32'),columns=STOCK_INFO_cols_name[:-2])
STOCK_INFO_df['date'] = date_code_product_2d[:,0].astype('int')
STOCK_INFO_df['code'] = date_code_product_2d[:,1]
print("form df time: ",time.time()-t)
t = time.time()
save_df(STOCK_INFO_df,self.config,'STOCK_INFO_file_name')
self.STOCK_INFO = STOCK_INFO_df
print("save time",time.time()-t)
def update_stock_pool_basic(self,STOCK_INFO_file_path=None):
'''
        Factors are computed at the end of day T0 and this stock pool (SP) is used for grouping; when the grouped
        returns are actually computed later, a further pool SP_T1 of stocks that can be bought on the next day may
        still be needed (impact on the factor analysis?).
        SP_basic:
        - whole market
        # - no limit-up/limit-down on T0 (abs(RETURN) > 0.095)
        # - no limit-up/limit-down on T1
        - not ST
        - is a trading day
        - listed for at least one year (STOCK_INFO.datetime - STOCK_INFO.list_date > 365)
'''
if hasattr(self, 'STOCK_INFO'):
STOCK_INFO = self.STOCK_INFO
else:
if STOCK_INFO_file_path is None:
STOCK_INFO_file_path=self.config['STOCK_INFO_file_path']
STOCK_INFO = pd.read_pickle(STOCK_INFO_file_path)
STOCK_INFO = STOCK_INFO[(STOCK_INFO.date>=self.config['start_date'])&(STOCK_INFO.date<=self.config['end_date'])]
filter_basic = (STOCK_INFO['ISTRADEDAY']==1.0) & (STOCK_INFO['TOTAL_TRADEDAYS']>250) & (STOCK_INFO['ISST']==0.0)
STOCK_POOL_basic = STOCK_INFO.loc[filter_basic,self.config['STOCK_POOL_cols']]
save_df(STOCK_POOL_basic,self.config,'STOCK_POOL_basic_file_name')
def update_stock_pool_cap_vol_drop20(self,STOCK_INFO_file_path=None):
'''
        Factors are computed at the end of day T0 and this stock pool is used for grouping; a further pool SP_T1
        of stocks that can actually be bought on the next day may still be needed when computing grouped returns.
        SP_cap_vol_drop20:
        - whole market
        - drop the bottom 20% by liquidity (the 20-day volume MA must exceed the cross-sectional 20% quantile)
        - drop the bottom 20% by free-float market cap
        - no limit-up/limit-down on T0 (abs(RETURN) > 0.095)
        - no limit-up/limit-down on T1
        - not ST
        - is a trading day
        - listed for at least one year (STOCK_INFO.datetime - STOCK_INFO.list_date > 365)
'''
if hasattr(self, 'STOCK_INFO'):
STOCK_INFO = self.STOCK_INFO
else:
if STOCK_INFO_file_path is None:
STOCK_INFO_file_path=self.config['STOCK_INFO_file_path']
STOCK_INFO = pd.read_pickle(STOCK_INFO_file_path)
STOCK_INFO = STOCK_INFO[(STOCK_INFO.date>=self.config['start_date'])&(STOCK_INFO.date<=self.config['end_date'])]
filter_basic = ( STOCK_INFO['ADJVOLUME_ma20']>STOCK_INFO['ADJVOLUME_ma20_q20'])&\
(STOCK_INFO['FF_CAPITAL_ma20']>STOCK_INFO['FF_CAPITAL_ma20_q20'])&\
(STOCK_INFO['ISTRADEDAY']==1.0) & (STOCK_INFO['TOTAL_TRADEDAYS']>250) & (STOCK_INFO['ISST']==0.0)
STOCK_POOL_basic = STOCK_INFO.loc[filter_basic,self.config['STOCK_POOL_cols']]
save_df(STOCK_POOL_basic,self.config,'STOCK_POOL_cap_vol_drop20_file_name')
def update_stock_HS300(self,STOCK_INFO_file_path=None):
'''
        Stocks that meet the basic requirements and are HS300 index constituents.
'''
if hasattr(self, 'STOCK_INFO'):
STOCK_INFO = self.STOCK_INFO
else:
if STOCK_INFO_file_path is None:
STOCK_INFO_file_path=self.config['STOCK_INFO_file_path']
STOCK_INFO = pd.read_pickle(STOCK_INFO_file_path)
STOCK_INFO = STOCK_INFO[(STOCK_INFO.date>=self.config['start_date'])&(STOCK_INFO.date<=self.config['end_date'])]
filter_HS300 = (STOCK_INFO['HS300_member']==1.0) & (STOCK_INFO['LIMIT_UD_t0'] == 0.0) &\
(STOCK_INFO['ISTRADEDAY']==1.0) & (STOCK_INFO['TOTAL_TRADEDAYS']>250) & (STOCK_INFO['ISST']==0.0)
STOCK_POOL_HS300 = STOCK_INFO.loc[filter_HS300,self.config['STOCK_POOL_cols']]
save_df(STOCK_POOL_HS300,self.config,'STOCK_POOL_HS300_file_name')
def update_stock_ZZ500(self,STOCK_INFO_file_path=None):
if hasattr(self, 'STOCK_INFO'):
STOCK_INFO = self.STOCK_INFO
else:
if STOCK_INFO_file_path is None:
STOCK_INFO_file_path=self.config['STOCK_INFO_file_path']
STOCK_INFO = pd.read_pickle(STOCK_INFO_file_path)
STOCK_INFO = STOCK_INFO[(STOCK_INFO.date>=self.config['start_date'])&(STOCK_INFO.date<=self.config['end_date'])]
filter_ZZ500 = (STOCK_INFO['ZZ500_member']==1.0) &\
(STOCK_INFO['ISTRADEDAY']==1.0) & (STOCK_INFO['TOTAL_TRADEDAYS']>250) & (STOCK_INFO['ISST']==0.0)
STOCK_POOL_ZZ500 = STOCK_INFO.loc[filter_ZZ500,self.config['STOCK_POOL_cols']]
save_df(STOCK_POOL_ZZ500,self.config,'STOCK_POOL_ZZ500_file_name')
def update_stock_ZZ800(self,STOCK_INFO_file_path=None):
if hasattr(self, 'STOCK_INFO'):
STOCK_INFO = self.STOCK_INFO
else:
if STOCK_INFO_file_path is None:
STOCK_INFO_file_path=self.config['STOCK_INFO_file_path']
STOCK_INFO = pd.read_pickle(STOCK_INFO_file_path)
STOCK_INFO = STOCK_INFO[(STOCK_INFO.date>=self.config['start_date'])&(STOCK_INFO.date<=self.config['end_date'])]
filter_ZZ500 = ((STOCK_INFO['ZZ500_member']==1.0) | (STOCK_INFO['HS300_member']==1.0))&\
(STOCK_INFO['ISTRADEDAY']==1.0) & (STOCK_INFO['TOTAL_TRADEDAYS']>250) & (STOCK_INFO['ISST']==0.0)
STOCK_POOL_ZZ500 = STOCK_INFO.loc[filter_ZZ500,self.config['STOCK_POOL_cols']]
save_df(STOCK_POOL_ZZ500,self.config,'STOCK_POOL_ZZ800_file_name')
def update_stock_800(self,STOCK_INFO_file_path=None):
if hasattr(self, 'STOCK_INFO'):
STOCK_INFO = self.STOCK_INFO
else:
if STOCK_INFO_file_path is None:
STOCK_INFO_file_path=self.config['STOCK_INFO_file_path']
            STOCK_INFO = pd.read_pickle(STOCK_INFO_file_path)
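if __name__ == '__main__':
    # Hedged usage sketch: the yaml path below is hypothetical; the config it points to must
    # provide the *_file_name entries, utility_data_dir, start_date/end_date and STOCK_POOL_cols
    # referenced by the methods above.
    usp = UpdateStockPool('factor_analysis_config.yaml')
    usp.update_stock_info()
    usp.update_stock_pool_basic()
    usp.update_stock_HS300()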
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : <EMAIL>
# @Date : 2021/01/20 14:51
"""
Integration and wrappers around parts of pycocotools.

In this code, gt means ground truth (the real annotations) and dt means detection
(the results predicted by a model).

Besides what is defined in label.py,
    CocoGtData handles gt-format data only
    CocoData handles both gt and dt format data
and two classes are exposed here:
    CocoEval computes the COCO metrics
    CocoMatch performs some higher-level analysis of the results

The generated results can be opened with xllabelme (pip install xllabelme)
"""
from pyxllib.prog.pupil import check_install_package
check_install_package('xlcocotools')
from collections import ChainMap, defaultdict, Counter
import copy
import json
import os
import pathlib
import random
import sys
import pandas as pd
import PIL
from tqdm import tqdm
from pyxllib.file.packlib.zipfile import ZipFile
from pyxllib.prog.newbie import round_int
from pyxllib.prog.pupil import DictTool
from pyxllib.prog.specialist import mtqdm
from pyxllib.algo.pupil import Groups, make_index_function, matchpairs
from pyxllib.algo.geo import rect_bounds, rect2polygon, reshape_coords, ltrb2xywh, xywh2ltrb, ComputeIou
from pyxllib.algo.stat import dataframes_to_excel
from pyxllib.file.specialist import File, Dir, PathGroups
from pyxllib.debug.specialist import get_xllog
from pyxlpr.data.icdar import IcdarEval
from pyxlpr.data.labelme import LABEL_COLORMAP7, ToLabelmeJson, LabelmeDataset, LabelmeDict
from xlcocotools.coco import COCO
from xlcocotools.cocoeval import COCOeval
class CocoGtData:
""" 类coco格式的json数据处理
不一定要跟coco gt结构完全相同,只要相似就行,
比如images、annotaions、categories都可以扩展自定义字段
"""
def __init__(self, gt):
self.gt_dict = gt if isinstance(gt, dict) else File(gt).read()
@classmethod
def gen_image(cls, image_id, file_name, height=None, width=None, **kwargs):
""" 初始化一个图片标注,使用位置参数,复用的时候可以节省代码量 """
# 没输入height、width时会自动从file_name读取计算
# 但千万注意,这里coco的file_name输入的是相对路径,并不一定在工作目录下能work,一般还是推荐自己输入height、width
if height is None or width is None:
width, height = PIL.Image.open(str(file_name)).size
im = {'id': int(image_id), 'file_name': file_name,
'height': int(height), 'width': int(width)}
if kwargs:
im.update(kwargs)
return im
@classmethod
def gen_images(cls, imdir, start_idx=1):
""" 自动生成标准的images字段
:param imdir: 图片目录
:param start_idx: 图片起始下标
:return: list[dict(id, file_name, width, height)]
"""
files = Dir(imdir).select_files(['*.jpg', '*.png'])
images = []
for i, f in enumerate(files, start=start_idx):
            w, h = PIL.Image.open(str(f)).size
images.append({'id': i, 'file_name': f.name, 'width': w, 'height': h})
return images
@classmethod
def points2segmentation(cls, pts):
""" labelme的points结构转segmentation分割结构
"""
# 1 两个点要转4个点
if len(pts) == 2:
pts = rect2polygon(pts)
else:
pts = list(pts)
        # 2 The point set has to be closed, so append point 0 at the end
        pts.append(pts[0])
        # A polygon stores every vertex plus the closing point, so keep integers only to save space
pts = [round_int(v) for v in reshape_coords(pts, 1)]
return pts
@classmethod
def gen_annotation(cls, **kwargs):
""" 智能地生成一个annotation字典
这个略微有点过度封装了
但没事,先放着,可以不拿出来用~~
:param points: 必须是n*2的结构
"""
a = kwargs.copy()
# a = {'id': 0, 'area': 0, 'bbox': [0, 0, 0, 0],
# 'category_id': 1, 'image_id': 0, 'iscrowd': 0, 'segmentation': []}
        if 'points' in a:  # points is a special parameter: annotate with a single polygon (unlike segmentation, which holds several)
if 'segmentation' not in a:
a['segmentation'] = [cls.points2segmentation(a['points'])]
del a['points']
if 'bbox' not in a:
pts = []
for seg in a['segmentation']:
pts += seg
a['bbox'] = ltrb2xywh(rect_bounds(pts))
        if 'area' not in a:  # compute the area automatically
a['area'] = int(a['bbox'][2] * a['bbox'][3])
for k in ['id', 'image_id']:
if k not in a:
a[k] = 0
if 'category_id' not in a:
a['category_id'] = 1
if 'iscrowd' not in a:
a['iscrowd'] = 0
return a
@classmethod
def gen_quad_annotations(cls, file, *, image_id, start_box_id, category_id=1, **kwargs):
""" 解析一张图片对应的txt标注文件
:param file: 标注文件,有多行标注
每行是x1,y1,x2,y2,x3,y3,x4,y4[,label] (label可以不存在)
:param image_id: 该图片id
:param start_box_id: box_id起始编号
:param category_id: 归属类别
"""
lines = File(file).read()
box_id = start_box_id
annotations = []
for line in lines.splitlines():
vals = line.split(',', maxsplit=8)
if len(vals) < 2: continue
attrs = {'id': box_id, 'image_id': image_id, 'category_id': category_id}
if len(vals) == 9:
attrs['label'] = vals[8]
# print(vals)
seg = [int(v) for v in vals[:8]]
attrs['segmentation'] = [seg]
attrs['bbox'] = ltrb2xywh(rect_bounds(seg))
if kwargs:
attrs.update(kwargs)
annotations.append(cls.gen_annotation(**attrs))
box_id += 1
return annotations
@classmethod
def gen_categories(cls, cats):
if isinstance(cats, list):
            # If the input is a plain list of category names, number them 1, 2, 3, ... in order
return [{'id': i, 'name': x, 'supercategory': ''} for i, x in enumerate(cats, start=1)]
else:
raise TypeError
        # TODO support other construction methods
@classmethod
def gen_gt_dict(cls, images, annotations, categories, outfile=None):
data = {'images': images, 'annotations': annotations, 'categories': categories}
if outfile is not None:
File(outfile).write(data)
return data
@classmethod
def is_gt_dict(cls, gt_dict):
if isinstance(gt_dict, (tuple, list)):
return False
has_keys = set('images annotations categories'.split())
return not (has_keys - gt_dict.keys())
def clear_gt_segmentation(self, *, inplace=False):
""" 有的coco json文件太大,如果只做普通的bbox检测任务,可以把segmentation的值删掉
"""
gt_dict = self.gt_dict if inplace else copy.deepcopy(self.gt_dict)
for an in gt_dict['annotations']:
an['segmentation'] = []
return gt_dict
def get_catname_func(self):
id2name = {x['id']: x['name'] for x in self.gt_dict['categories']}
def warpper(cat_id, default=...):
"""
:param cat_id:
            :param default: value to return when there is no match;
                ... is not a default value here but means raise an error when no match is found
:return:
"""
if cat_id in id2name:
return id2name[cat_id]
else:
if default is ...:
raise IndexError(f'{cat_id}')
else:
return default
return warpper
def _group_base(self, group_anns, reserve_empty=False):
if reserve_empty:
for im in self.gt_dict['images']:
yield im, group_anns.get(im['id'], [])
else:
id2im = {im['id']: im for im in self.gt_dict['images']}
for k, v in group_anns.items():
yield id2im[k], v
def group_gt(self, *, reserve_empty=False):
""" 遍历gt的每一张图片的标注
这个是用字典的方式来实现分组,没用 df.groupby 的功能
:param reserve_empty: 是否保留空im对应的结果
:return: [(im, annos), ...] 每一组是im标注和对应的一组annos标注
"""
group_anns = defaultdict(list)
[group_anns[an['image_id']].append(an) for an in self.gt_dict['annotations']]
return self._group_base(group_anns, reserve_empty)
def select_gt(self, ids, *, inplace=False):
""" 删除一些images标注(会删除对应的annotations),挑选数据,或者减小json大小
:param ids: int类型表示保留的图片id,str类型表示保留的图片名,可以混合使用
[341427, 'PMC4055390_00006.jpg', ...]
:return: 筛选出的新字典
"""
gt_dict = self.gt_dict
# 1 ids 统一为int类型的id值
if not isinstance(ids, (list, tuple, set)):
ids = [ids]
map_name2id = {item['file_name']: item['id'] for item in gt_dict['images']}
ids = set([(map_name2id[x] if isinstance(x, str) else x) for x in ids])
# 2 简化images和annotations
dst = {'images': [x for x in gt_dict['images'] if (x['id'] in ids)],
'annotations': [x for x in gt_dict['annotations'] if (x['image_id'] in ids)],
'categories': gt_dict['categories']}
if inplace: self.gt_dict = dst
return dst
def random_select_gt(self, number=20, *, inplace=False):
""" 从gt中随机抽出number个数据 """
ids = [x['id'] for x in self.gt_dict['images']]
random.shuffle(ids)
gt_dict = self.select_gt(ids[:number])
if inplace: self.gt_dict = gt_dict
return gt_dict
def select_gt_by_imdir(self, imdir, *, inplace=False):
""" 基于imdir目录下的图片来过滤src_json """
# 1 对比下差异
json_images = set([x['file_name'] for x in self.gt_dict['images']])
dir_images = set(os.listdir(str(imdir)))
# df = SetCmper({'json_images': json_images, 'dir_images': dir_images}).intersection()
# print('json_images intersection dir_images:')
# print(df)
# 2 精简json
gt_dict = self.select_gt(json_images & dir_images)
if inplace: self.gt_dict = gt_dict
return gt_dict
def reset_image_id(self, start=1, *, inplace=False):
""" 按images顺序对图片重编号 """
gt_dict = self.gt_dict if inplace else copy.deepcopy(self.gt_dict)
# 1 重置 images 的 id
old2new = {}
for i, im in enumerate(gt_dict['images'], start=start):
old2new[im['id']] = i
im['id'] = i
# 2 重置 annotations 的 id
for anno in gt_dict['annotations']:
anno['image_id'] = old2new[anno['image_id']]
return gt_dict
def reset_box_id(self, start=1, *, inplace=False):
anns = self.gt_dict['annotations']
if not inplace:
anns = copy.deepcopy(anns)
for i, anno in enumerate(anns, start=start):
anno['id'] = i
return anns
def to_labelme_cls(self, root, *, bbox=True, seg=False, info=False):
"""
        :param root: image root directory
        :return:
            extdata stores some information about matching anomalies
"""
root, data = Dir(root), {}
catid2name = {x['id']: x['name'] for x in self.gt_dict['categories']}
# 1 准备工作,构建文件名索引字典
gs = PathGroups.groupby(root.select_files('**/*'))
# 2 遍历生成labelme数据
        not_finds = set()  # images present in coco but not found under root
        multimatch = dict()  # images in coco that match more than one file under root
for img, anns in tqdm(self.group_gt(reserve_empty=True), disable=not info):
# 2.1 文件匹配
imfiles = gs.find_files(img['file_name'])
            if not imfiles:  # no matching image file, skip it
not_finds.add(img['file_name'])
continue
elif len(imfiles) > 1:
multimatch[img['file_name']] = imfiles
imfile = imfiles[0]
else:
imfile = imfiles[0]
# 2.2 数据内容转换
lmdict = LabelmeDict.gen_data(imfile)
img = DictTool.or_(img, {'xltype': 'image'})
lmdict['shapes'].append(LabelmeDict.gen_shape(json.dumps(img, ensure_ascii=False), [[-10, 0], [-5, 0]]))
for ann in anns:
if bbox:
ann = DictTool.or_(ann, {'category_name': catid2name[ann['category_id']]})
label = json.dumps(ann, ensure_ascii=False)
shape = LabelmeDict.gen_shape(label, xywh2ltrb(ann['bbox']))
lmdict['shapes'].append(shape)
if seg:
# 把分割也显示出来(用灰色)
for x in ann['segmentation']:
an = {'box_id': ann['id'], 'xltype': 'seg', 'shape_color': [191, 191, 191]}
label = json.dumps(an, ensure_ascii=False)
lmdict['shapes'].append(LabelmeDict.gen_shape(label, x))
f = imfile.with_suffix('.json')
data[f.relpath(root)] = lmdict
return LabelmeDataset(root, data,
extdata={'categories': self.gt_dict['categories'],
'not_finds': not_finds,
'multimatch': Groups(multimatch)})
def to_labelme(self, root, *, bbox=True, seg=False, info=False):
self.to_labelme_cls(root, bbox=bbox, seg=seg, info=info).writes()
def split_data(self, parts, *, shuffle=True):
""" 数据拆分器
:param dict parts: 每个部分要拆分、写入的文件名,以及数据比例
py≥3.6的版本中,dict的key是有序的,会按顺序处理开发者输入的清单
这里比例求和可以不满1,但不能超过1
:param bool shuffle: 是否打乱原有images顺序
:return: 同parts的字典,但值变成了拆分后的coco数据
"""
# 1 读入data
assert sum(parts.values()) <= 1, '比例和不能超过1'
data = self.gt_dict
if shuffle:
data = data.copy()
data['images'] = data['images'].copy()
random.shuffle(data['images'])
        # 2 Generate the file for each part
def select_annotations(annotations, image_ids):
            # a simple for loop with an if test, written as a list comprehension
return [an for an in annotations if (an['image_id'] in image_ids)]
res = {}
total_num, used_rate = len(data['images']), 0
for k, v in parts.items():
# 2.1 选择子集图片
images = data['images'][int(used_rate * total_num):int((used_rate + v) * total_num)]
image_ids = {im['id'] for im in images}
# 2.2 生成新的字典
res[k] = {'images': images,
'annotations': select_annotations(data['annotations'], image_ids),
'categories': data['categories']}
# 2.4 更新使用率
used_rate += v
return res
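def _demo_build_gt():  # pragma: no cover
    """ Hedged illustrative helper (not part of the original API): assemble a minimal gt dict
    with the CocoGtData class methods above. The file name, sizes and box coordinates are made up. """
    images = [CocoGtData.gen_image(1, 'demo.jpg', height=480, width=640)]
    annotations = [CocoGtData.gen_annotation(id=1, image_id=1, category_id=1,
                                             points=[[10, 10], [50, 40]])]
    categories = CocoGtData.gen_categories(['text'])
    return CocoGtData.gen_gt_dict(images, annotations, categories)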
class CocoData(CocoGtData):
""" 这个类可以封装一些需要gt和dt衔接的功能 """
def __init__(self, gt, dt=None, *, min_score=0):
"""
        :param gt: gt dict or file
            gt is required; it is fine to pass gt only.
            Some tasks could in theory use dt alone, but passing the matching gt enables much more.
        :param dt: dt list or file
        :param min_score: the CocoMatch family of classes takes min_score at init to drop low-confidence dt boxes directly
"""
super().__init__(gt)
def get_dt_list(dt, min_score=0):
# dt
default_dt = []
# default_dt = [{'image_id': self.gt_dict['images'][0]['id'],
# 'category_id': self.gt_dict['categories'][0]['id'],
# 'bbox': [0, 0, 1, 1],
# 'score': 1}]
# 这样直接填id有很大的风险,可能会报错。但是要正确填就需要gt的信息,传参麻烦~~
# default_dt = [{'image_id': 1, 'category_id': 1, 'bbox': [0, 0, 1, 1], 'score': 1}]
if not dt:
dt_list = default_dt
else:
dt_list = dt if isinstance(dt, (list, tuple)) else File(dt).read()
if min_score:
dt_list = [b for b in dt_list if (b['score'] >= min_score)]
if not dt_list:
dt_list = default_dt
return dt_list
self.dt_list = get_dt_list(dt, min_score)
@classmethod
def is_dt_list(cls, dt_list):
if not isinstance(dt_list, (tuple, list)):
return False
item = dt_list[0]
has_keys = set('score image_id category_id bbox'.split())
return not (has_keys - item.keys())
def select_dt(self, ids, *, inplace=False):
gt_dict, dt_list = self.gt_dict, self.dt_list
# 1 ids 统一为int类型的id值
if not isinstance(ids, (list, tuple, set)):
ids = [ids]
if gt_dict:
map_name2id = {item['file_name']: item['id'] for item in gt_dict['images']}
ids = [(map_name2id[x] if isinstance(x, str) else x) for x in ids]
ids = set(ids)
# 2 简化images
dst = [x for x in dt_list if (x['image_id'] in ids)]
if inplace: self.dt_list = dst
return dst
def group_dt(self, *, reserve_empty=False):
""" 对annos按image_id分组,返回 [(im1, dt_anns1), (im2, dt_anns2), ...] """
group_anns = defaultdict(list)
[group_anns[an['image_id']].append(an) for an in self.dt_list]
return self._group_base(group_anns, reserve_empty)
def group_gt_dt(self, *, reserve_empty=False):
""" 获得一张图片上gt和dt的标注结果
[(im, gt_anns, dt_anns), ...]
"""
raise NotImplementedError
def to_icdar_label_quad(self, outfile, *, min_score=0):
""" 将coco的dt结果转为icdar的标注格式
存成一个zip文件,zip里面每张图对应一个txt标注文件
每个txt文件用quad八个数值代表一个标注框
适用于 sroie 检测格式
"""
# 1 获取dt_list
if min_score:
dt_list = [b for b in self.dt_list if (b['score'] >= min_score)]
else:
dt_list = self.dt_list
# 2 转df,按图片分组处理
df = pd.DataFrame.from_dict(dt_list) # noqa from_dict可以传入List[Dict]
df = df.groupby('image_id')
# 3 建立一个zip文件
myzip = ZipFile(str(outfile), 'w')
# 4 遍历每一组数据,生成一个文件放到zip里面
id2name = {im['id']: pathlib.Path(im['file_name']).stem for im in self.gt_dict['images']}
for image_id, items in df:
label_file = id2name[image_id] + '.txt'
quads = [rect2polygon(xywh2ltrb(x), dtype=int).reshape(-1) for x in items['bbox']]
quads = [','.join(map(str, x)) for x in quads]
myzip.writestr(label_file, '\n'.join(quads))
myzip.close()
class Coco2Labelme(ToLabelmeJson):
""" coco格式的可视化
TODO segmentation 分割 效果的可视化
"""
def add_segmentation(self, row):
""" 分割默认先都用灰色标注 """
r = dict()
r['gt_box_id'] = row['gt_box_id']
r['label'] = 'seg'
r['points'] = row['gt_ltrb']
r['shape_color'] = [191, 191, 191]
# 5 保存
self.add_shape2(**r)
# def _sort_anns(self, anns):
# if anns and 'score' in anns[0]:
# anns = sorted(anns, key=lambda x: -x['score']) # 权重从大到小排序
# return anns
def add_gt_shape(self, row, attrs=None):
"""
        :param row: one row of the df as a Series
        :param attrs: extra field values to force
"""
# 1 基本字段
r = dict()
for name in ['gt_box_id', 'gt_category_id', 'gt_area']:
r[name] = row[name]
r['gt_ltrb'] = ','.join(map(str, row['gt_ltrb']))
# 2 主要字段
r['label'] = row['gt_category_name'] # 这个需要上层的anns_match2, labelme_match传入的df实现提供这个字段
r['points'] = row['gt_ltrb']
if row['gt_supercategory'] != '':
r['group_id'] = row['gt_supercategory']
# 3 row中其他自定义字段
# 这些是已经处理过的标准字段,进入黑名单,不显示;其他字段默认白名单都显示
std_an_keys = set('gt_box_id gt_category_id gt_ltrb gt_area iscrowd file_name '
'gt_category_name gt_supercategory gt_segmentation dt_segmentation'.split())
# 如果跟labelme的标准字段重名了,需要区分下:比如 label
std_lm_keys = set('label points group_id shape_type flags'.split()) # labelme的标准字段
ks = set(row.index) - std_an_keys
for k in ks:
if k in std_lm_keys:
r['_' + k] = row[k]
else:
r[k] = row[k]
if 'dt_ltrb' in r:
r['dt_ltrb'] = ','.join(map(str, r['dt_ltrb']))
# 4 精简字段:聚合以dt、gt为前缀的所有字段
group_keys = defaultdict(list)
res = dict()
for k, v in r.items():
for part in ('dt', 'gt'):
if k.startswith(part + '_'):
group_keys[part].append(k)
break
else:
res[k] = v
# 聚合后的属性排序准则
order = ['category_id', 'category_name', 'score', 'ltrb', 'area', 'box_id']
idxfunc = make_index_function(order)
for part in ('dt', 'gt'):
keys = group_keys[part]
m = len(part) + 1
keys.sort(key=lambda k: idxfunc(k[m:]))
res[part] = '/'.join([str(r[k]) for k in keys]) # 数值拼接
res['~' + part] = '/'.join([str(k[m:]) for k in keys]) # 解释key,如果很熟悉了可以选择关闭
# 5 扩展字段
if attrs:
res.update(attrs)
# 6 保存
self.add_shape2(**res)
def add_dt_shape(self, row, attrs=None):
# 1 基本字段
r = dict()
for name in ['iou', 'dt_category_id', 'dt_score']:
r[name] = row[name]
r['dt_ltrb'] = ','.join(map(str, row['dt_ltrb']))
# 2 主要字段
r['label'] = row['dt_category_name']
if 'dt_segmentation' in row:
r['points'] = row['dt_segmentation'][0]
else:
r['points'] = row['dt_ltrb']
# 3 扩展字段
if attrs:
r.update(attrs)
# 4 保存
self.add_shape2(**r)
def _anns_init(self, df, segmentation=False):
df = df.copy()
df.drop(['image_id'], axis=1, inplace=True)
columns = df.columns
if segmentation:
pass
else:
if 'gt_segmentation' in columns:
df.drop('gt_segmentation', axis=1, inplace=True)
if 'dt_segmentation' in columns:
df.drop('dt_segmentation', axis=1, inplace=True)
return df
def anns_gt(self, df, *, segmentation=False, shape_attrs=None):
""" Coco2Df.gt的可视化
:param df: Coco2Df生成的df后,输入特定的某一组image_id、file_name
:param segmentation: 是否显示segmentation分割效果
:param shape_attrs: 人工额外强制设置的字段值
"""
df = self._anns_init(df, segmentation)
for idx, row in df.iterrows():
if segmentation:
self.add_segmentation(row)
self.add_gt_shape(row, shape_attrs)
def anns_match(self, df, *, hide_match_dt=False, segmentation=False, shape_attrs=None):
""" Coco2Df.match的可视化
正确的gt用绿框,位置匹配到但类别错误的用黄框,绿黄根据iou设置颜色深浅,此时dt统一用灰色框
漏检的gt用红框,多余的dt用蓝框
:param hide_match_dt: 不显示灰色的dt框
TODO 研究labelme shape的flags参数含义,支持shape的过滤显示?
"""
df = self._anns_init(df, segmentation)
if not shape_attrs:
shape_attrs = {}
def get_attrs(d):
return dict(ChainMap(shape_attrs, d))
for idx, row in df.iterrows():
r = row
            if r['gt_category_id'] == -1:  # extra dt with no matching gt
                self.add_dt_shape(r, get_attrs({'shape_color': [0, 0, 255]}))
            elif r['dt_category_id'] == -1:  # gt that was not matched by any dt
                self.add_gt_shape(r, get_attrs({'shape_color': [255, 0, 0]}))
            else:  # matched gt and dt
if not hide_match_dt:
self.add_dt_shape(r, get_attrs({'shape_color': [191, 191, 191]}))
color_value = int(255 * r['iou'])
if r['gt_category_id'] == r['dt_category_id']:
self.add_gt_shape(r, get_attrs({'shape_color': [0, color_value, 0]}))
else:
self.add_gt_shape(r, get_attrs({'shape_color': [color_value, color_value, 0]}))
def anns_match2(self, df, *, hide_match_dt=False, segmentation=False, shape_attrs=None, colormap=None):
""" 按类别区分框颜色
"""
import imgviz
df = self._anns_init(df, segmentation)
if not shape_attrs:
shape_attrs = {}
def get_attrs(d):
return dict(ChainMap(shape_attrs, d))
if not colormap:
colormap = imgviz.label_colormap(value=200)
m = len(colormap)
for idx, row in df.iterrows():
r = row
attrs = {'shape_color': colormap[r['gt_category_id'] % m],
'vertex_fill_color': colormap[r['dt_category_id'] % m]}
            if r['gt_category_id'] == -1:  # extra dt with no matching gt
                self.add_dt_shape(r, get_attrs(attrs))
            elif r['dt_category_id'] == -1:  # gt that was not matched by any dt
                self.add_gt_shape(r, get_attrs(attrs))
            else:  # matched gt and dt
if not hide_match_dt:
self.add_dt_shape(r, get_attrs({'shape_color': [191, 191, 191]}))
attrs['vertex_fill_color'] = [int(r['iou'] * v) for v in attrs['vertex_fill_color']]
self.add_gt_shape(r, get_attrs(attrs))
class CocoEval(CocoData):
def __init__(self, gt, dt, iou_type='bbox', *, min_score=0, print_mode=False):
"""
        TODO coco_gt and coco_dt already hold a lot of annotation data, which is somewhat redundant; could they be merged with gt_dict, dt_list etc. to drop the unnecessary parts?
"""
super().__init__(gt, dt, min_score=min_score)
# type
self.iou_type = iou_type
# evaluater
self.coco_gt = COCO(gt, print_mode=print_mode) # 这不需要按图片、类型分类处理
self.coco_dt, self.evaluater = None, None
if self.dt_list:
self.coco_dt = self.coco_gt.loadRes(self.dt_list) # 这个返回也是coco对象
self.evaluater = COCOeval(self.coco_gt, self.coco_dt, iou_type, print_mode=print_mode)
@classmethod
def evaluater_eval(cls, et, img_ids=None, *, print_mode=False):
""" coco官方目标检测测评方法
https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
:param img_ids:
:param print_mode: 注意这里的print_mode不同于初始化的print_mode,指的是不同的东西
:return:
"""
# 1 coco是有方法支持过滤id,只计算部分图的分值结果
# 没有输入img_ids,也要显式重置为全部数据
if not img_ids:
img_ids = et.cocoGt.imgIds.values()
et.params.imgIds = list(img_ids)
# 2 每张图片、每个类别的iou等核心数据的计算
et.evaluate()
# 在不同参数测评指标下的分数
et.accumulate()
# 3 显示结果
if print_mode: # 如果要显示结果则使用标准计算策略
et.summarize(print_mode=print_mode)
return round(et.stats[0], 4)
else: # 否则简化计算过程
return round(et.step_summarize(), 4)
def eval(self, img_ids=None, *, print_mode=False):
return self.evaluater_eval(self.evaluater, img_ids=img_ids, print_mode=print_mode)
def eval_dt_score(self, step=0.1):
""" 计算按一定阈值滤除框后,对coco指标产生的影响 """
dt_list = copy.copy(self.dt_list)
i = 0
records = []
columns = ['≥dt_score', 'n_dt_box', 'coco_score']
while i < 1:
dt_list = [x for x in dt_list if x['score'] >= i]
if not dt_list: break
coco_dt = self.coco_gt.loadRes(dt_list)
evaluater = COCOeval(self.coco_gt, coco_dt, self.iou_type)
records.append([i, len(dt_list), self.evaluater_eval(evaluater)])
i += step
df = | pd.DataFrame.from_records(records, columns=columns) | pandas.DataFrame.from_records |
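# --- Added usage sketch (hedged, not from the original row) ---
# A minimal way the CocoEval class above might be driven, assuming the truncated
# method bodies are completed and that gt/dt can be given as COCO-format json
# paths, as with the underlying pycocotools API. The file names 'gt.json' and
# 'dt.json', the min_score, and the img_ids are illustrative assumptions only.
if __name__ == "__main__":
    evaluator = CocoEval('gt.json', 'dt.json', iou_type='bbox', min_score=0.05)
    print(evaluator.eval(print_mode=True))        # overall mAP over all images
    print(evaluator.eval(img_ids=[1, 2, 3]))      # mAP restricted to a few images
    print(evaluator.eval_dt_score(step=0.05))     # mAP as low-score boxes are dropped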
from itertools import product
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_index_equal
@pytest.mark.parametrize("problem_type", ["binary", "multi"])
def test_new_unique_targets_in_score(
X_y_binary,
logistic_regression_binary_pipeline_class,
X_y_multi,
logistic_regression_multiclass_pipeline_class,
problem_type,
):
if problem_type == "binary":
X, y = X_y_binary
pipeline = logistic_regression_binary_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
objective = "Log Loss Binary"
elif problem_type == "multi":
X, y = X_y_multi
pipeline = logistic_regression_multiclass_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
objective = "Log Loss Multiclass"
pipeline.fit(X, y)
with pytest.raises(ValueError, match="y contains previously unseen labels"):
pipeline.score(X, pd.Series([4] * len(y)), [objective])
@pytest.mark.parametrize(
"problem_type,use_ints", product(["binary", "multi"], [True, False])
)
def test_pipeline_has_classes_property(
breast_cancer_local,
wine_local,
logistic_regression_binary_pipeline_class,
logistic_regression_multiclass_pipeline_class,
problem_type,
use_ints,
):
if problem_type == "binary":
X, y = breast_cancer_local
pipeline = logistic_regression_binary_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
if use_ints:
y = y.map({"malignant": 0, "benign": 1})
answer = [0, 1]
else:
answer = ["benign", "malignant"]
elif problem_type == "multi":
X, y = wine_local
pipeline = logistic_regression_multiclass_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
if use_ints:
y = y.map({"class_0": 0, "class_1": 1, "class_2": 2})
answer = [0, 1, 2]
else:
answer = ["class_0", "class_1", "class_2"]
# Check that .classes_ is None before fitting
assert pipeline.classes_ is None
pipeline.fit(X, y)
assert pipeline.classes_ == answer
def test_woodwork_classification_pipeline(
breast_cancer_local, logistic_regression_binary_pipeline_class
):
X, y = breast_cancer_local
mock_pipeline = logistic_regression_binary_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
mock_pipeline.fit(X, y)
assert not pd.isnull(mock_pipeline.predict(X)).any()
assert not pd.isnull(mock_pipeline.predict_proba(X)).any().any()
@pytest.mark.parametrize(
"index",
[
list(range(-5, 0)),
list(range(100, 105)),
[f"row_{i}" for i in range(5)],
pd.date_range("2020-09-08", periods=5),
],
)
@pytest.mark.parametrize("problem_type", ["binary", "multi"])
def test_pipeline_transform_and_predict_with_custom_index(
index,
problem_type,
logistic_regression_binary_pipeline_class,
logistic_regression_multiclass_pipeline_class,
):
X = pd.DataFrame(
{"categories": [f"cat_{i}" for i in range(5)], "numbers": np.arange(5)},
index=index,
)
X.ww.init(logical_types={"categories": "categorical"})
if problem_type == "binary":
y = pd.Series([0, 1, 1, 1, 0], index=index)
pipeline = logistic_regression_binary_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
elif problem_type == "multi":
y = pd.Series([0, 1, 2, 1, 0], index=index)
pipeline = logistic_regression_multiclass_pipeline_class(
parameters={"Logistic Regression Classifier": {"n_jobs": 1}}
)
pipeline.fit(X, y)
predictions = pipeline.predict(X)
predict_proba = pipeline.predict_proba(X)
assert_index_equal(predictions.index, X.index)
| assert_index_equal(predict_proba.index, X.index) | pandas.testing.assert_index_equal |
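# --- Added illustration (hedged) ---
# A self-contained look at what the custom-index tests above assert: results
# returned as pandas objects should keep the caller's index. Only pandas and
# numpy are used; no pipeline objects are assumed and all names are local.
if __name__ == "__main__":
    idx = pd.date_range("2020-09-08", periods=5)
    X_demo = pd.DataFrame({"numbers": np.arange(5)}, index=idx)
    fake_predictions = pd.Series(np.zeros(5), index=X_demo.index)
    assert_index_equal(fake_predictions.index, X_demo.index)  # passes: index preserved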
# -*- coding: utf-8 -*-
"""dlw9383-bandofthehawk-output.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/154b5GvPxORu_mhpHDIsNlWvyBxMIwEw2
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(15,10)})
| pd.set_option("precision", 10) | pandas.set_option |
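# --- Added note (hedged) ---
# Newer pandas releases warn on, and eventually reject, the bare "precision"
# alias used above; the fully qualified option name below is the portable
# spelling. The value 10 simply mirrors the original call.
pd.set_option("display.precision", 10)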
import numpy as np
import os
import pandas as pd
import PySimpleGUI as sg
import csv
class Demonstrativo_cartao:
def __init__(self, nome, validade, lista_devedores, lista_compras, valores=None):
self.nome = nome
self.validade = validade
self.lista_devedores = lista_devedores
self.lista_compras = lista_compras
self.valores = valores
def criar_valores(self):
valores = []
linha = []
linha.append('A')
for i in (range(len(self.lista_compras))):
linha.append(0.0)
for i in range(len(self.lista_devedores)):
valores.append(linha[:])
cont = 0
for i in range(len(self.lista_devedores)):
valores[cont][0] = self.lista_devedores[cont]
cont += 1
print(cont)
return valores
def criar_dataframe(nome, vencimento, ano,mes_referencia, lista_devedores, lista_compras):
if not os.path.exists(os.getcwd() + '/tabelas/'):
os.makedirs(os.getcwd() + '/tabelas/')
lista_devedores_final = ['PARCELA ATUAL','TOTAL DE PARCELAS']
for i in lista_devedores:
lista_devedores_final.append(i)
print(lista_devedores_final)
print(lista_compras)
tabela = pd.DataFrame(columns=lista_compras, index=lista_devedores_final)
tabela.index.name = 'DEVEDORES'
for col in tabela.columns:
tabela[col].values[:] = 0.0
tabela.to_csv(os.getcwd() + '/tabelas/' + ano + '_' + mes_referencia + '_' + vencimento +'_' + nome + '.csv', sep=',', encoding='utf-8')
return True
def criar_planilha(filename):
# --- populate table with file contents --- #
if filename == '':
return
data = []
header_list = []
if filename is not None:
with open(filename, "r") as infile:
reader = csv.reader(infile)
header_list = next(reader)
data = list(reader)
return header_list, data
def lista_pagadores(valores):
pagadores = ''
for i in valores:
pagadores = pagadores + '-' + i[0]
pagadores = pagadores.replace('-PARCELA ATUAL-TOTAL DE PARCELAS-','')
return pagadores
def atualizar_compra(arquivo, opção, texto):
planilha = pd.read_csv(arquivo)
if opção == 'Alterar':
nomeantigo, nomenovo = texto.split()
planilhanova = planilha.rename(columns={nomeantigo: nomenovo})
planilha = pd.DataFrame(planilhanova)
elif opção == 'Remover':
planilha = pd.DataFrame(planilha)
del planilha[texto]
elif opção == 'Adicionar':
planilha = planilha.assign(novo=0.0)
nomecolunanova = 'novo'
planilha = planilha.rename(columns={nomecolunanova: texto})
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
def atualizar_devedor(arquivo, opção, indice, nomenovo):
planilha = pd.read_csv(arquivo)
if opção == 'Alterar':
planilha.loc[indice, 'DEVEDORES'] = nomenovo
elif opção == 'Remover':
planilha = planilha.drop(planilha.index[[indice]])
elif opção == 'Adicionar':
planilha = pd.DataFrame(planilha)
print(planilha)
lista = list(planilha)
print(lista)
valor = planilha.columns
print(valor)
valor = (len(valor)-1)
print(valor)
lista = [nomenovo]
print(lista)
for i in range(valor):
lista.append(0)
print(lista)
planilha.loc[len(planilha)]= lista
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
def atualizar_valores(arquivo,opcao, nome, valores):
coluna = nome
planilha = pd.read_csv(arquivo)
planilha = pd.DataFrame(planilha)
indice = 0
if opcao == 'Fixa':
for linha in range(len(planilha)):
if indice == 0:
planilha.at[indice, coluna] = 0
indice += 1
planilha = planilha.round(2)
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
elif indice == 1:
planilha.at[indice, coluna] = 0
indice += 1
planilha = planilha.round(2)
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
else:
valor = valores[indice -2]
valor = valor.replace(',','.')
planilha.at[indice, coluna] = float(valor)
indice += 1
planilha = planilha.round(2)
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
elif opcao == 'Parcelada':
for linha in range(len(planilha)):
if indice == 0:
valor = valores[indice]
valor = valor.replace(',','.')
planilha.at[indice, coluna] = float(valor)
indice += 1
planilha = planilha.round(2)
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
elif indice == 1:
valor = valores[indice]
valor = valor.replace(',','.')
planilha.at[indice, coluna] = float(valor)
indice += 1
planilha = planilha.round(2)
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
else:
valor = valores[indice]
valor = valor.replace(',','.')
planilha.at[indice, coluna] = float(valor)
indice += 1
planilha = planilha.round(2)
planilha.to_csv(os.getcwd() + '/tabelas/' + 'novo.csv', sep=',', encoding='utf-8', index= False)
os.remove(arquivo)
os.rename(os.getcwd() + '/tabelas/' + 'novo.csv', arquivo)
def atualizar_planilha(arquivo, nome_novo):
planilha = pd.read_csv(arquivo)
planilha = pd.DataFrame(planilha)
planilha.to_csv(nome_novo, sep=',', encoding='utf-8', index= False)
planilha = pd.read_csv(nome_novo)
| pd.DataFrame(planilha) | pandas.DataFrame |
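# --- Added usage sketch (hedged; all example values are assumptions) ---
# How the helpers above fit together: build the card spreadsheet, then fill in
# one purchase. 'Parcelada' expects two installment numbers followed by one
# amount per debtor, written with a decimal comma to match the replace(',', '.')
# handling inside atualizar_valores.
if __name__ == '__main__':
    devedores = ['ANA', 'BRUNO']
    compras = ['MERCADO', 'FARMACIA']
    criar_dataframe('nubank', '10', '2021', '05', devedores, compras)
    arquivo = os.getcwd() + '/tabelas/' + '2021_05_10_nubank.csv'
    atualizar_valores(arquivo, 'Parcelada', 'MERCADO', ['1', '3', '50,00', '25,50'])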
import os.path
import datetime
import numpy as np
import pandas
import pytest
import astropy.units as u
from astropy.time import TimeDelta
from sunpy import timeseries
from sunpy.data.test import rootdir
from sunpy.instr import lyra
from sunpy.time import is_time_equal, parse_time
# Define location for test LYTAF database files
TEST_DATA_PATH = rootdir
# Define some test data for test_remove_lytaf_events()
TIME = parse_time(np.array([datetime.datetime(2013, 2, 1) + datetime.timedelta(minutes=i)
for i in range(120)]))
CHANNELS = [np.zeros(len(TIME)) + 0.4, np.zeros(len(TIME)) + 0.1]
EMPTY_LYTAF = np.empty((0,), dtype=[("insertion_time", object),
("begin_time", object),
("reference_time", object),
("end_time", object),
("event_type", object),
("event_definition", object)])
LYTAF_TEST = np.append(
EMPTY_LYTAF,
np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371459961)),
parse_time(datetime.datetime.utcfromtimestamp(1359677220)),
parse_time(datetime.datetime.utcfromtimestamp(1359677250)),
parse_time(datetime.datetime.utcfromtimestamp(1359677400)),
"LAR", "Large Angle Rotation.")],
dtype=EMPTY_LYTAF.dtype))
LYTAF_TEST = np.append(
LYTAF_TEST,
np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371460063)),
parse_time(datetime.datetime.utcfromtimestamp(1359681764)),
parse_time(datetime.datetime.utcfromtimestamp(1359682450)),
parse_time(datetime.datetime.utcfromtimestamp(1359683136)),
"UV occ.", "Occultation in the UV spectrum.")],
dtype=LYTAF_TEST.dtype))
@pytest.mark.remote_data
def test_split_series_using_lytaf():
"""
test the downloading of the LYTAF file and subsequent queries.
"""
# test split_series_using_lytaf
# construct a dummy signal for testing purposes
basetime = parse_time('2010-06-13 02:00')
seconds = 3600
dummy_time = [basetime + TimeDelta(s*u.second) for s in range(seconds)]
dummy_data = np.random.random(seconds)
lytaf_tmp = lyra.get_lytaf_events('2010-06-13 02:00', '2010-06-13 06:00',
combine_files=["ppt"])
split = lyra.split_series_using_lytaf(dummy_time, dummy_data, lytaf_tmp)
assert type(split) == list
assert len(split) == 4
assert is_time_equal(split[0]['subtimes'][0], parse_time((2010, 6, 13, 2, 0)))
assert is_time_equal(split[0]['subtimes'][-1], parse_time((2010, 6, 13, 2, 7, 2)))
assert is_time_equal(split[3]['subtimes'][0], parse_time((2010, 6, 13, 2, 59, 42)))
assert is_time_equal(split[3]['subtimes'][-1], parse_time((2010, 6, 13, 2, 59, 58)))
# Test case when no LYTAF events found in time series.
split_no_lytaf = lyra.split_series_using_lytaf(dummy_time,
dummy_data, LYTAF_TEST)
assert type(split_no_lytaf) == list
assert type(split_no_lytaf[0]) == dict
assert not set(split_no_lytaf[0].keys()).symmetric_difference({'subtimes', 'subdata'})
assert split_no_lytaf[0]["subtimes"] == dummy_time
assert split_no_lytaf[0]["subdata"].all() == dummy_data.all()
@pytest.fixture
def lyra_ts():
# Create sample TimeSeries
lyrats = timeseries.TimeSeries(
os.path.join(rootdir, 'lyra_20150101-000000_lev3_std_truncated.fits.gz'),
source='LYRA')
lyrats.data = pandas.DataFrame(index=TIME,
data={"CHANNEL1": CHANNELS[0],
"CHANNEL2": CHANNELS[1],
"CHANNEL3": CHANNELS[0],
"CHANNEL4": CHANNELS[1]})
return lyrats
@pytest.mark.remote_data
def test_remove_lytaf_events_from_timeseries(lyra_ts):
"""
Test if artifact are correctly removed from a TimeSeries.
"""
# Check correct errors are raised due to bad input
with pytest.raises(AttributeError):
ts_test = lyra.remove_lytaf_events_from_timeseries(
[], force_use_local_lytaf=True)
# Run remove_artifacts_from_timeseries, returning artifact
# status
ts_test, artifact_status_test = \
lyra.remove_lytaf_events_from_timeseries(
lyra_ts, artifacts=["LAR", "Offpoint"], return_artifacts=True,
force_use_local_lytaf=True)
# Generate expected data by calling _remove_lytaf_events and
# constructing expected dataframe manually.
time, channels, artifact_status_expected = lyra._remove_lytaf_events(
lyra_ts.data.index, channels=[np.asanyarray(lyra_ts.data["CHANNEL1"]),
np.asanyarray(lyra_ts.data["CHANNEL2"]),
np.asanyarray(lyra_ts.data["CHANNEL3"]),
np.asanyarray(lyra_ts.data["CHANNEL4"])],
artifacts=["LAR", "Offpoint"], return_artifacts=True,
force_use_local_lytaf=True)
dataframe_expected = pandas.DataFrame(index=time,
data={"CHANNEL1": channels[0],
"CHANNEL2": channels[1],
"CHANNEL3": channels[2],
"CHANNEL4": channels[3]})
# Assert expected result is returned
| pandas.testing.assert_frame_equal(ts_test.data, dataframe_expected) | pandas.testing.assert_frame_equal |
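# --- Added usage sketch (hedged) ---
# The same calls the tests above exercise, outside of pytest. Running this needs
# network access (the LYTAF database is downloaded when force_use_local_lytaf is
# not set) and the truncated LYRA test file shipped with sunpy; the file path is
# an assumption if used outside the sunpy test tree.
if __name__ == "__main__":
    ts = timeseries.TimeSeries(
        os.path.join(rootdir, 'lyra_20150101-000000_lev3_std_truncated.fits.gz'),
        source='LYRA')
    cleaned, artifact_status = lyra.remove_lytaf_events_from_timeseries(
        ts, artifacts=["LAR", "Offpoint"], return_artifacts=True)
    print(cleaned.data.head())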
import pandas as pd
import os
inpath = "C:\\Users\\abhij\\OneDrive\\Desktop\\Personal folder\\Academics\\Internships\\Harvard internship\\Files\\Case data files\\injector full\\XY plots\\turbulent kinetic energy"
outpath= "C:\\Users\\abhij\\OneDrive\\Desktop\\"
dirs = os.listdir(inpath)
string = "xy/key/label"
for i in dirs:
current = os.path.join(inpath, i)  # walk into each subfolder found under inpath
listdfs = []
indirs = os.listdir(current)
for j in indirs:
f = open(current +"\\" + j , "r")
k = f.readlines()
print(k)
title = k[1].replace("Series 1 at /LINE:","")
data = k[5:-2]
for d in range(len(data)):
data[d] = data[d].replace("\n","")
data[d] = data[d].replace("\t",",")
data[d] = data[d].split(',')
title = title.replace('"',"")
data.insert(0,["title", title])
df = | pd.DataFrame(data) | pandas.DataFrame |
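# --- Added sketch of a likely continuation (hedged) ---
# The row above is truncated before `listdfs` is used. The helper below is a
# guess at the intended end result: parse every exported XY plot in one quantity
# folder and stack them into a single DataFrame. The column layout and the
# "line" label column are assumptions, not taken from the original script.
def combine_xy_exports(folder):
    frames = []
    for fname in os.listdir(folder):
        with open(os.path.join(folder, fname), "r") as f:
            lines = f.readlines()
        title = lines[1].replace("Series 1 at /LINE:", "").replace('"', "").strip()
        rows = [ln.strip().replace("\t", ",").split(",") for ln in lines[5:-2]]
        frame = pd.DataFrame(rows)           # numeric columns keep their exported order
        frame["line"] = title                # remember which probe line the samples came from
        frames.append(frame)
    return pd.concat(frames, ignore_index=True)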
import glob
import pandas as pd
import os, sys, re
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#%%
"""
Accumulates 1) unique genes 2) unique sites counts for all samples. Plots the output.
"""
headers = ['#SeqID', 'refPos', 'strand', 'Base', 'cov', 'C_count','methRate']
methRate_counts = {}
methRate_counts_filtered = {}
gene_counts = {}
sites_per_gene = {}
for file in glob.glob("**/*_Genome10xCall_annotate.txt", recursive=True): #*_Genome10xCall*_annotate.txt", recursive=True):
name = re.sub("_Genome10xCall_annotate.txt", "", os.path.basename(file))
print(file)
df = | pd.read_csv(file, sep='\t', low_memory=False) | pandas.read_csv |
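# --- Added sketch of the likely per-sample accumulation (hedged) ---
# The row above stops right after reading each annotated table. The helper below
# follows the module docstring ("unique genes" and "unique sites" per sample);
# the column names follow the `headers` list defined above, while the 'gene'
# column name and the coverage/methRate thresholds are assumptions, not taken
# from the original file.
def summarise_sample(df, name, min_cov=10, min_rate=0.1, gene_col='gene'):
    methRate_counts[name] = len(df[['#SeqID', 'refPos']].drop_duplicates())
    methRate_counts_filtered[name] = int(((df['cov'] >= min_cov) & (df['methRate'] >= min_rate)).sum())
    if gene_col in df.columns:
        gene_counts[name] = df[gene_col].nunique()
        sites_per_gene[name] = len(df) / max(df[gene_col].nunique(), 1)
    # e.g. pd.Series(methRate_counts).plot(kind='bar'); plt.ylabel('unique sites'); plt.show()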