prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
"""This script performs the inital prefilling of mongoDB atlas from a given data scource"""
""" https://www.pegelonline.wsv.de/webservices/rest-api/v2/stations/HAMBURG-ST.PAULI/W/measurements.json"""
import requests
import json
from config import db
import pandas as pd
###############################################################################
# FUNCTIONS
def delete_data():
'''CAREFUL: deletes all entries in collection'''
db.pegeldata.delete_many({})
def get_initial_data():
'''gets initial data from API and stores it in df'''
response = requests.get("https://www.pegelonline.wsv.de/webservices/rest-api/v2/stations/HAMBURG-ST.PAULI/W/measurements.json?start=2019-02-01T00:00%2B01:00&end=2019-02-25T16:00%2B01:00")
json_data = json.loads(response.text)
df = pd.DataFrame(json_data)
# df.index = pd.to_datetime(df.index, unit='s')
df['timestamp'] = df['timestamp'].astype('datetime64[s]') + pd.DateOffset(hours=1)
df.index =
|
pd.to_datetime(df['timestamp'], unit='s')
|
pandas.to_datetime
|
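Spliced back together (purely illustrative, reusing the df and imports already built in the prompt above), the truncated assignment and its listed completion read:
# prompt's truncated line with the completion filled in
df.index = pd.to_datetime(df['timestamp'], unit='s')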
from multiping import MultiPing
from pandas import DataFrame, read_pickle, datetime
from time import sleep
IPS = ["8.8.8.8", "1.1.1.1", "127.0.0.1", "192.168.1.1"]
PICKLE_PATH = "ping_stats.pkl.zip"
TIMEOUT = 10
INTERVAL_SECONDS = 1
try:
df =
|
read_pickle(PICKLE_PATH)
|
pandas.read_pickle
|
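For context, this is the prompt's try-block assignment with the completion filled in; the except branch that presumably handles a missing pickle is not part of the prompt, so it is not shown here.
# completed statement inside the prompt's try block
df = read_pickle(PICKLE_PATH)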
from pathlib import Path
import itertools
import pandas as pd
import numpy as np
import re
import datetime
#from utils import convert_to_int
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
DATA_FOLDER = Path("data-desc")
VARIABLES = [
"Totaal",
"Ziekenhuisopname",
"Overleden"
]
def get_timeline():
df = pd.read_csv(Path("data", "rivm_NL_covid19_sex.csv"))
dates = sorted(df["Datum"].unique())
return dates
def export_date(df, data_folder, prefix, data_date=None, label=None):
if data_date:
df_date = df.loc[df["Datum"] == data_date, :]
else:
df_date = df
# export with data date
if label is not None:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}_{label}.csv")
else:
export_path = Path(DATA_FOLDER, data_folder, f"{prefix}.csv")
print(f"Export {export_path}")
df_date.to_csv(export_path, index=False)
def main_sex():
df_reported = pd.read_csv(Path("data", "rivm_NL_covid19_sex.csv"))
df_reported = df_reported.rename(columns={"Aantal": "AantalCumulatief"})
df_reported["Aantal"] = df_reported.loc[df_reported["Datum"] < '2020-07-07'] \
.groupby(['Type', 'Geslacht'], sort=True)['AantalCumulatief'] \
.transform(pd.Series.diff)
#df_reported.loc[df_reported["Datum"] == sorted(df_reported["Datum"].unique())[0], "Aantal"] = \
#df_reported.loc[df_reported["Datum"] == sorted(df_reported["Datum"].unique())[0], "AantalCumulatief"]
df_reported['Aantal'] = df_reported["Aantal"].astype(pd.Int64Dtype())
df_reported['AantalCumulatief'] = df_reported["AantalCumulatief"].astype(pd.Int64Dtype())
# format the columns
df_reported = df_reported[[
"Datum",
"Geslacht",
"Type",
"Aantal",
"AantalCumulatief"
]]
Path(DATA_FOLDER, "data-sex").mkdir(exist_ok=True)
dates = sorted(df_reported["Datum"].unique())
# export by date
for data_date in dates:
export_date(df_reported, "data-sex", "RIVM_NL_sex", data_date, str(data_date).replace("-", ""))
# export latest
export_date(df_reported, "data-sex", "RIVM_NL_sex", data_date=dates[-1], label="latest")
# export all
export_date(df_reported, "data-sex", "RIVM_NL_sex", data_date=None, label=None)
def main_age():
df_reported = pd.read_csv(Path("data", "rivm_NL_covid19_age.csv"))
df_reported = df_reported.rename(columns={"Aantal": "AantalCumulatief"})
df_reported["Aantal"] = df_reported.loc[df_reported["Datum"] < '2020-07-07'] \
.groupby(['Type', 'LeeftijdGroep'], sort=True)['AantalCumulatief'] \
.transform(pd.Series.diff)
#df_reported.loc[df_reported["Datum"] == sorted(df_reported["Datum"].unique())[0], "Aantal"] = \
#df_reported.loc[df_reported["Datum"] == sorted(df_reported["Datum"].unique())[0], "AantalCumulatief"]
df_reported['Aantal'] = df_reported["Aantal"].astype(
|
pd.Int64Dtype()
|
pandas.Int64Dtype
|
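Joined with its completion, the truncated cast becomes the line below; pd.Int64Dtype() is pandas' nullable integer dtype, which tolerates the missing values left by Series.diff.
# completed cast to the nullable integer dtype
df_reported['Aantal'] = df_reported["Aantal"].astype(pd.Int64Dtype())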
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 10:50:05 2020
@author: obazgir
"""
import csv
import numpy as np
import pandas as pd
import os
import scipy as sp
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
import cv2
import pickle
from Toolbox import NRMSE, Random_Image_Gen, two_d_norm, two_d_eq, Assign_features_to_pixels, MDS_Im_Gen, Bias_Calc, REFINED_Im_Gen
from sklearn.metrics import mean_absolute_error
##########################################
# #
# #
# Data Cleaning #
# #
##########################################
cell_lines = ["HCC_2998","MDA_MB_435", "SNB_78", "NCI_ADR_RES","DU_145", "786_0", "A498","A549_ATCC","ACHN","BT_549","CAKI_1","DLD_1","DMS_114","DMS_273","CCRF_CEM","COLO_205","EKVX"]
#cell_lines = ["HCC_2998"]
Results_Dic = {}
SAVE_PATH = "/home/obazgir/REFINED/Volumetric_REFINED/Geometric_REFINED/"
#%%
for SEL_CEL in cell_lines:
# Loading the drug responses and their IDs (NSC)
DF = pd.read_csv("/home/obazgir/REFINED/NCI/NCI60_GI50_normalized_April.csv")
FilteredDF = DF.loc[DF.CELL==SEL_CEL] # Pulling out the selected cell line responses
FilteredDF = FilteredDF.drop_duplicates(['NSC']) # Dropping out the duplicates
Feat_DF = pd.read_csv("/home/obazgir/REFINED/NCI/normalized_padel_feats_NCI60_672.csv") # Load the drug descriptors of the drugs applied on the selected cell line
Cell_Features = Feat_DF[Feat_DF.NSC.isin(FilteredDF.NSC)]
TargetDF = FilteredDF[FilteredDF.NSC.isin(Cell_Features.NSC)]
Y = np.array(TargetDF.NORMLOG50)
# Features
X = Cell_Features.values
X = X[:,2:]
# fix random seed for reproducibility
seed = 10
np.random.seed(seed)
# split training, validation and test sets based on each sample NSC ID
NSC_All = np.array(TargetDF['NSC'],dtype = int)
Train_Ind, Rest_Ind, Y_Train, Y_Rest = train_test_split(NSC_All, Y, test_size= 0.2, random_state=seed)
Validation_Ind, Test_Ind, Y_Validation, Y_Test = train_test_split(Rest_Ind, Y_Rest, test_size= 0.5, random_state=seed)
# Sort the NSCs
Train_Ind = np.sort(Train_Ind)
Validation_Ind = np.sort(Validation_Ind)
Test_Ind = np.sort(Test_Ind)
# Extracting the drug descriptors of each set based on their associated NSCs
X_Train_Raw = Cell_Features[Cell_Features.NSC.isin(Train_Ind)]
X_Validation_Raw = Cell_Features[Cell_Features.NSC.isin(Validation_Ind)]
X_Test_Raw = Cell_Features[Cell_Features.NSC.isin(Test_Ind)]
Y_Train = TargetDF[TargetDF.NSC.isin(Train_Ind)]; Y_Train = np.array(Y_Train['NORMLOG50'])
Y_Validation = TargetDF[TargetDF.NSC.isin(Validation_Ind)]; Y_Validation = np.array(Y_Validation['NORMLOG50'])
Y_Test = TargetDF[TargetDF.NSC.isin(Test_Ind)]; Y_Test = np.array(Y_Test['NORMLOG50'])
X_Dummy = X_Train_Raw.values; X_Train = X_Dummy[:,2:]
X_Dummy = X_Validation_Raw.values; X_Validation = X_Dummy[:,2:]
X_Dummy = X_Test_Raw.values; X_Test = X_Dummy[:,2:]
Y_Val_Save = np.zeros(((len(Y_Validation)),6))
Y_Val_Save[:,0] = Y_Validation
Y_Test_Save = np.zeros(((len(Y_Test)),6))
Y_Test_Save[:,0] = Y_Test
#%% REFINED coordinates
# LE
import math
with open('/home/obazgir/REFINED/Volumetric_REFINED/theMapping_Init_Geo5.pickle','rb') as file:
gene_names_Geo,coords_Geo,map_in_int_Geo = pickle.load(file)
#%% importing tensorflow
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping
Model_Names = ["Geometric"]
Results_Data = np.zeros((1,5))
nn = 26 # Image size = sqrt(#features (drug descriptors))
cnt = 0
for modell in Model_Names:
X_Train_REFINED = REFINED_Im_Gen(X_Train,nn, map_in_int_Geo, gene_names_Geo,coords_Geo)
X_Val_REFINED = REFINED_Im_Gen(X_Validation,nn, map_in_int_Geo, gene_names_Geo,coords_Geo)
X_Test_REFINED = REFINED_Im_Gen(X_Test,nn, map_in_int_Geo, gene_names_Geo,coords_Geo)
#%% Defining the CNN Model
sz = X_Train_REFINED.shape
Width = int(math.sqrt(sz[1]))
Height = int(math.sqrt(sz[1]))
CNN_Train = X_Train_REFINED.reshape(-1,Width,Height,1)
CNN_Val = X_Val_REFINED.reshape(-1,Width,Height,1)
CNN_Test = X_Test_REFINED.reshape(-1,Width,Height,1)
def CNN_model(Width,Height,):
nb_filters = 64
nb_conv = 5
model = models.Sequential()
# Convolutional layers
model.add(layers.Conv2D(35, kernel_size = (nb_conv, nb_conv),padding='valid',strides=2,dilation_rate=1,input_shape=(Width, Height,1)))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Conv2D(117, kernel_size = (nb_conv, nb_conv),padding='valid',strides=2,dilation_rate=1))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Flatten())
model.add(layers.Dense(540))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Dropout(1-0.7))
model.add(layers.Dense(20))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Dropout(1-0.7))
model.add(layers.Dense(1))
initial_learning_rate = 0.0009373467452368672
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=497753,
decay_rate=0.7331280469176514,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
loss='mse',
metrics=['mse'])
#opt = tf.keras.optimizers.Adam(lr=0.0001)
#model.compile(loss='mse', optimizer = opt)
return model
# Training the CNN Model
model = CNN_model(Width,Height)
ES = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=50)
CNN_History = model.fit(CNN_Train, Y_Train, batch_size= 128, epochs = 250, verbose=0, validation_data=(CNN_Val, Y_Validation), callbacks = [ES])
Y_Val_Pred_CNN = model.predict(CNN_Val, batch_size= 128, verbose=0)
Y_Pred_CNN = model.predict(CNN_Test, batch_size= 128, verbose=0)
Y_Val_Save[:,cnt+1] = Y_Val_Pred_CNN.reshape(-1)
Y_Test_Save[:,cnt+1] = Y_Pred_CNN.reshape(-1)
#print(model.summary())
# Plot the Model
# plt.plot(CNN_History.history['loss'], label='train')
# plt.plot(CNN_History.history['val_loss'], label='Validation')
# plt.legend()
# plt.show()
# Measuring the REFINED-CNN performance (NRMSE, R2, PCC, Bias)
CNN_NRMSE, CNN_R2 = NRMSE(Y_Test, Y_Pred_CNN)
MAE = mean_absolute_error(Y_Test,Y_Pred_CNN)
print(CNN_NRMSE,"NRMSE of "+ modell + SEL_CEL)
print(CNN_R2,"R2 of " + modell + SEL_CEL)
Y_Test = np.reshape(Y_Test, (Y_Pred_CNN.shape))
CNN_ER = Y_Test - Y_Pred_CNN
CNN_PCC, p_value = pearsonr(Y_Test, Y_Pred_CNN)
print(CNN_PCC,"PCC of " + modell+ SEL_CEL)
Y_Validation = Y_Validation.reshape(len(Y_Validation),1)
Y_Test = Y_Test.reshape(len(Y_Test),1)
Bias = Bias_Calc(Y_Test, Y_Pred_CNN)
Results_Data[0,:] = [CNN_NRMSE,MAE,CNN_PCC,CNN_R2,Bias]
cnt +=1
Results = pd.DataFrame(data = Results_Data , columns = ["NRMSE","MAE","PCC","R2","Bias"], index = Model_Names)
Y_Val_Save_PD = pd.DataFrame(data = Y_Val_Save , columns = ["Y_Val","MDS","LE","LLE","ISO","VOL"])
Y_Test_Save_PD =
|
pd.DataFrame(data = Y_Test_Save , columns = ["Y_Val","MDS","LE","LLE","ISO","VOL"])
|
pandas.DataFrame
|
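A sketch of the completed assignment, using the Y_Test_Save array pre-allocated in the prompt; the column labels are the ones given in the completion.
# completed line: label the saved test targets/predictions as a DataFrame
Y_Test_Save_PD = pd.DataFrame(data=Y_Test_Save, columns=["Y_Val", "MDS", "LE", "LLE", "ISO", "VOL"])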
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import rcParams
import matplotlib.pyplot as plt
from numpy.fft import rfftfreq
try:
from accelerate.mkl.fftpack import rfft
except ImportError:
from numpy.fft import rfft
rcParams.update({'figure.autolayout': True})
def preprocess(df, t_range=None):
"""
Take a NOAA dataframe and do a little cleanup.
Parameters
----------
df : dataframe
Source data. Expected to be read from NOAA csv.
t_range : list
Min and max values to clip temperatures.
Returns
-------
df : dataframe
Cleaned dataframe.
stations : list
List of station names included in dataframe.
"""
if t_range is None:
t_range = [-20, 120]
stations = sorted(set(df['STATION_NAME']))
df['DATE_fmt'] =
|
pd.to_datetime(df['DATE'], format='%Y%m%d')
|
pandas.to_datetime
|
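Reassembled, the truncated line plus its completion parse the NOAA DATE column (YYYYMMDD values) into proper datetimes:
# completed line inside preprocess()
df['DATE_fmt'] = pd.to_datetime(df['DATE'], format='%Y%m%d')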
#!/usr/bin/env python
# coding: utf-8
"""
In this script, the results of the friction tests are visualised.
All visualisations are stored in /figures/
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, TU Delft Biomechanical Design"
__credits__ = ["<NAME>, <NAME>, <NAME>"]
__license__ = "CC0-1.0 License"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Imports
import math
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from statistics import mean
# Global variables
# The diameter (in m) of the pneumatic cylinder
d_25 = 25 / 1000
# The radius (in m) of the pneumatic cylinder
r_25 = d_25 / 2
# The surface area (in m^2) of the pneumatic cylinder
area = math.pi * r_25**2
# The diameter (in m) of the pneumatic cylinder for the X-ring and corresponding O-ring
d_257 = 25.7 / 1000
# The radius (in m) of the pneumatic cylinder for the X-ring and corresponding O-ring
r_257 = d_257 / 2
# The surface area (in m^2) of the pneumatic cylinder for the X-ring and corresponding O-ring
large_area = math.pi * r_257**2
# The models with different sealing mechanism used in this test
rings = ['O-ring','NAPN','NAP310','PK','KDN','O-ring257','X-ring257']
# The models with different cross-sectional shape used in this test
shapes = ['Circle','Stadium','Kidney','Stadium_lc','Kidney_lc']
# Remove first 15 data points to avoid deviating starting values
drop_amount = 15
# # Friction force test
# Define a dictionary to store all data from the friction force tests
# For each model all variables are stored in this nested dictionary
friction_force = {}
# For each ring type
for ring in rings:
friction_force[ring] = {}
for bar in [1,3,5,7]:
# Load the corresponding results from .CSV and drop unnecessary columns
ring_df = pd.read_csv(f'./data/friction/{ring}_{bar}bar.csv',delimiter='\s+',header=None,names=(['Time','A','B','C','Laser(mm)','Pressure(bar)','Force(N)']))
ring_df.drop(columns=['A','B','C'],index=ring_df.index[range(drop_amount)],axis=1,inplace=True)
# Store the data in our larger dictionary
friction_force[ring][bar] = {}
# Set the time (in s) and laser (in mm)
friction_force[ring][bar]['Time'] = ring_df['Time']/1000
friction_force[ring][bar]['Laser(mm)'] = ring_df['Laser(mm)']
# Set the pressure (in MPa) and force (in N)
friction_force[ring][bar]['Pressure(bar)'] = ring_df['Pressure(bar)']/10
friction_force[ring][bar]['Force(N)'] = ring_df['Force(N)']
# Calculate force Fp based on the measured pressure (see equation 2 in the report)
# The 25.7 mm rings have a different and larger surface area
if '257' in ring:
Fp = ring_df['Pressure(bar)'] * 10**5 * large_area
else:
Fp = ring_df['Pressure(bar)'] * 10**5 * area
# Calculate the friction force by subtracting Fp from the measured force (see equation 3 in the report)
FF = ring_df['Force(N)'] - Fp
friction_force[ring][bar]['FrictionForce'] = FF
friction_force[ring][bar]['FrictionFrom'] = FF[FF>FF.mean()].mean()
friction_force[ring][bar]['FrictionTo'] = FF[FF<FF.mean()].mean()
# For each shape type
for shape in shapes:
friction_force[shape] = {}
for bar in [1,2,3,4,5,6,7]:
# Some shapes extrude at higher pressures, so no data is available for them
if bar > 3 and shape not in ['Stadium_lc','Kidney_lc','Kidney', 'Circle']:
break
if bar > 4 and shape not in ['Stadium_lc', 'Kidney_lc', 'Circle']:
break
if bar > 5 and shape not in ['Kidney_lc', 'Circle']:
break
# Load the corresponding results from .CSV and drop unnecessary columns
shape_df = pd.read_csv(f'./data/friction/{shape}_{bar}bar.csv',delimiter='\s+',header=None,names=(['Time','A','B','C','Laser(mm)','Pressure(bar)','Force(N)']))
shape_df.drop(columns=['A','B','C'],index=shape_df.index[range(drop_amount)],axis=1,inplace=True)
# Store the data in our larger dictionary
friction_force[shape][bar] = {}
# Set the time (in s) and laser (in mm)
friction_force[shape][bar]['Time'] = shape_df['Time']/1000
friction_force[shape][bar]['Laser(mm)'] = shape_df['Laser(mm)']
# Set the pressure (in MPa) and force (in N)
friction_force[shape][bar]['Pressure(bar)'] = shape_df['Pressure(bar)']/10
friction_force[shape][bar]['Force(N)'] = shape_df['Force(N)']
# Calculate force Fp based on the measured pressure (see equation 2 in the report)
Fp = shape_df['Pressure(bar)'] * 10**5 * area
# Calculate the friction force by subtracting Fp from the measured force (see equation 3 in the report)
FF = shape_df['Force(N)'] - Fp
friction_force[shape][bar]['FrictionForce'] = FF
friction_force[shape][bar]['FrictionFrom'] = FF[FF>FF.mean()].mean()
friction_force[shape][bar]['FrictionTo'] = FF[FF<FF.mean()].mean()
# #### Friction force range definition plot - visual for the methodology
plt.annotate(text='',xy=(12,friction_force['O-ring'][1]['FrictionFrom']), xytext=(12,friction_force['O-ring'][1]['FrictionTo']), arrowprops=dict(arrowstyle='<->', lw=2))
plt.hlines(xmin=0, xmax=70,y=friction_force['O-ring'][1]['FrictionFrom'], linestyles='dashed', colors='0', lw=2)
plt.hlines(xmin=0, xmax=70,y=friction_force['O-ring'][1]['FrictionTo'], linestyles='dashed', colors='0', lw=2)
plt.plot(friction_force['O-ring'][1]['Time'],friction_force['O-ring'][1]['FrictionForce'],'tab:blue',label='O-ring')
plt.plot(friction_force['NAPN'][1]['Time'],friction_force['NAPN'][1]['FrictionForce'],'tab:orange',alpha=0.25,label='NAPN')
plt.plot(friction_force['NAP310'][1]['Time'],friction_force['NAP310'][1]['FrictionForce'],'tab:green',alpha=0.25,label='NAP310')
plt.plot(friction_force['PK'][1]['Time'],friction_force['PK'][1]['FrictionForce'],'tab:red',alpha=0.25,label='PK')
plt.plot(friction_force['KDN'][1]['Time'],friction_force['KDN'][1]['FrictionForce'],'tab:purple', alpha=0.25,label='KDN')
plt.xlim([5,15])
plt.xlabel('Time (s)')
plt.ylabel('Force (N)')
plt.legend(loc='lower center',bbox_to_anchor=(0.5,-0.3),ncol=5)
plt.savefig('./figures/method_frictionforce_1bar_zoom.pdf',bbox_inches = 'tight')
plt.clf()
# #### Standard deviation & Standard error
# Function to calculate standard error for a specific test
def calculate_se(friction_force,model,bar):
# Calculate the mean to define retracting and extending parts
frictionforce_mean = friction_force[model][bar]['FrictionForce'].mean()
# Variable to store the friction force
frictionforce = list(friction_force[model][bar]['FrictionForce'])
# Variables for results and counter
frictionforce_se_means = []
i = 0
# Loop through the data and break them up into separate tests
while i < len(frictionforce) - 1:
# Lists for retracting and extending parts of a single test
retracting = []
extending = []
# First the retracting part of a test is done
# Get all values above the mean
while len(retracting) < 100 or frictionforce[i] > frictionforce_mean:
retracting.append(frictionforce[i])
i += 1
# Stop if the end of the data is reached
if i > len(frictionforce) - 1:
break
# Secondly the extending part of a test is done
# Get all values below the mean
while len(extending) < 100 or frictionforce[i] < frictionforce_mean:
extending.append(frictionforce[i])
i += 1
# Stop if the end of the data is reached
if i > len(frictionforce) - 1:
break
# The friction force range is defined as the difference between the mean friction force of the retracting and extending strokes
frictionforce_se_means.append(mean(retracting)-mean(extending))
# Standard error is calculated by the standard deviation of the means
# Also return the mean of the friction force ranges across the tests
# Finally return the last test to determine the standard deviation of one extending and retracting stroke
return mean(frictionforce_se_means),np.std(frictionforce_se_means),extending,retracting
# For each model use the calculate_se() function to acquire the friction force range and the standard error
# Additionally for each of the rings and shapes the standard deviation of a single test is saved
std_single_test_rings = pd.DataFrame(columns=['Bar']+rings)
std_single_test_rings = std_single_test_rings.set_index('Bar')
for ring in rings:
for bar in [1,3,5,7]:
mean_ff,se_ff,extending,retracting = calculate_se(friction_force,ring,bar)
friction_force[ring][bar]['SE_FrictionForce'] = se_ff
friction_force[ring][bar]['Mean_FrictionForce'] = mean_ff
# For each retracting and extending test, check if the index already exists
if str(bar)+'_bar_retracting' not in list(std_single_test_rings.index):
std_single_test_rings = std_single_test_rings.append(pd.Series(name= str(bar)+'_bar_retracting'))
if str(bar)+'_bar_extending' not in list(std_single_test_rings.index):
std_single_test_rings = std_single_test_rings.append(pd.Series(name= str(bar)+'_bar_extending'))
# For each individual test save the average and standard deviation
std_single_test_rings.loc[str(bar)+'_bar_retracting'][ring] = f'{str(round(mean(retracting),2))} $\pm$ {round(np.std(retracting),2)}'
std_single_test_rings.loc[str(bar)+'_bar_extending'][ring] = f'{str(round(mean(extending),2))} $\pm$ {round(np.std(extending),2)}'
# Again define a dataframe to store the standard deviations of each single test
std_single_test_shapes = pd.DataFrame(columns=['Bar']+shapes)
std_single_test_shapes = std_single_test_shapes.set_index('Bar')
for shape in shapes:
for bar in [1,2,3,4,5,6,7]:
try:
mean_ff,se_ff,extending,retracting = calculate_se(friction_force,shape,bar)
friction_force[shape][bar]['SE_FrictionForce'] = se_ff
friction_force[shape][bar]['Mean_FrictionForce'] = mean_ff
# For each retracting and extending test, check if the index already exists
if str(bar)+'_bar_retracting' not in list(std_single_test_shapes.index):
std_single_test_shapes = std_single_test_shapes.append(pd.Series(name= str(bar)+'_bar_retracting'))
if str(bar)+'_bar_extending' not in list(std_single_test_shapes.index):
std_single_test_shapes = std_single_test_shapes.append(pd.Series(name= str(bar)+'_bar_extending'))
# For each test save the average and standard deviation
std_single_test_shapes.loc[str(bar)+'_bar_retracting'][shape] = f'{str(round(mean(retracting),2))} $\pm$ {round(np.std(retracting),2)}'
std_single_test_shapes.loc[str(bar)+'_bar_extending'][shape] = f'{str(round(mean(extending),2))} $\pm$ {round(np.std(extending),2)}'
except Exception as e:
print(f'No data for {shape} at {bar} bar due to extrusion of the O-ring: {e}')
print(std_single_test_rings)
# print(std_single_test_rings.to_latex(escape=False))
print(std_single_test_shapes)
# print(std_single_test_shapes.to_latex(escape=False))
# #### Friction force range plot 25mm
# Variables to make plotting of friction force range with standard error more clear
fr = {'Pressure': [.1,.3,.5,.7],
'O_ring': [friction_force['O-ring'][i]['Mean_FrictionForce'] for i in friction_force['O-ring']],
'NAPN': [friction_force['NAPN'][i]['Mean_FrictionForce'] for i in friction_force['NAPN']],
'NAP310': [friction_force['NAP310'][i]['Mean_FrictionForce'] for i in friction_force['NAP310']],
'PK': [friction_force['PK'][i]['Mean_FrictionForce'] for i in friction_force['PK']],
'KDN': [friction_force['KDN'][i]['Mean_FrictionForce'] for i in friction_force['KDN']],
'O_ring257': [friction_force['O-ring257'][i]['Mean_FrictionForce'] for i in friction_force['O-ring257']],
'X_ring257': [friction_force['X-ring257'][i]['Mean_FrictionForce'] for i in friction_force['X-ring257']],
}
fr = pd.DataFrame(data=fr)
se = {'Pressure': [.1,.3,.5,.7],
'O_ring': [friction_force['O-ring'][i]['SE_FrictionForce'] for i in friction_force['O-ring']],
'NAPN': [friction_force['NAPN'][i]['SE_FrictionForce'] for i in friction_force['NAPN']],
'NAP310': [friction_force['NAP310'][i]['SE_FrictionForce'] for i in friction_force['NAP310']],
'PK': [friction_force['PK'][i]['SE_FrictionForce'] for i in friction_force['PK']],
'KDN': [friction_force['KDN'][i]['SE_FrictionForce'] for i in friction_force['KDN']],
'O_ring257': [friction_force['O-ring257'][i]['SE_FrictionForce'] for i in friction_force['O-ring257']],
'X_ring257': [friction_force['X-ring257'][i]['SE_FrictionForce'] for i in friction_force['X-ring257']],
}
se = pd.DataFrame(data=se)
# Visualize the friction force range - 25 mm cylinder
plt.errorbar(fr.Pressure,fr.O_ring257,se.O_ring257,color='tab:blue',alpha=0.25, linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.X_ring257,se.X_ring257,color='tab:brown',alpha=0.25,linestyle=(0,(5,2,2)),capsize=2)
plt.errorbar(fr.Pressure,fr.O_ring,se.O_ring,color='tab:blue',label='O-ring', linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.NAPN,se.NAPN,color='tab:orange',label='NAPN',linestyle='dashdot',capsize=2)
plt.errorbar(fr.Pressure,fr.NAP310,se.NAP310,color='tab:green',label='NAP310', linestyle=(0,(5,2,2)),capsize=2)
plt.errorbar(fr.Pressure,fr.PK,se.PK,color='tab:red',label='PK',linestyle='dashed',capsize=2)
plt.errorbar(fr.Pressure,fr.KDN,se.KDN,color='tab:purple',label='KDN',linewidth=1,capsize=2)
plt.xlabel('Pressure (MPa)')
plt.ylabel('Dynamic friction force range (N)')
plt.legend()
plt.savefig('./figures/result_frictionforcerange_25mm.pdf',bbox_inches = 'tight')
plt.clf()
# #### Friction force range plot 25.7mm
# Visualize the friction force range - 25.7 mm cylinder
plt.errorbar(fr.Pressure,fr.O_ring,se.O_ring,color='tab:blue',alpha=0.25, linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.NAPN,se.NAPN,color='tab:orange',alpha=0.25,linestyle='dashdot',capsize=2)
plt.errorbar(fr.Pressure,fr.NAP310,se.NAP310,color='tab:green',alpha=0.25, linestyle=(0,(5,2,2)),capsize=2)
plt.errorbar(fr.Pressure,fr.PK,se.PK,color='tab:red',alpha=0.25,linestyle='dashed',capsize=2)
plt.errorbar(fr.Pressure,fr.KDN,se.KDN,color='tab:purple',alpha=0.25,linewidth=1,capsize=2)
plt.errorbar(fr.Pressure,fr.O_ring257,se.O_ring257,color='tab:blue',label='O-ring', linestyle='dotted',linewidth=2,capsize=2)
plt.errorbar(fr.Pressure,fr.X_ring257,se.X_ring257,color='tab:brown',label='X-ring',linestyle=(0,(5,2,2)),capsize=2)
plt.xlabel('Pressure (MPa)')
plt.ylabel('Dynamic friction force range (N)')
plt.legend()
plt.savefig('./figures/result_frictionforcerange_257mm.pdf',bbox_inches = 'tight')
plt.clf()
# #### Friction force range plot different shapes
# Again variables to make plotting of friction force range with standard error more clear
fr_s = {'Pressure': [.1,.2,.3],
'Stadium': [friction_force['Stadium'][i]['Mean_FrictionForce'] for i in friction_force['Stadium']],
}
fr_s = pd.DataFrame(data=fr_s)
se_s = {'Pressure': [.1,.2,.3],
'Stadium': [friction_force['Stadium'][i]['SE_FrictionForce'] for i in friction_force['Stadium']],
}
se_s = pd.DataFrame(data=se_s)
fr_ck = {'Pressure': [.1,.2,.3,.4],
'Circle': [friction_force['Circle'][i]['Mean_FrictionForce'] for i in friction_force['Circle']][:4],
'Kidney': [friction_force['Kidney'][i]['Mean_FrictionForce'] for i in friction_force['Kidney']],
}
fr_ck = pd.DataFrame(data=fr_ck)
se_ck = {'Pressure': [.1,.2,.3,.4],
'Circle': [friction_force['Circle'][i]['SE_FrictionForce'] for i in friction_force['Circle']][:4],
'Kidney': [friction_force['Kidney'][i]['SE_FrictionForce'] for i in friction_force['Kidney']],
}
se_ck =
|
pd.DataFrame(data=se_ck)
|
pandas.DataFrame
|
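The completed statement simply wraps the standard-error dict for the Circle/Kidney shapes in a DataFrame, mirroring the earlier fr/se frames built in the prompt:
# completed line
se_ck = pd.DataFrame(data=se_ck)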
""":func:`~pandas.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
import struct
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.computation.common import NameResolutionError
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError
def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None,
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(gbls=global_dict, lcls=local_dict, level=level,
resolvers=resolvers, target=target)
def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys):
"""Make sure that variables in resolvers don't overlap with locals or
globals.
"""
res_locals = list(com.intersection(resolver_keys, local_keys))
if res_locals:
msg = "resolvers and locals overlap on names {0}".format(res_locals)
raise NameResolutionError(msg)
res_globals = list(com.intersection(resolver_keys, global_keys))
if res_globals:
msg = "resolvers and globals overlap on names {0}".format(res_globals)
raise NameResolutionError(msg)
def _replacer(x, pad_size):
"""Replace a number with its padded hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin).replace('0x', '').rjust(pad_size, '0')
def _raw_hex_id(obj, pad_size=2):
"""Return the padded hexadecimal id of ``obj``."""
# interpret as a pointer since that's really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(_replacer(x, pad_size) for x in packed)
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
added by pandas.
Parameters
----------
gbls : dict or None, optional, default None
lcls : dict or Scope or None, optional, default None
level : int, optional, default 1
resolvers : list-like or None, optional, default None
Attributes
----------
globals : dict
locals : dict
level : int
resolvers : tuple
resolver_keys : frozenset
"""
__slots__ = ('globals', 'locals', 'resolvers', '_global_resolvers',
'resolver_keys', '_resolver', 'level', 'ntemps', 'target')
def __init__(self, gbls=None, lcls=None, level=1, resolvers=None,
target=None):
self.level = level
self.resolvers = tuple(resolvers or [])
self.globals = dict()
self.locals = dict()
self.target = target
self.ntemps = 1 # number of temporary variables in this scope
if isinstance(lcls, Scope):
ld, lcls = lcls, dict()
self.locals.update(ld.locals.copy())
self.globals.update(ld.globals.copy())
self.resolvers += ld.resolvers
if ld.target is not None:
self.target = ld.target
self.update(ld.level)
frame = sys._getframe(level)
try:
self.globals.update(gbls or frame.f_globals)
self.locals.update(lcls or frame.f_locals)
finally:
del frame
# add some useful defaults
self.globals['Timestamp'] = pd.lib.Timestamp
self.globals['datetime'] = datetime
# SUCH a hack
self.globals['True'] = True
self.globals['False'] = False
# function defs
self.globals['list'] = list
self.globals['tuple'] = tuple
res_keys = (list(o.keys()) for o in self.resolvers)
self.resolver_keys = frozenset(reduce(operator.add, res_keys, []))
self._global_resolvers = self.resolvers + (self.locals, self.globals)
self._resolver = None
self.resolver_dict = {}
for o in self.resolvers:
self.resolver_dict.update(dict(o))
def __unicode__(self):
return com.pprint_thing(
'locals: {0}\nglobals: {1}\nresolvers: '
'{2}\ntarget: {3}'.format(list(self.locals.keys()),
list(self.globals.keys()),
list(self.resolver_keys),
self.target))
def __getitem__(self, key):
return self.resolve(key, globally=False)
def resolve(self, key, globally=False):
resolvers = self.locals, self.globals
if globally:
resolvers = self._global_resolvers
for resolver in resolvers:
try:
return resolver[key]
except KeyError:
pass
def update(self, level=None):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
# we are always 2 levels below the caller
# plus the caller may be below the env level
# in which case we need additional levels
sl = 2
if level is not None:
sl += level
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
frame = inspect.currentframe()
try:
frames = []
while sl >= 0:
frame = frame.f_back
sl -= 1
if frame is None:
break
frames.append(frame)
for f in frames[::-1]:
self.locals.update(f.f_locals)
self.globals.update(f.f_globals)
finally:
del frame, frames
def add_tmp(self, value, where='locals'):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
where : basestring, optional, default 'locals', {'locals', 'globals'}
What scope to add the value to.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
d = getattr(self, where, None)
if d is None:
raise AttributeError("Cannot add value to non-existent scope "
"{0!r}".format(where))
if not isinstance(d, dict):
raise TypeError("Cannot add value to object of type {0!r}, "
"scope must be a dictionary"
"".format(type(d).__name__))
name = 'tmp_var_{0}_{1}_{2}'.format(type(value).__name__, self.ntemps,
_raw_hex_id(self))
d[name] = value
# only increment if the variable gets put in the scope
self.ntemps += 1
return name
def remove_tmp(self, name, where='locals'):
d = getattr(self, where, None)
if d is None:
raise AttributeError("Cannot remove value from non-existent scope "
"{0!r}".format(where))
if not isinstance(d, dict):
raise TypeError("Cannot remove value from object of type {0!r}, "
"scope must be a dictionary"
"".format(type(d).__name__))
del d[name]
self.ntemps -= 1
def _rewrite_assign(source):
"""Rewrite the assignment operator for PyTables expression that want to use
``=`` as a substitute for ``==``.
"""
res = []
g = tokenize.generate_tokens(StringIO(source).readline)
for toknum, tokval, _, _, _ in g:
res.append((toknum, '==' if tokval == '=' else tokval))
return tokenize.untokenize(res)
def _replace_booleans(source):
"""Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
"""
return source.replace('|', ' or ').replace('&', ' and ')
def _replace_locals(source, local_symbol='@'):
"""Replace local variables with a syntacticall valid name."""
return source.replace(local_symbol, _LOCAL_TAG)
def _preparse(source):
"""Compose assignment and boolean replacement."""
return _replace_booleans(_rewrite_assign(source))
def _is_type(t):
"""Factory for a type checking function of type ``t`` or tuple of types."""
return lambda x: isinstance(x.value, t)
_is_list = _is_type(list)
_is_str = _is_type(string_types)
# partition all AST nodes
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
issubclass(x, ast.AST),
(getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names)
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
# nodes that we don't support directly but are needed for parsing
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
'DictComp', 'SetComp', 'Repr', 'Lambda',
'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
_arguments_nodes | _keyword_nodes | _alias_nodes |
_expr_context_nodes | _unsupported_expr_nodes) -
_hacked_nodes)
# we're rewriting assignment in some cases to be an equality comparison,
# and we don't want `stmt` and friends in there, so get only the classes
# whose names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
_base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
"""Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
disallowed : callable
"""
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{0}'.format(node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed
def _op_maker(op_class, op_symbol):
"""Return a function to create an op class with its symbol already passed.
Returns
-------
f : callable
"""
def f(self, node, *args, **kwargs):
"""Return a partial function with an Op subclass with an operator
already passed.
Returns
-------
f : callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
"""Decorator to add default implementation of ops."""
def f(cls):
for op_attr_name, op_class in compat.iteritems(op_classes):
ops = getattr(cls, '{0}_ops'.format(op_attr_name))
ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, 'visit_{0}'.format(op_node), made_op)
return cls
return f
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
"""Custom ast walker. Parsers of other engines should subclass this class
if necessary.
Parameters
----------
env : Scope
engine : str
parser : str
preparser : callable
"""
const_type = Constant
term_type = Term
binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
None, 'Pow', 'FloorDiv', 'Mod')
binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
unary_ops = _unary_ops_syms
unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
unary_op_nodes_map = dict(
|
zip(unary_ops, unary_op_nodes)
|
pandas.compat.zip
|
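Joined with its completion, the class attribute pairs each binary-less unary operator symbol with its AST node name; note that zip here is the one imported from pandas.compat at the top of the prompt.
# completed class attribute
unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))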
import pandas as pd
import numpy as np
from hinpy.classes.object_group_class import *
from time import time as TCounter
def RandomRecommender(start_object_group,end_object_group,parameters,verbose=False):
start_objects = start_object_group.GetNames()
end_objects = end_object_group.GetNames()
start_group = start_object_group.name
end_group = end_object_group.name
relation_name=''
timestamp=
|
pd.Timestamp('')
|
pandas.Timestamp
|
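The completed assignment from the prompt's RandomRecommender setup is just a placeholder timestamp built from an empty string:
# completed line
timestamp = pd.Timestamp('')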
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
# gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected =
|
Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
|
pandas.Timestamp
|
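With the completion in place, the test's expected value is the same tz-aware scalar used to build the frame above:
# completed line inside test_consistency_with_tz_aware_scalar
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")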
"""Tests for _data_reading.py"""
import datetime
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import primap2
import primap2.pm2io as pm2io
import primap2.pm2io._conversion
from primap2.pm2io._data_reading import additional_coordinate_metadata
from .utils import assert_ds_aligned_equal
DATA_PATH = Path(__file__).parent / "data"
@pytest.mark.parametrize(
"unit, entity, expected_attrs",
[
("Mt", "CO2", {"units": "Mt", "entity": "CO2"}),
(
"Gg CO2",
"KYOTOGHG (AR4GWP100)",
{
"units": "Gg CO2",
"entity": "KYOTOGHG",
"gwp_context": "AR4GWP100",
},
),
(
"kg CO2",
"CH4 (SARGWP100)",
{
"units": "kg CO2",
"entity": "CH4",
"gwp_context": "SARGWP100",
},
),
],
)
def test_metadata_for_variable(unit, entity, expected_attrs):
assert (
pm2io._interchange_format.metadata_for_variable(unit, entity) == expected_attrs
)
def assert_attrs_equal(attrs_result, attrs_expected):
assert attrs_result.keys() == attrs_expected.keys()
assert attrs_result["attrs"] == attrs_expected["attrs"]
assert attrs_result["time_format"] == attrs_expected["time_format"]
assert attrs_result["dimensions"].keys() == attrs_expected["dimensions"].keys()
for entity in attrs_result["dimensions"]:
assert set(attrs_result["dimensions"][entity]) == set(
attrs_expected["dimensions"][entity]
)
@pytest.fixture
def coords_cols():
return {
"unit": "unit",
"entity": "gas",
"area": "country",
"category": "category",
"sec_cats__Class": "classification",
}
@pytest.fixture
def add_coords_cols():
return {"category_name": ["category_name", "category"]}
@pytest.fixture
def coords_defaults():
return {
"source": "TESTcsv2021",
"sec_cats__Type": "fugitive",
"scenario": "HISTORY",
}
@pytest.fixture
def coords_terminologies():
return {
"area": "ISO3",
"category": "IPCC2006",
"sec_cats__Type": "type",
"sec_cats__Class": "class",
"scenario": "general",
}
@pytest.fixture
def coords_value_mapping():
return {
"category": "PRIMAP1",
"entity": "PRIMAP1",
"unit": "PRIMAP1",
}
@pytest.fixture
def coords_value_filling():
return {
"category": { # col to fill
"category_name": { # col to fill from
"Energy": "1", # from value: to value
"IPPU": "2",
}
}
}
@pytest.fixture
def filter_keep():
return {
"f1": {"category": ["IPC0", "IPC2"]},
"f2": {"classification": "TOTAL"},
}
@pytest.fixture
def filter_remove():
return {"f1": {"gas": "CH4"}, "f2": {"country": ["USA", "FRA"]}}
class TestReadWideCSVFile:
def test_output(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
filter_keep,
filter_remove,
):
file_input = DATA_PATH / "test_csv_data_sec_cat.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_output.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
meta_data = {"references": "Just ask around."}
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
filter_keep=filter_keep,
filter_remove=filter_remove,
meta_data=meta_data,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"references": "Just ask around.",
"sec_cats": ["Class (class)", "Type (type)"],
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"Type (type)",
"unit",
"scenario (general)",
"Class (class)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_no_sec_cats(
self,
tmp_path,
coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)
pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False)
attrs_expected = {
"attrs": {
"scen": "scenario (general)",
"area": "area (ISO3)",
"cat": "category (IPCC2006)",
},
"time_format": "%Y",
"dimensions": {
"*": [
"entity",
"source",
"area (ISO3)",
"unit",
"scenario (general)",
"category (IPCC2006)",
]
},
}
assert_attrs_equal(attrs_result, attrs_expected)
def test_add_coords(
self,
tmp_path,
coords_cols,
add_coords_cols,
coords_defaults,
coords_terminologies,
coords_value_mapping,
):
file_input = DATA_PATH / "test_csv_data_category_name.csv"
file_expected = DATA_PATH / "test_read_wide_csv_file_no_sec_cats_cat_name.csv"
df_expected = pd.read_csv(file_expected, index_col=0)
del coords_cols["sec_cats__Class"]
del coords_defaults["sec_cats__Type"]
del coords_terminologies["sec_cats__Class"]
del coords_terminologies["sec_cats__Type"]
df_result = pm2io.read_wide_csv_file_if(
file_input,
coords_cols=coords_cols,
add_coords_cols=add_coords_cols,
coords_defaults=coords_defaults,
coords_terminologies=coords_terminologies,
coords_value_mapping=coords_value_mapping,
)
attrs_result = df_result.attrs
df_result.to_csv(tmp_path / "temp.csv")
df_result =
|
pd.read_csv(tmp_path / "temp.csv", index_col=0)
|
pandas.read_csv
|
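The completed line re-reads the CSV round-trip of the interchange-format frame before the equality check, exactly as in the earlier tests of the prompt:
# completed line inside test_add_coords
df_result = pd.read_csv(tmp_path / "temp.csv", index_col=0)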
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import wisps
import numpy as np
import matplotlib.pyplot as plt
import wisps.simulations as wispsim
import pandas as pd
from tqdm import tqdm
import seaborn as sns
from matplotlib.colors import Normalize
import astropy.units as u
import wisps.simulations.effective_numbers as eff
import seaborn as sns
import matplotlib
import popsims
import itertools
#plt.style.use('dark_background')
#get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
wispsim.MAG_LIMITS
# In[3]:
import popsims
import splat
# In[4]:
sgrid=wispsim.SPGRID
pnts=pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
corr_pols=wisps.POLYNOMIAL_RELATIONS['mag_limit_corrections']
klf=pd.read_csv('/users/caganze/research/wisps/data/kirkpatricklf.txt', delimiter=',')
klf['bin_center']=np.mean(np.array([klf.t0.values, klf.tf.values]), axis=0)
klf=klf.replace(0.0,np.nan)
ucds=
|
pd.read_pickle(wisps.LIBRARIES+'/new_real_ucds.pkl')
|
pandas.read_pickle
|
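Completed, the assignment loads the pickled ucds table from the wisps library folder:
# completed line
ucds = pd.read_pickle(wisps.LIBRARIES + '/new_real_ucds.pkl')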
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from pvlib import irradiance
from pvlib import atmosphere
from conftest import (requires_ephem, requires_numba, needs_numpy_1_10,
pandas_0_22)
# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)
# must include night values
times = pd.date_range(start='20140624', freq='6H', periods=4, tz=tus.tz)
ephem_data = solarposition.get_solarposition(
times, tus.latitude, tus.longitude, method='nrel_numpy')
irrad_data = tus.get_clearsky(times, model='ineichen', linke_turbidity=3)
dni_et = irradiance.extraradiation(times.dayofyear)
ghi = irrad_data['ghi']
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('input, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', requires_ephem('pyephem')])
def test_extraradiation(input, expected, method):
out = irradiance.extraradiation(input)
assert_allclose(out, expected, atol=1)
@requires_numba
def test_extraradiation_nrel_numba():
result = irradiance.extraradiation(times, method='nrel', how='numba', numthreads=8)
assert_allclose(result, [1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_extraradiation_epoch_year():
out = irradiance.extraradiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
def test_extraradiation_invalid():
with pytest.raises(ValueError):
irradiance.extraradiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.grounddiffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series():
ground_irrad = irradiance.grounddiffuse(40, ghi)
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0():
ground_irrad = irradiance.grounddiffuse(40, ghi, albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface():
with pytest.raises(KeyError):
irradiance.grounddiffuse(40, ghi, surface_type='invalid')
def test_grounddiffuse_albedo_surface():
result = irradiance.grounddiffuse(40, ghi, surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series():
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
result = irradiance.klucher(40, 180, 100, 900, 20, 180)
assert_allclose(result, 88.3022221559)
def test_klucher_series():
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 37.446276, 109.209347, 56.965916], atol=1e-4)
def test_haydavies():
result = irradiance.haydavies(40, 180, irrad_data['dhi'], irrad_data['dni'],
dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 14.967008, 102.994862, 33.190865], atol=1e-4)
def test_reindl():
result = irradiance.reindl(40, 180, irrad_data['dhi'], irrad_data['dni'],
irrad_data['ghi'], dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [np.nan, 15.730664, 104.131724, 34.166258], atol=1e-4)
def test_king():
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out, df_components = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am, return_components=True)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
expected_components = pd.DataFrame(
np.array([[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['isotropic', 'circumsolar', 'horizon'],
index=times
)
if pandas_0_22():
expected_for_sum = expected.copy()
expected_for_sum.iloc[2] = 0
else:
expected_for_sum = expected
sum_components = df_components.sum(axis=1)
assert_series_equal(out, expected, check_less_precise=2)
assert_frame_equal(df_components, expected_components)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
@needs_numpy_1_10
def test_perez_arrays():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values, am.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
def test_liujordan():
expected = pd.DataFrame(np.array([[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
# klutcher (misspelling) will be removed in 0.3
def test_total_irrad():
models = ['isotropic', 'klutcher', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
AM = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
for model in models:
total = irradiance.total_irrad(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=AM,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_total_irrad_scalars(model):
total = irradiance.total_irrad(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_globalinplane():
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
airmass = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
gr_sand = irradiance.grounddiffuse(40, ghi, surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], airmass)
irradiance.globalinplane(
aoi=aoi, dni=irrad_data['dni'], poa_sky_diffuse=diff_perez,
poa_ground_diffuse=gr_sand)
def test_disc_keys():
clearsky_data = tus.get_clearsky(times, model='ineichen',
linke_turbidity=3)
disc_data = irradiance.disc(clearsky_data['ghi'], ephem_data['zenith'],
ephem_data.index)
assert 'dni' in disc_data.columns
assert 'kt' in disc_data.columns
assert 'airmass' in disc_data.columns
def test_disc_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
disc_data = irradiance.disc(ghi, zenith, times, pressure=pressure)
assert_almost_equal(disc_data['dni'].values,
np.array([830.46, 676.09]), 1)
def test_dirint():
clearsky_data = tus.get_clearsky(times, model='ineichen',
linke_turbidity=3)
pressure = 93193.
dirint_data = irradiance.dirint(clearsky_data['ghi'], ephem_data['zenith'],
ephem_data.index, pressure=pressure)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([ 888. , 683.7]), 1)
def test_dirint_nans():
times = pd.DatetimeIndex(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567,], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
import numpy as np
import pandas as pd
import math
from hotspot import sim_data
from hotspot import local_stats
from hotspot.knn import neighbors_and_weights, make_weights_non_redundant
from hotspot import bernoulli_model
from hotspot.utils import center_values
from hotspot import local_stats_pairs
def test_local_autocorrelation_centered():
"""
Test if the expected moment calculation is correct
"""
# Simulate some data
N_CELLS = 1000
N_DIM = 10
latent = sim_data.sim_latent(N_CELLS, N_DIM)
latent = pd.DataFrame(latent)
umi_counts = sim_data.sim_umi_counts(N_CELLS, 2000, 200)
umi_counts = pd.Series(umi_counts)
def features_processing(df_X, target, normalization, training=True, scaler=None):
import pandas as pd
from sklearn.preprocessing import StandardScaler
df_X.loc[:, 'Sex'] = (df_X.loc[:, 'Sex'] == 'female') * 1
df_X.rename(columns={'Sex': 'Sex==female'}, inplace=True)
ethnicity_dict = {'AFRICAN': 'OTHER',
'ARAB': 'OTHER',
'CHINESE': 'OTHER',
'DUTCH': 'EUROPEAN',
'ENGLISH': 'ENGLISH',
'FRENCH': 'EUROPEAN',
'GERMAN': 'EUROPEAN',
'GREEK': 'EUROPEAN',
'HISPANIC': 'OTHER',
'INDIAN': 'OTHER',
'ISRAELI': 'OTHER',
'ITALIAN': 'EUROPEAN',
'JAPANESE': 'OTHER',
'NORDIC': 'NORDIC',
'ROMANIAN': 'EUROPEAN',
'SLAV': 'EUROPEAN',
'THAI': 'OTHER',
'TURKISH': 'OTHER',
'VIETNAMESE': 'OTHER'}
df_X.loc[:, 'Ethnicity_origin'] = (df_X.loc[:, 'Ethnicity_origin']).apply(lambda x: ethnicity_dict[x])
df_X = pd.merge(df_X, pd.get_dummies(df_X['Ethnicity_origin'], prefix='Ethnicity'),
how='left', left_index=True, right_index=True)
if not normalization:
print('Normalization is turned OFF')
embarked_dict = {'S': 1, 'C': 3, 'Q': 2}
df_X.loc[:, 'Embarked'] = (df_X.loc[:, 'Embarked']).apply(lambda x: embarked_dict[x])
df_X = df_X.drop(['Cabin',
'Ticket',
'Name_title',
'Name',
'Name_first',
'Name_last',
'Name_other',
'Ticket',
'Ticket_Series',
'Ticket_No',
'Ticket_combined',
'Ethnicity_origin',
'Ethnicity_prob'], axis=1)
print(df_X.columns)
return [scaler, df_X]
else:
print('Normalization is turned ON')
df_X = pd.merge(df_X, pd.get_dummies(df_X['Embarked'], prefix='Embarked'),
how='left', left_index=True, right_index=True)
df_X = df_X.drop(['Cabin',
'Ticket',
'Name_title',
'Name',
'Name_first',
'Name_last',
'Name_other',
'Ticket',
'Ticket_Series',
'Ticket_No',
'Ticket_combined',
'Ethnicity_origin',
'Ethnicity_prob',
'Embarked'], axis=1)
predictors = [x for x in df_X.columns if x not in [target]]
if training:
# print(df_X.shape)
scaler = StandardScaler().fit(X=df_X[predictors])
df_Y = df_X[target]
df_X_index = df_X.index
df_X_res = scaler.transform(X=df_X[predictors])
# print(df_X_res.shape)
df_X = pd.DataFrame(df_X_res, columns=df_X[predictors].columns)
"""
Testing the ``modelchain`` module.
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
import windpowerlib.wind_turbine as wt
import windpowerlib.modelchain as mc
class TestModelChain:
@classmethod
def setup_class(self):
"""Setup default values"""
self.test_turbine = {'hub_height': 100,
'turbine_type': 'E-126/4200',
'power_curve': pd.DataFrame(
data={'value': [0.0, 4200 * 1000],
'wind_speed': [0.0, 25.0]})}
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
wind_speed_8m = np.array([[4.0], [5.0]])
wind_speed_10m = np.array([[5.0], [6.5]])
roughness_length = np.array([[0.15], [0.15]])
self.weather_df = pd.DataFrame(
np.hstack((temperature_2m, temperature_10m, pressure_0m,
wind_speed_8m, wind_speed_10m, roughness_length)),
index=[0, 1],
columns=[np.array(['temperature', 'temperature', 'pressure',
'wind_speed', 'wind_speed',
'roughness_length']),
np.array([2, 10, 0, 8, 10, 0])])
def test_temperature_hub(self):
# Test modelchain with temperature_model='linear_gradient'
test_mc = mc.ModelChain(wt.WindTurbine(**self.test_turbine))
# Test modelchain with temperature_model='interpolation_extrapolation'
test_mc_2 = mc.ModelChain(
wt.WindTurbine(**self.test_turbine),
temperature_model='interpolation_extrapolation')
# Parameters for tests
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
weather_df = pd.DataFrame(np.hstack((temperature_2m,
temperature_10m)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature']),
np.array([2, 10])])
# temperature_10m is closer to hub height than temperature_2m
temp_exp = pd.Series(data=[266.415, 265.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp = pd.Series(data=[267.0, 243.5])
assert_series_equal(test_mc_2.temperature_hub(weather_df), temp_exp)
# change heights of temperatures so that old temperature_2m is now used
weather_df.columns = [np.array(['temperature', 'temperature']),
np.array([10, 200])]
temp_exp = pd.Series(data=[266.415, 267.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp = pd.Series(data=[267.0, 267.052632])
assert_series_equal(test_mc_2.temperature_hub(weather_df), temp_exp)
# temperature at hub height
weather_df.columns = [np.array(['temperature', 'temperature']),
np.array([100, 10])]
temp_exp = pd.Series(data=[267, 268], name=100)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
def test_density_hub(self):
# Test modelchain with density_model='barometric'
test_mc = mc.ModelChain(wt.WindTurbine(**self.test_turbine))
# Test modelchain with density_model='ideal_gas'
test_mc_2 = mc.ModelChain(wt.WindTurbine(**self.test_turbine),
density_model='ideal_gas')
# Test modelchain with density_model='interpolation_extrapolation'
test_mc_3 = mc.ModelChain(wt.WindTurbine(**self.test_turbine),
density_model='interpolation_extrapolation')
# Parameters for tests
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
weather_df = pd.DataFrame(np.hstack((temperature_2m,
temperature_10m,
pressure_0m)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature',
'pressure']),
np.array([2, 10, 0])])
# temperature_10m is closer to hub height than temperature_2m
rho_exp = pd.Series(data=[1.30591, 1.30919])
assert_series_equal(test_mc.density_hub(weather_df), rho_exp)
rho_exp = pd.Series(data=[1.30595575725, 1.30923554056])
# Importing the libraries
import numpy as np
import pandas as pd
#import tensorflow as tf
#Data Preprocessing
# Importing the dataset
dataset_1 = pd.read_csv('2020_US_weekly_symptoms_dataset.csv') #Search Trends dataset
dataset_2 = pd.read_csv('aggregated_cc_by.csv', dtype={"test_units": "object"}) #hospitalization cases dataset
dataset_2 = dataset_2.iloc[78164:90022] #Loading rows for USA only and rows that don't have all missing values
#Cleaning the datasets
dataset_1 = dataset_1.dropna(axis='columns', how='all') #removes columns with all NaN values
dataset_1 = dataset_1.dropna(axis='rows', how='all') #removes rows with all NaN values
dataset_2 = dataset_2.dropna(axis='columns', how='all') #removes columns with all NaN values
dataset_2 = dataset_2.dropna(axis='rows', how='all') #removes rows with all NaN values
#Match time resolution
pd.set_option('display.max_columns', None)
import numpy as np
import pandas as pd
import math
import torch
import matplotlib.pyplot as plt
from scipy.spatial import distance
from scipy import signal
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
class HumanMotion(Dataset):
def __init__(self, x, y):
self.x, self.y = x, y
def __getitem__(self, item):
return self.x[item].astype(float), self.y[item].astype(float)
def __len__(self):
return self.x.shape[0]
def generate_dataset(x, y, batch_size = 128, shuffle = True):
x_train, x_eval, y_train, y_eval = train_test_split(x, y, test_size=0.4, shuffle=False)
x_valid, x_test, y_valid, y_test = train_test_split(x_eval, y_eval, test_size=0.5,
shuffle=False)
train_loader = DataLoader(HumanMotion(x_train, y_train), num_workers=8,
batch_size=batch_size, shuffle=shuffle, drop_last=True)
valid_loader = DataLoader(HumanMotion(x_valid, y_valid), num_workers=8,
batch_size=batch_size, shuffle=shuffle, drop_last=False)
test_loader = DataLoader(HumanMotion(x_test, y_test), num_workers=8,
batch_size=batch_size, shuffle=shuffle, drop_last=False)
return train_loader, valid_loader, test_loader, x_test, y_test
# Code based on:
# https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
# Expects tuples of (state, next_state, action, reward, done)
class ReplayBuffer(object):
'''
Change the buffer to array and delete for loop.
'''
def __init__(self, max_size=1e6):
self.storage = []
self.max_size = max_size
self.ptr = 0
def get(self, idx):
return self.storage[idx]
def add(self, data):
if len(self.storage) == self.max_size:
self.storage[int(self.ptr)] = data
self.ptr = (self.ptr + 1) % self.max_size
else:
self.storage.append(data)
def add_final_reward(self, final_reward, steps, delay=0):
len_buffer = len(self.storage)
for i in range(len_buffer - steps - delay, len_buffer - delay):
item = list(self.storage[i])
item[3] += final_reward
self.storage[i] = tuple(item)
def add_specific_reward(self, reward_vec, idx_vec):
for i in range(len(idx_vec)):
time_step_num = int(idx_vec[i])
item = list(self.storage[time_step_num])
item[3] += reward_vec[i]
self.storage[time_step_num] = tuple(item)
def sample_on_policy(self, batch_size, option_buffer_size):
return self.sample_from_storage(batch_size, self.storage[-option_buffer_size:])
def sample(self, batch_size):
return self.sample_from_storage(batch_size, self.storage)
@staticmethod
def sample_from_storage(batch_size, storage):
ind = np.random.randint(0, len(storage), size=batch_size)
x, y, u, r, d, p = [], [], [], [], [], []
for i in ind:
X, Y, U, R, D, P = storage[i]
x.append(np.array(X, copy=False))
y.append(np.array(Y, copy=False))
u.append(np.array(U, copy=False))
r.append(np.array(R, copy=False))
d.append(np.array(D, copy=False))
p.append(np.array(P, copy=False))
return np.array(x), np.array(y), np.array(u), np.array(r).reshape(-1, 1), \
np.array(d).reshape(-1, 1), np.array(p).reshape(-1, 1)
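# Illustrative only: a minimal sketch (hypothetical 3-dim states, 1-dim actions) of how
# (state, next_state, action, reward, done, p) tuples are pushed into ReplayBuffer and
# later drawn back as stacked numpy batches.
def _replay_buffer_sketch():
    buf = ReplayBuffer(max_size=1000)
    for _ in range(10):
        s = np.random.randn(3)
        # (state, next_state, action, reward, done, p)
        buf.add((s, s + 0.1, np.random.randn(1), 0.0, 0.0, 0.0))
    states, next_states, actions, rewards, dones, ps = buf.sample(batch_size=4)
    return states.shape, rewards.shape  # (4, 3) and (4, 1)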
# Expects tuples of (state, next_state, action, reward, done)
class ReplayBufferMat(object):
'''
Change the buffer to array and delete for loop.
'''
def __init__(self, max_size=1e6):
self.storage = []
self.max_size = max_size
self.ptr = 0
self.data_size = 0
def add(self, data):
data = list(data)
if 0 == len(self.storage):
for item in data:
self.storage.append(np.asarray(item).reshape((1, -1)))
else:
if self.storage[0].shape[0] < int(self.max_size):
for i in range(len(data)):
self.storage[i] = np.r_[self.storage[i], np.asarray(data[i]).reshape((1, -1))]
else:
for i in range(len(data)):
self.storage[i][int(self.ptr)] = np.asarray(data[i]).reshape((1, -1))
self.ptr = (self.ptr + 1) % self.max_size
self.data_size = len(self.storage[0])
def sample_on_policy(self, batch_size, option_buffer_size):
return self.sample_from_storage(
batch_size, start_idx = self.storage[0].shape[0] - option_buffer_size)
def sample(self, batch_size):
return self.sample_from_storage(batch_size)
def sample_from_storage(self, batch_size, start_idx = 0):
buffer_len = self.storage[0].shape[0]
ind = np.random.randint(start_idx, buffer_len, size=batch_size)
data_list = []
# if buffer_len > 9998:
# print(buffer_len, ind)
for i in range(len(self.storage)):
# if buffer_len > 9998:
# print('{},shape:{}'.format(i, self.storage[i].shape))
data_list.append(self.storage[i][ind])
return tuple(data_list)
def add_final_reward(self, final_reward, steps):
self.storage[3][-steps:] += final_reward
def calc_array_symmetry(array_a, array_b):
cols = array_a.shape[-1]
dist = np.zeros(cols)
for c in range(cols):
dist[c] = 1 - distance.cosine(array_a[:, c], array_b[:, c])
return np.mean(dist)
def calc_cos_similarity(joint_angle_resample, human_joint_angle):
joint_num = human_joint_angle.shape[0]
dist = np.zeros(joint_num)
for c in range(joint_num):
dist[c] = 1 - distance.cosine(joint_angle_resample[c, :], human_joint_angle[c, :])
return np.mean(dist)
def calc_cross_gait_reward(gait_state_mat, gait_velocity, reward_name):
"""
reward_name_vec =['r_d', 'r_s', 'r_f', 'r_n', 'r_gv', 'r_lhs', 'r_gs', 'r_cg', 'r_fr', 'r_po']
"""
cross_gait_reward = 0.0
reward_str_list = []
frame_num = gait_state_mat.shape[0]
joint_deg_mat = joint_state_to_deg(gait_state_mat[:, :-2])
ankle_to_hip_deg_mat = joint_deg_mat[:, [0, 3]] - joint_deg_mat[:, [1, 4]]
if 'r_gv' in reward_name:
'''
gait velocity
'''
reward_str_list.append('r_gv')
cross_gait_reward += 0.2 * np.mean(gait_velocity)
if 'r_lhs' in reward_name:
'''
0: left heel strike: the left foot should contact ground between 40% to 60% gait cycle
Theoretical situation: 0, -1: right foot strike; 50: left foot strike
'''
reward_str_list.append('r_lhs')
l_foot_contact_vec = signal.medfilt(gait_state_mat[:, -1], 3)
l_foot_contact_vec[1:] -= l_foot_contact_vec[:-1]
l_foot_contact_vec[0] = 0
if 0 == np.mean(l_foot_contact_vec == 1):
# print(gait_state_mat_sampled)
return cross_gait_reward, reward_str_list
l_heel_strike_idx = np.where(l_foot_contact_vec == 1)[0][0]
cross_gait_reward += 0.2 * (1.0 - np.tanh((l_heel_strike_idx / (frame_num + 0.0) - 0.5) ** 2))
if 'r_gs' in reward_name:
'''
1: gait symmetry
'''
reward_str_list.append('r_gs')
r_gait_state_origin = gait_state_mat[:, np.r_[0:3, -2]]
l_gait_state_origin = gait_state_mat[:, np.r_[3:6, -1]]
l_gait_state = np.zeros(l_gait_state_origin.shape)
l_gait_state[0:(frame_num - l_heel_strike_idx), :] = l_gait_state_origin[l_heel_strike_idx:, :]
l_gait_state[(frame_num - l_heel_strike_idx):, :] = l_gait_state_origin[0:l_heel_strike_idx, :]
cross_gait_reward += 0.2 * calc_array_symmetry(r_gait_state_origin, l_gait_state)
if 'r_cg' in reward_name:
'''
2: cross gait
'''
reward_str_list.append('r_cg')
cross_gait_reward += (0.2 / 4.0) * (np.tanh(ankle_to_hip_deg_mat[0, 0]) +
np.tanh(- ankle_to_hip_deg_mat[l_heel_strike_idx, 0]) +
# np.tanh(ankle_to_hip_deg_mat[-1, 0]) + \
np.tanh(-ankle_to_hip_deg_mat[0, 1])
+ np.tanh(ankle_to_hip_deg_mat[l_heel_strike_idx, 1])
# + np.tanh(-ankle_to_hip_deg_mat[-1, 1])
)
# if ankle_to_hip_deg_mat[0, 0] > 5 \
# and ankle_to_hip_deg_mat[l_heel_strike_idx, 0] < -5 \
# and ankle_to_hip_deg_mat[-1, 0] > 5:
# cross_gait_reward += 0.1
#
# if ankle_to_hip_deg_mat[0, 1] < -5 \
# and ankle_to_hip_deg_mat[l_heel_strike_idx, 1] > 5 \
# and ankle_to_hip_deg_mat[-1, 1] < -5:
# cross_gait_reward += 0.1
if 'r_fr' in reward_name:
'''
3: foot recovery
'''
reward_str_list.append('r_fr')
ankle_to_hip_speed_mat = np.zeros(ankle_to_hip_deg_mat.shape)
ankle_to_hip_speed_mat[1:] = ankle_to_hip_deg_mat[1:] - ankle_to_hip_deg_mat[:-1]
cross_gait_reward += -0.1 * (np.tanh(ankle_to_hip_speed_mat[-1, 0]) +
np.tanh(ankle_to_hip_speed_mat[l_heel_strike_idx, 1]))
if 'r_po' in reward_name:
'''
4: push off
'''
reward_str_list.append('r_po')
r_foot_contact_vec = signal.medfilt(gait_state_mat[:, -2], 3)
r_foot_contact_vec[1:] -= r_foot_contact_vec[:-1]
r_foot_contact_vec[0] = 0
ankle_speed_mat = np.zeros(joint_deg_mat[:, [2, 5]].shape)
ankle_speed_mat[1:] = joint_deg_mat[1:, [2, 5]] - joint_deg_mat[:-1, [2, 5]]
if 0 == np.mean(r_foot_contact_vec == -1):
return cross_gait_reward, reward_str_list
r_push_off_idx = np.where(r_foot_contact_vec == -1)[0][0]
cross_gait_reward += -0.1 * np.tanh(ankle_speed_mat[r_push_off_idx, 0])
if 0 == np.mean(l_foot_contact_vec == -1):
return cross_gait_reward, reward_str_list
l_push_off_idx = np.where(l_foot_contact_vec == -1)[0][0]
cross_gait_reward += -0.1 * np.tanh(ankle_speed_mat[l_push_off_idx, 1])
return cross_gait_reward, reward_str_list
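# Illustrative only: a minimal call (all-zero joint states, constant gait velocity) that
# exercises just the 'r_gv' term; gait_state_mat is assumed to carry 6 joint-state
# columns followed by the right/left foot-contact flags.
def _cross_gait_reward_sketch():
    frames = 20
    gait_state_mat = np.zeros((frames, 8))
    gait_velocity = np.full(frames, 0.8)
    reward, used_terms = calc_cross_gait_reward(gait_state_mat, gait_velocity, 'r_gv')
    return reward, used_terms  # 0.2 * mean velocity, ['r_gv']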
def calc_gait_symmetry(joint_angle):
joint_num = int(joint_angle.shape[-1] / 2)
half_num_sample = int(joint_angle.shape[0] / 2)
joint_angle_origin = np.copy(joint_angle)
joint_angle[0:half_num_sample, joint_num:] = joint_angle_origin[half_num_sample:, joint_num:]
joint_angle[half_num_sample:, joint_num:] = joint_angle_origin[0:half_num_sample, joint_num:]
dist = np.zeros(joint_num)
for c in range(joint_num):
dist[c] = 1 - distance.cosine(joint_angle[:, c], joint_angle[:, c + joint_num])
return np.mean(dist)
def calc_two_leg_J(joint_state, l_vec):
'''
:param q_vec: [q_r_hip, q_r_knee, q_r_ankle, q_l_hip, q_l_knee, q_l_ankle]
:param l_vec: [l_thigh, l_shank]
:return: J
'''
q_vec_normalized = joint_state[0::2]
q_vec_denormalized = denormalize_angle(q_vec_normalized)
J = np.eye(6)
J[0:2, 0:2] = calc_leg_J(q_vec_denormalized[0:2], l_vec)
J[3:5, 3:5] = calc_leg_J(q_vec_denormalized[3:5], l_vec)
return J
def calc_leg_J(q_vec, l_vec, is_polar = False):
'''
:param q_vec: [q_hip, q_knee]
:param l_vec: [l_thigh, l_shank]
:return: J
'''
if is_polar:
J = np.eye(2)
else:
dx_dq_hip = l_vec[0] * np.cos(q_vec[0]) + l_vec[1] * np.cos(q_vec[0] + q_vec[1])
dx_dq_knee = l_vec[1] * np.cos(q_vec[0] + q_vec[1])
dz_dq_hip = (l_vec[0] * np.sin(q_vec[0]) + l_vec[1] * np.sin(q_vec[0] + q_vec[1]))
dz_dq_knee = (l_vec[1] * np.sin(q_vec[0] + q_vec[1]))
J = np.asarray([[dx_dq_hip, dx_dq_knee],
[dz_dq_hip, dz_dq_knee]])
return J
def calc_J_redundent(J):
'''
:ref: A comparison of action spaces for learning manipulation tasks, https://arxiv.org/abs/1908.08659
:param J: end-effector Jacobian
:return: the force Jacobian from end-point force to joint torques in redundent robot,
where the freedom of joints is larger than that of end-points
check the jacobian for the torque.
'''
# J^T (JJ^T + alpha I)^-1, avoid singularity
JT = np.transpose(J)
JJT = np.matmul(J, JT)
I = np.eye(J.shape[0])
JJT_inv = np.linalg.pinv(JJT + 1e-6 * I)
return np.matmul(JT,JJT_inv)
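# Illustrative only (hypothetical 0.45 m thigh / 0.5 m shank leg): the damped
# pseudoinverse above maps a 2-dim end-point vector, e.g. the end-point force in the
# docstring, back into joint space.
def _leg_jacobian_sketch():
    q_vec = np.asarray([0.3, -0.6])          # hip and knee angles [rad]
    l_vec = np.asarray([0.45, 0.5])          # thigh and shank lengths [m]
    J = calc_leg_J(q_vec, l_vec)             # 2x2 end-point Jacobian
    J_damped_pinv = calc_J_redundent(J)      # J^T (J J^T + 1e-6 I)^-1
    end_point_vec = np.asarray([0.0, -1.0])  # e.g. a downward end-point force
    joint_vec = np.matmul(J_damped_pinv, end_point_vec)
    return joint_vec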
def joing_angle_2_pos(q_vec, l_vec, is_polar = False):
'''
:param q_vec: [q_hip, q_knee, q_ankle]
:param l_vec: [l_thigh, l_shank]
:return: pose of ankle: x, z, and q_ankle
'''
x_ankle = l_vec[0] * np.sin(q_vec[0]) + l_vec[1] * np.sin(q_vec[0] + q_vec[1])
z_ankle = -(l_vec[0] * np.cos(q_vec[0]) + l_vec[1] * np.cos(q_vec[0] + q_vec[1]))
q_ankle = q_vec[2]
return np.asarray([x_ankle, z_ankle, q_ankle])
def joint_vel_2_end_vel(q_v_vec, q_vec, l_vec, is_polar=False):
'''
:param q_v_vec: [q_v_hip, q_v_knee, q_v_ankle]
:param q_vec: [q_hip, q_knee, q_ankle]
:param l_vec: [l_thigh, l_shank]
:return: velocity of ankle: dx, dz, and d_q_ankle
'''
J = calc_leg_J(q_vec[0:2], l_vec, is_polar=is_polar)
vel_ankle = np.zeros(3)
vel_ankle[0:2] = np.matmul(J, q_v_vec[0:2])
vel_ankle[-1] = q_v_vec[2]
return vel_ankle
def state_2_end_state(state, is_polar = False):
'''
:param state: 22: [z-z0, cos(error_yaw), sin(error_yaw), v_x, v_y, v_z,
roll, pitch, q_r_hip, dq_r_hip, q_r_knee, dq_r_knee, q_r_ankle, dq_r_ankle,
q_l_hip, dq_l_hip, q_l_knee, dq_l_knee, q_l_ankle, dq_l_ankle, foot pressures]
:return: state_end: this the states of end points: feet and root, including:
18: [z_m, q_m, v_x, v_z,
x_r_ankle, z_r_ankle, q_r_ankle, x_l_ankle, z_l_ankle, q_l_ankle,
dx_r_ankle, dz_r_ankle, dq_r_ankle, dx_l_ankle, dz_l_ankle, dq_l_ankle,
foot_pressures]
'''
joint_states = state[8:-2]
q_vec_normalized = joint_states[0::2] # normalized to [-1, 1]: 2 * (q - q_mid)/(q_max - q_min)
q_vec_denormalized = denormalize_angle(q_vec_normalized)
q_vec = np.copy(q_vec_denormalized)
q_vec[[2, 5]] = q_vec_normalized[[2, 5]] # q_ankle is not required to calculate Jacobian matrix.
q_v_vec = joint_states[1::2] # normalized: 0.1 * q_vel
l_leg = 0.95
l_th = 0.45 / l_leg # normalized the leg length
l_sh = 0.5 / l_leg # normalized the leg length
q_m = state[7]
z_m = state[0]/l_leg
v_x = state[3]/l_leg
v_z = state[5]/l_leg
l_vec = np.asarray([l_th, l_sh])
pos_r_ankle = joing_angle_2_pos(q_vec[0:3], l_vec, is_polar=is_polar)
pos_l_ankle = joing_angle_2_pos(q_vec[3:6], l_vec, is_polar=is_polar)
vel_r_ankle = joint_vel_2_end_vel(q_v_vec[0:3], q_vec[0:3], l_vec, is_polar=is_polar)
vel_l_ankle = joint_vel_2_end_vel(q_v_vec[3:6], q_vec[3:6], l_vec, is_polar=is_polar)
state_end = np.zeros(18)
state_end[0:4] = np.asarray([z_m, q_m, v_x, v_z])
state_end[4:16] = np.r_[pos_r_ankle, pos_l_ankle, vel_r_ankle, vel_l_ankle]
state_end[-2:] = state[-2:]
return state_end
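# Illustrative only: with an all-zero 22-dim state vector (root pose/velocities, 12
# interleaved joint angles/velocities, 2 foot pressures) the call reduces the state to
# the 18-dim end-point representation described in the docstring:
#   state = np.zeros(22)
#   end_state = state_2_end_state(state)   # shape (18,)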
def denormalize_angle(q_normalized):
q = np.copy(q_normalized)
q[[0, 3]] = np.deg2rad(35) + np.deg2rad(80) * q[[0, 3]]
q[[1, 4]] = np.deg2rad(-75) + np.deg2rad(75) * q[[1, 4]]
q[[2, 5]] = np.deg2rad(45) * q[[2, 5]]
return q
def normalize_angle(q):
q_normalized = np.copy(q)
q_normalized[:, [0, 3]] = (q_normalized[:, [0, 3]] - np.deg2rad(35)) / np.deg2rad(80)
q_normalized[:, [1, 4]] = (q_normalized[:, [1, 4]] - np.deg2rad(-75)) / np.deg2rad(75)
q_normalized[:, [2, 5]] = q_normalized[:, [2, 5]] / np.deg2rad(45)  # ankles, inverse of denormalize_angle
return q_normalized
def end_impedance_control(target_end_pos, state, k = 5.0, b = 0.05, k_q = 0.5):
'''
:param target_end_pos: [pos_r_ankle, pos_l_ankle]
:param end_state: [z_m, q_m, v_x, v_z, pos_r_ankle, pos_l_ankle, vel_r_ankle, vel_l_ankle]
:param joint_state: [q_r_hip, dq_r_hip, q_r_knee, dq_r_knee, q_r_ankle, dq_r_ankle,
q_l_hip, dq_l_hip, q_l_knee, dq_l_knee, q_l_ankle, dq_l_ankle]
:param k:
:param b:
:return:
'''
end_state = state_2_end_state(state)
joint_state = state[8:-2]
q_vec = joint_state[0::2]
ankle_pos = end_state[4:10]
ankle_vel = end_state[10:16]
ankle_force = k * (target_end_pos - ankle_pos) - b * ankle_vel + 0.5
l_vec = np.asarray([0.45/0.95, 0.5/0.95])
J = calc_two_leg_J(joint_state, l_vec)
torque_offset = k_q * (0.0 - q_vec)
torque_offset[[0, 3]] = 0 # set the hip torque to 0
torque = np.matmul(np.transpose(J), ankle_force) + torque_offset
return torque
def impedance_control(target_pos, joint_states, k = 5.0, b = 0.05):
q_vec = joint_states[0::2]
q_v_vec = joint_states[1::2]
torque = k * (target_pos - q_vec) - b * q_v_vec
# print('angle error: ', target_pos - q_vec)
# print('action: ', action)
return torque
def calc_torque_from_impedance(action_im, joint_states, scale = 1.0):
k_vec = action_im[0::3]
b_vec = action_im[1::3]
q_e_vec = action_im[2::3]
q_vec = joint_states[0::2]
q_v_vec = joint_states[1::2]  # velocities interleave with the angles
action = (k_vec * (q_e_vec - q_vec) - b_vec * q_v_vec)/scale
return action
def check_cross_gait(gait_state_mat):
gait_num_1 = np.mean((gait_state_mat[:, 0] - gait_state_mat[:, 3]) > 0.1)
gait_num_2 = np.mean((gait_state_mat[:, 0] - gait_state_mat[:, 3]) < -0.1)
return (gait_num_1 > 0) and (gait_num_2 > 0)
def connect_str_list(str_list):
if 0 >= len(str_list):
return ''
str_out = str_list[0]
for i in range(1, len(str_list)):
str_out = str_out + '_' + str_list[i]
return str_out
def create_log_gaussian(mean, log_std, t):
quadratic = -((0.5 * (t - mean) / (log_std.exp())).pow(2))
len_mean = mean.shape
log_z = log_std
z = len_mean[-1] * math.log(2 * math.pi)
log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
return log_p
def fifo_data(data_mat, data):
data_mat[:-1] = data_mat[1:]
data_mat[-1] = data
return data_mat
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def joint_state_to_deg(joint_state_mat):
joint_deg_mat = np.zeros(joint_state_mat.shape)
joint_deg_mat[:, [0, 3]] = joint_state_mat[:, [0, 3]] * 80.0 + 35.0
joint_deg_mat[:, [1, 4]] = (1 - joint_state_mat[:, [1, 4]]) * 75.0
joint_deg_mat[:, [2, 5]] = joint_state_mat[:, [2, 5]] * 45.0
return joint_deg_mat
def logsumexp(inputs, dim=None, keepdim=False):
if dim is None:
inputs = inputs.view(-1)
dim = 0
s, _ = torch.max(inputs, dim=dim, keepdim=True)
outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
outputs = outputs.squeeze(dim)
return outputs
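# Illustrative only: logsumexp over the last dimension of a random tensor, a numerically
# stable stand-in for torch.log(torch.exp(x).sum(dim=-1)):
#   x = torch.randn(4, 3)
#   y = logsumexp(x, dim=-1)   # shape (4,)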
def plot_joint_angle(joint_angle_resample, human_joint_angle):
fig, axs = plt.subplots(human_joint_angle.shape[1])
for c in range(len(axs)):
axs[c].plot(joint_angle_resample[:, c])
axs[c].plot(human_joint_angle[:, c])
plt.legend(['walker 2d', 'human'])
plt.show()
def read_table(file_name='../../data/joint_angle.xls', sheet_name='walk_fast'):
dfs = pd.read_excel(file_name, sheet_name=sheet_name)
import json, datetime, time, os
from glob import glob
import pylab as pl
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
from chromatogram.visualization import COLOR_MAP
import cmocean
# from chromatogram.codebook import Codebook
# import chromatogram
DATA_ROOT = "irb"
STUDY_FEATURES = {
"motion": ["acc-x", "acc-y", "acc-z"],
"phasic": ["phasic"],
"hr": ["hr"],
"bio": ["bvp", "temp"],
"kinnunen": ["getting-started", "dealing-with-difficulties",
"encountering-difficulties", "failing", "submitting", "succeeding"],
"jupyter": ["execute", "mouseevent", "notebooksaved", "select", "textchunk"],
"q": ["q1", "q2", "q3", "q4"],
"notes": ["notesmetadata"],
"emotion": ["phasic", "hr"],
"acc": ["a-x", "a-y", "a-z"],
"gyro": ["g-x", "g-y", "g-z"],
"mag": ["m-x", "m-y", "m-z"],
"iron":["m-x", "m-y", "m-z", "g-x", "g-y", "g-z", "a-x", "a-y", "a-z"]
}
###########
# CLASSES #
###########
class MTS:
def __init__(self, users, feat_class):
self.users = users
self.feat_class = feat_class
self.features = compile_features([feat_class])
self.construct()
self.samples = None
def construct(self):
umts, tsum = extractMTS(self.users, self.features)
umts, user_bounds = resampleFeatureMTS(umts, self.features)
self.data = umts
self.user_bounds = user_bounds
self.max_len = max(user_bounds.values())
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def clip(self, feat, percentile):
feat_list = self.get_feat(feat)
feat_list.sort()
f_idx = self.features.index(feat)
clip_val = feat_list[int(len(feat_list) * percentile)]
print("Clipping feature: {} @ {}th pctl={}".format(feat, int(percentile*100), clip_val))
for u in self.users:
x = self.data[u][f_idx]
x[x > clip_val] = clip_val
self.data[u][f_idx] = x
def normalize(self, feat, how, clip=None):
if feat == 'all':
feats = self.features
else:
feats = [feat]
for feat in feats:
feat_list = self.get_feat(feat)
f_idx = self.features.index(feat)
if how == 'minmax':
f_min = min(feat_list)
if clip:
f_max = clip
else:
f_max = max(feat_list)
print("Feat: {}, min={}, max={}".format(feat, f_min, f_max))
if f_min == f_max:
continue
for u, mts in self.data.items():
x = mts[f_idx]
self.data[u][f_idx] = (x - f_min) / (f_max - f_min)
if how == 'zscore':
mean = np.mean(feat_list)
std = np.std(feat_list)
print("Feat: {}, mean={}, std={}".format(feat, mean, std))
if std < 1e-6:
continue
for u, mts in self.data.items():
x = mts[f_idx]
x = (x - mean) / std
if clip:
x[x > clip] = clip
self.data[u][f_idx] = x
def to_df(self):
out = {}
for u in self.users:
for i in range(len(self.features)):
out[(u, self.features[i])] = pd.Series(self.data[u][i])
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import urllib.request
from re import sub
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from PIL import Image
from io import BytesIO
from wordcloud import WordCloud, ImageColorGenerator
import sqlite3
import streamlit.components.v1 as components
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from robobrowser import RoboBrowser
import requests
from bs4 import BeautifulSoup
import imageio as io
import base64
from numpy import polynomial as P
from sklearn.linear_model import LinearRegression
with st.echo(code_location='below'):
df = pd.read_csv("goodreads_books.csv")
from Evaluator.evaluation import evaluate, get_baseline
import pandas as pd
import matplotlib
matplotlib.use("TKAgg")
from matplotlib import pyplot as plt
def main():
distance = pd.read_csv('./distanceMF.csv')
landmark = pd.read_csv('./landmarkMF.csv')
rankings = pd.read_csv('./rankings.csv')
rankings[rankings.drop('dataset', axis=1).columns] = rankings.drop('dataset', axis=1).rank(axis=1, method='average')
landmark_distance = pd.merge(distance, landmark)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import csv
import numpy as np
import h5py
import statsmodels.api as sm
#from scipy.stats import wilcoxon
from keras import backend as K
from mnist_mutate import mutate_M2, mutate_M4, mutate_M5
from patsy import dmatrices
import pandas as pd
def cohen_d(orig_accuracy_list, accuracy_list):
nx = len(orig_accuracy_list)
ny = len(accuracy_list)
dof = nx + ny - 2
return (np.mean(orig_accuracy_list) - np.mean(accuracy_list)) / np.sqrt(((nx-1)*np.std(orig_accuracy_list, ddof=1) ** 2 + (ny-1)*np.std(accuracy_list, ddof=1) ** 2) / dof)
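# Quick check with made-up accuracy lists: cohen_d([0.91, 0.92, 0.93], [0.81, 0.82, 0.83])
# gives 10.0 (mean gap 0.10 over a pooled standard deviation of 0.01), i.e. a very large
# effect, while two identical lists give 0.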
def get_dataset(i):
dataset_file = "/home/ubuntu/crossval_set_" + str(i) + ".h5"
hf = h5py.File(dataset_file, 'r')
xn_train = np.asarray(hf.get('xn_train'))
xn_test = np.asarray(hf.get('xn_test'))
yn_train = np.asarray(hf.get('yn_train'))
yn_test = np.asarray(hf.get('yn_test'))
return xn_train, yn_train, xn_test, yn_test
def train_and_get_accuracies(param, mutation):
accuracy_list = range(0, 25)
index = 0
csv_file = "mnist_binary_search_" + str(mutation) + ".csv"
with open(csv_file, 'a') as f1:
writer=csv.writer(f1, delimiter=',',lineterminator='\n',)
for i in range(0, 5):
x_train, y_train, x_test, y_test = get_dataset(i)
for j in range(0, 5):
print("Training " + str(index) + ", for param " + str(param))
if (mutation == '2'):
accuracy, loss = mutate_M2(0, param, x_train, y_train, x_test, y_test, i, j)
elif (mutation == '4r'):
accuracy, loss = mutate_M4(0, param, x_train, y_train, x_test, y_test, i, j, 1)
elif (mutation == '4p'):
accuracy, loss = mutate_M4(0, param, x_train, y_train, x_test, y_test, i, j, 0)
elif (mutation == '5'):
accuracy, loss = mutate_M5(param, x_train, y_train, x_test, y_test, i, j)
writer.writerow([str(i), str(j), str(param), str(accuracy), str(loss)])
print("Loss " + str(loss) + ", Accuracy " + str(accuracy))
accuracy_list[index] = accuracy
index += 1
K.clear_session()
accuracy_dict[param] = accuracy_list
return accuracy_list
def is_diff_sts(orig_accuracy_list, accuracy_list, threshold = 0.05):
#w, p_value = wilcoxon(orig_accuracy_list, accuracy_list)
list_length = len(orig_accuracy_list)
zeros_list = [0] * list_length
ones_list = [1] * list_length
mod_lists = zeros_list + ones_list
acc_lists = orig_accuracy_list + accuracy_list
data = {'Acc': acc_lists, 'Mod': mod_lists}
df = pd.DataFrame(data)
import argparse
import fnet.data
import fnet.fnet_model
from fnet.utils import delta2rgb, get_stats
import json
import logging
import numpy as np
import os
import pandas as pd
import sys
import time
import warnings
from tqdm import tqdm
import matplotlib as mpl
from tifffile import imread, imsave
import scipy.misc
from sklearn.metrics import r2_score
from matplotlib import pyplot as plt
import glob
import pickle
import pdb
def evaluate_model(predictions_file = None, predictions_dir = None, path_save_dir='saved_models', save_error_maps=False, overwrite=True, reference_file = None):
if not os.path.exists(path_save_dir):
os.makedirs(path_save_dir)
if predictions_file is not None:
prediction_files = [predictions_file]
save_dirs = [path_save_dir]
train_or_tests = None
structures = None
else:
#do a directory traversal
prediction_files = glob.glob(predictions_dir + '/*/*/predictions.csv')
save_dirs = [path.replace(predictions_dir, '') for path in prediction_files]
save_dirs = [path.replace('predictions.csv', '') for path in save_dirs]
save_dirs = [path_save_dir + os.sep + path for path in save_dirs]
#do some jenky parsing
split_on_filesep = [path.split('/') for path in prediction_files]
train_or_tests = [split[-2] for split in split_on_filesep]
structures = [split[-3] for split in split_on_filesep]
# pdb.set_trace()
stats_file = path_save_dir + os.sep + 'stats.pkl'
if not overwrite and os.path.exists(stats_file):
all_stats_list, stats_per_im_list = pickle.load( open( stats_file, "rb" ) )
else:
all_stats_list = list()
stats_per_im_list = list()
for prediction_file, structure, train_or_test, save_dir in zip(prediction_files, structures, train_or_tests, save_dirs):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
pred_dir, _ = os.path.split(prediction_file)
df_preds = pd.read_csv(prediction_file)
#prepend the directory to the paths in the dataframe
path_columns = [column for column in df_preds.columns if 'path' in column]
for column in path_columns:
not_nans = ~pd.isnull(df_preds[column])
if np.any(not_nans):
df_preds[column][not_nans] = pred_dir + os.sep + df_preds[column][not_nans]
df_preds['path_delta'] = [save_dir + os.sep + str(row[0]) + '_delta.tif' for row in df_preds.iterrows()]
df_preds['path_stats'] = [save_dir + os.sep + str(row[0]) + '_stats.csv' for row in df_preds.iterrows()]
path_stats_all = save_dir + os.sep + 'stats_all.csv'
print('Working on ' + prediction_file)
path_pred_col = [column for column in df_preds.columns if 'path_prediction' in column]
if len(path_pred_col) == 0:
path_pred_col = 'path_target'
else:
path_pred_col = path_pred_col[0]
stats_per_im, stats_all = eval_images(df_preds['path_target'],
df_preds[path_pred_col],
df_preds['path_delta'],
df_preds['path_stats'],
path_stats_all)
stats_per_im['structure'] = structure
stats_per_im['train_or_test'] = train_or_test
stats_all['structure'] = structure
stats_all['train_or_test'] = train_or_test
stats_per_im_list.append(stats_per_im)
all_stats_list.append(stats_all)
all_stats_list = pd.concat(all_stats_list)
stats_per_im_list = pd.concat(stats_per_im_list)
pickle.dump( [all_stats_list, stats_per_im_list] , open( stats_file, "wb" ) )
stats_per_im_list['c_max'] = np.nan
df_cmax = None
if reference_file is not None:
all_ref_stats_list, stats_ref_per_im_list = pickle.load( open( reference_file, "rb" ) )
all_ref = all_ref_stats_list[all_ref_stats_list['train_or_test'] == 'train']
all_ref_per_im = stats_ref_per_im_list[stats_ref_per_im_list['train_or_test'] == 'train']
stats_per_im_list_train = stats_per_im_list[stats_per_im_list['train_or_test'] == 'train']
u_structures = np.unique(all_ref['structure'])
wildtypes = [structure for structure in u_structures if 'wildtype' in structure]
wildtypes_short = np.array([wildtype.split('_')[1] for wildtype in wildtypes])
vars_g = np.array([np.mean(all_ref[wildtype == all_ref['structure']]['var_target']) for wildtype in wildtypes])
c_max_out = np.zeros(len(u_structures))
noise_model = list()
for structure, i in zip(u_structures, range(len(u_structures))):
#set nan cmax values to the average post
wt_map = [wildtype in structure for wildtype in wildtypes_short]
if ~np.any(wt_map):
wt_map = wildtypes_short == 'gfp'
noise_model += wildtypes_short[wt_map].tolist()
var_g = vars_g[wt_map]
var_i = np.mean(all_ref_per_im['var_target'][all_ref_per_im['structure'] == structure])
c_max_per_img = c_max(var_i, var_g)
c_max_out[i] = np.mean(c_max_per_img)
struct_inds_ref = stats_ref_per_im_list['structure'] == structure
struct_inds = stats_per_im_list['structure'] == structure
cm = c_max(stats_ref_per_im_list['var_target'][struct_inds_ref], var_g)
#if its wildtype gfp we know its undefined
if structure == 'wildtype_gfp':
cm = np.nan
if np.sum(struct_inds) > 0:
try:
stats_per_im_list['c_max'][struct_inds] = cm
except:
pdb.set_trace()
# var_i = all_ref_per_im['var_target'][all_ref_per_im['structure'] == structure]
# r2_train = stats_per_im_list_train['r2'][stats_per_im_list_train['structure'] == structure]
# r2 = np.corrcoef(r2_train, c_max_per_img)
# if structure == 'desmoplakin':
# pdb.set_trace()
# print(structure + ': ' + str(r2[0,1]))
df_cmax = pd.DataFrame(np.stack([u_structures, noise_model, c_max_out]).T, columns=['structure', 'noise_model', 'c_max'])
df_cmax.to_csv(path_save_dir + os.sep + 'c_max.csv')
stats_per_im_list.to_csv(path_save_dir + os.sep + 'stats_per_im.csv')
fig_basename = path_save_dir + os.sep + 'stats_'
filetypes = ['.eps', '.png']
stats_to_print = ['r2']
return all_stats_list, stats_per_im_list, df_cmax
# for stat in stats_to_print:
# for filetype in filetypes:
# figure_save_path = fig_basename + stat + filetype
def c_max(var_i, var_g):
cm = 1/np.sqrt(1 + (var_g / ((var_i-var_g)+1E-16)))
return cm
def time_series_to_img(im_path_list, window_position = None, window_size = None, border_thickness = 0, im_save_path = None, border_color = 255):
'''im_path_list is a list containing a list of images'''
im_list = []
for im_t in im_path_list:
channel_list = []
for im_channel in im_t:
im = imread(im_channel)
if im.shape[1] == 1:
im = im[:,0,:,:]
if window_position is not None and window_size is not None:
i_start = window_position[0]
i_end = i_start+window_size[0]
j_start = window_position[1]
j_end = j_start+window_size[1]
im_window = im[:, i_start:i_end,j_start:j_end]
else:
im_window = im
# pdb.set_trace()
channel_list+=[im_window]
if border_thickness > 0:
channel_list += [np.ones([1, border_thickness, im_window.shape[2]])*border_color]
#these should all be channel depth of 1 or 3
max_channel_depth = 1
for channel in channel_list:
if len(channel.shape) == 3 and channel.shape[0] != max_channel_depth:
max_channel_depth = channel.shape[0]
for channel, i in zip(channel_list, range(len(channel_list))):
if len(channel.shape) < 3 or channel.shape[0] != max_channel_depth:
channel_list[i] = np.tile(channel, [max_channel_depth, 1, 1])
im_list += [np.concatenate(channel_list, 1)]
if border_thickness > 0:
im_list += [np.ones([max_channel_depth, im_list[-1].shape[1], border_thickness])*border_color]
im_out = np.concatenate(im_list, 2)
if im_save_path is not None:
scipy.misc.imsave(im_save_path, np.squeeze(im_out))
return im_out
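# Illustrative only (hypothetical file names): stitch two timepoints, each with a target
# and a prediction channel, into one montage separated by 5-px white borders:
#   time_series_to_img([['t0_target.tif', 't0_pred.tif'],
#                       ['t1_target.tif', 't1_pred.tif']],
#                      border_thickness=5, im_save_path='montage.png')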
def stack_to_slices(im_path_list, window_position = None, window_size = None, border_thickness = 0, z_interval = [0,-1,1], im_save_path = None):
'''im_path_list is a list containing a list of images, assume images are [c,y,x,z]'''
im_list = []
for im_channel in im_path_list:
channel_list = []
# for im_channel in im_t:
im_channel = np.squeeze(im_channel)
im = im_channel[z_interval[0]:z_interval[1]:z_interval[2]]
if window_position is not None and window_size is not None:
i_start = window_position[0]
i_end = i_start+window_size[0]
j_start = window_position[1]
j_end = j_start+window_size[1]
im_window = im[:, i_start:i_end,j_start:j_end]
else:
im_window = im
for z in im_window:
channel_list+=[z]
if border_thickness > 0:
channel_list += [np.ones([im_window.shape[1], border_thickness])*255]
pdb.set_trace()
im_list += [np.concatenate(channel_list, 1)]
if border_thickness > 0:
im_list += [np.ones([max_channel_depth, im_list[-1].shape[1], border_thickness])*255]
im_out = np.concatenate(im_list, 2)
if im_save_path is not None:
scipy.misc.imsave(im_save_path, np.squeeze(im_out))
return im_out
def print_stats_all(stats_per_im, figure_save_path, parameter_to_plot='R2', width = 0.34, fontsize = 8, figsize = (10,3)):
fig = plt.figure(figsize=figsize)
ax = plt.gca()
u_structures = np.unique(stats_per_im['structure'])
for index, row in stats_per_im.iterrows():
pos = np.where(u_structures == row['structure'])[0]
color = 'r'
train_or_test = row['train_or_test']
param = row[parameter_to_plot]
if train_or_test == 'test':
pos = pos + width
color = 'y'
ax.bar(pos, param, width, color=color) #, yerr=men_std)
h1 = mpl.patches.Patch(color='r', label='train')
h2 = mpl.patches.Patch(color='y', label='test')
leg = plt.legend([h1, h2], ['train', 'test'], fontsize = fontsize,
loc=1,
borderaxespad=0,
frameon=False
)
# add some text for labels, title and axes ticks
ax.set_ylabel(r'$R^2$')
ax.set_xticks(np.arange(len(u_structures)) + width / 2)
ax.set_xticklabels(np.array(u_structures))
for tick in ax.get_xticklabels():
tick.set_rotation(25)
plt.savefig(figure_save_path, bbox_inches='tight')
plt.close()
def print_stats_all_v2(stats, figure_save_path, parameter_to_plot='r2', width = 0.34, fontsize = 8, figsize = (10,3), cmax_stats=None, show_train=True):
fig = plt.figure(figsize=figsize)
ax = plt.gca()
structures = stats['structure']
u_structures = np.unique(structures)
i = 0
for structure in u_structures:
struct_stats = stats[structure == stats['structure']]
if show_train:
train_or_test = ['train', 'test']
colors = ['r', 'y']
pos = i
else:
train_or_test = ['test']
colors = ['y']
pos = i + width/2
var_i = np.mean(struct_stats['var_target'])
for group, color in zip(train_or_test, colors):
group_stats = struct_stats[struct_stats['train_or_test'] == group]
# pdb.set_trace()
bplot = plt.boxplot(group_stats[parameter_to_plot], 0, '', positions = [pos], widths=[width], patch_artist=True, whis = 1.5)
bplot['boxes'][0].set_facecolor(color)
bplot['medians'][0].set_color('k')
# pdb.set_trace()
pos = pos + width
# var_colors = ['c', 'm']
if cmax_stats is not None:
c_max = cmax_stats[cmax_stats['structure'] == structure]['c_max'].tolist()[0]
plt.plot([i, i+width], [c_max]*2, color='k')
i += 1
hlist = list()
for group, color in zip(train_or_test, colors):
h = mpl.patches.Patch(color=color, label=group)
hlist.append(h)
leg = plt.legend(hlist, train_or_test, fontsize = fontsize,
loc=1,
borderaxespad=0,
frameon=False
)
ax.set_ylabel(parameter_to_plot)
ax.set_xticks(np.arange(len(u_structures)) + width / 2)
ax.set_xticklabels(np.array(u_structures))
ax.set_xlim(-.5, len(u_structures))
for tick in ax.get_xticklabels():
tick.set_rotation(25)
plt.savefig(figure_save_path, bbox_inches='tight')
plt.close()
def eval_images(path_targets, path_preds, path_save_delta, path_save_stats, path_save_stats_all):
log_per_im = list()
im_preds = list()
im_targets = list()
pbar = tqdm(zip(range(0, len(path_preds)), path_preds, path_targets, path_save_delta, path_save_stats))
for i, path_pred, path_target, path_save_delta, path_save_stat in pbar:
if pd.isnull(path_target):
continue
im_pred = imread(path_pred)
im_target = imread(path_target)
err_map, n_pixels, stats = get_stats(im_pred, im_target)
stats['img'] = i
im_preds.append(im_pred)
im_targets.append(im_target)
delta = im_pred - im_target
df_per_im = pd.DataFrame.from_dict([stats])
df_per_im.to_csv(path_save_stat)
log_per_im.append(df_per_im)
if len(log_per_im) == 0:
return None, None
else:
log_per_im = pd.concat(log_per_im)
im_pred_all_flat = np.hstack([im.flatten() for im in im_preds])
im_target_all_flat = np.hstack([im.flatten() for im in im_targets])
err_map, n_pixels, stats = get_stats(im_pred_all_flat, im_target_all_flat)
# pdb.set_trace()
log_all = pd.DataFrame.from_dict([stats])
import copy
import warnings
import catboost as cgb
import hyperopt
import lightgbm as lgb
import pandas as pd
import xgboost as xgb
from wax_toolbox import Timer
from churnchall.constants import MODEL_DIR, RESULT_DIR
from churnchall.datahandler import DataHandleCookie, to_gradboost_dataset
from churnchall.tuning import HyperParamsTuningMixin
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
def compute_auc_lift(y_pred, y_true, target):
df_lift = pd.DataFrame({'pred': y_pred, 'true': y_true})
# Sort by prediction
if target == 1:
df_lift = df_lift.sort_values("pred", ascending=False)
elif target == 0:
df_lift = df_lift.sort_values("pred", ascending=True)
else:
raise ValueError
# compute lift score for each sample of population
nb_targets = float(df_lift[df_lift['true'] == target].shape[0])
df_lift["auclift"] = (df_lift["true"] == target).cumsum() / nb_targets
auc_lift = df_lift["auclift"].mean()
return auc_lift
def lgb_auc_lift(y_pred, y_true, target=0):
y_true = y_true.label
auc_lift = compute_auc_lift(y_pred, y_true, target)
return "AUC Lift", auc_lift, True
def xgb_auc_lift(y_pred, y_true, target=0):
y_true = y_true.get_label()
auc_lift = compute_auc_lift(y_pred, y_true, target)
# return a pair metric_name, result. The metric name must not contain a colon (:) or a space
# since preds are margin(before logistic transformation, cutoff at 0)
return "AUC_Lift", auc_lift
def get_df_importance(booster):
if hasattr(booster, "feature_name"): # lightgbm
idx = booster.feature_name()
arr = booster.feature_importance()
df = pd.DataFrame(index=idx, data=arr, columns=["importance"])
elif hasattr(booster, "get_score"): # xgboost
serie = pd.Series(booster.get_score())
df = pd.DataFrame(columns=["importance"], data=serie)
elif hasattr(booster, "get_feature_importance"): # catboost
idx = booster.feature_names_
arr = booster.get_feature_importance()
df = pd.DataFrame(index=idx, data=arr, columns=["importance"])
import os
import sys
import pandas as pd
import time
import multiprocessing
class my_dictionary(dict):
def __init__(self):
self = dict()
def add(self, key, value):
self[key] = value
def chromosomes():
chr = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13',
'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX', 'chrY']
return(chr)
def create_dir(path):
if not os.path.isdir(path):
os.system(f'mkdir {path}')
def time_date_id():
import time
cur_time = time.asctime()
cur_time = cur_time.replace(":", "-")
cur_time = cur_time.replace(" ", "_")
return(cur_time)
def fix_clusters_represent(clust, representatives, tpm_threshold, ttl_reads):
df1 = pd.DataFrame.from_records(clust)
df1 = df1.rename(columns={0 : 'chr', 1:'start',2:'stop',3:'id',4:'reads',5:'strand'})
df2 = pd.DataFrame.from_records(representatives)
clusters = pd.concat([df1, df2], axis=1)
clusters.sort_values(by=['chr', 'start', 'stop'])
# Scale for cluster lenght
# clusters['tmp'] = clusters['reads'] / (clusters['stop'] - clusters['start'])
# Don't scale for cluster length
clusters['tmp'] = clusters['reads']
# Count total reads mapped to clusters
# total_reads = clusters['tmp'].sum()
# Scale per million
total_reads = ttl_reads / 1_000_000
# Final tpm
clusters['reads'] = clusters['tmp'] / total_reads
clusters = clusters.rename(columns={'reads' : 'tpm'})
# Filter tpm >= tpm_threshold
clusters = clusters[clusters['tpm'] >= tpm_threshold]
# Round tpm %f.2
clusters['tpm'] = clusters['tpm'].round(2)
# Fix id for clusters Dataframe
clusters['id'] = [f'CTSS_{x}' for x in range(1, len(clusters)+1)]
represent = pd.DataFrame()
import pandas as pd
import numpy as np
import math
import collections
import seaborn as sn
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import matplotlib.ticker as mticker
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.tsa.api import VAR
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import pacf, plot_pacf
from scipy.stats import pearsonr
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, KNNImputer
from datetime import timedelta
from . import data_utils
def process_morning_survey(df, b_intrinsic=True, b_categorical=False):
#Get Mood categories and create new columns
df['Mood'] = pd.Categorical(df['Mood'])
df['Mood Code'] = df['Mood'].cat.codes
categories = dict(enumerate(df['Mood'].cat.categories))
for key, value in categories.items():
new_values = []
for x in df['Mood Code'].values:
if str(x) != str(-1):
if x == key:
new_values.append(True)
else:
new_values.append(False)
else:
new_values.append(np.nan)
df[value] = new_values
column_list = ['Busy', 'Committed', 'Rested']
for key, value in categories.items():
column_list.append(value)
df_selected = df[column_list]
if b_intrinsic:
#Temporary fix for data export
from pandas.core.common import SettingWithCopyWarning
import warnings
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
df_selected["Extrinsic"] = [x if (str(x).lower() != 'nan') else df["Mm_Extrinsic_Motivation"].values[i]
for i, x in enumerate(df["Extrinsic"].values)]
df_selected["Intrinsic"] = [x if (str(x).lower() != 'nan') else df["Mm_Intrinsic_Motivation"].values[i]
for i, x in enumerate(df["Intrinsic"].values)]
if b_categorical:
return pd.concat([df['Mood'], df_selected], axis=1)
else:
return df_selected
def process_daily_metrics(df):
return df[['Fitbit Step Count', 'Fitbit Minutes Worn']]
def process_previous_fitbit_data(df):
import warnings
warnings.filterwarnings('ignore')
df['Previous Count'] = df['Fitbit Step Count'].shift()
df['Previous Worn'] = df['Fitbit Minutes Worn'].shift()
df.loc[df.groupby('Subject ID')['Previous Count'].head(1).index, 'Previous Count'] = 0
df.loc[df.groupby('Subject ID')['Previous Worn'].head(1).index, 'Previous Worn'] = 0
return df
def process_fitbit_minute(df):
df = df.rename(columns={"datetime": "Date"})
df = df.drop(columns=['fitbit_account', 'username', 'date'])
return df
def process_activity_logs(df, column_names=None, b_check_exceeded=True):
import warnings
warnings.filterwarnings('ignore')
if column_names == None:
df_activity = df[['Activity Duration']]
else:
df_activity = df[column_names]
participants = list(set([p for (p, date) in df.index.values]))
indices = df_activity.index.names
#Remark: pandas.DataFrame.resample() only aggregates numeric types
df_activity['Activity Duration'] = df_activity['Activity Duration'].astype(int)
df_activity['Start Time'] = pd.to_timedelta(df['Start Time'])
frames = []
for participant in participants:
df_individual = df_activity.groupby(by='Subject ID').get_group(participant)
df_individual = df_individual.reset_index()
df_individual['Date'] = pd.to_datetime(df_individual['Date'], format='%Y-%m-%d')
duration = pd.to_timedelta(df_individual['Activity Duration'], unit='minutes')
start_time = df_individual['Start Time']
#Handle overlapping time blocks
df_individual['With Overlap'] = (start_time + duration)
df_individual['Previous with Overlap'] = df_individual['With Overlap'].shift()
df_individual['Previous with Overlap'].values[0] = 0
df_individual['Same as Previous Day'] = df_individual['Date'].shift() == df_individual['Date']
df_individual['Found Overlap'] = (df_individual['Previous with Overlap'] > df_individual['Start Time']) & (
df_individual['Same as Previous Day'] == True)
df_individual['Overlap'] = df_individual['Previous with Overlap'] - df_individual['Start Time']
df_individual['Overlap'] = df_individual['Overlap'][df_individual['Found Overlap'] == True]
df_individual['Overlap'] = df_individual['Overlap'] / np.timedelta64(1,'m')
df_individual['Overlap'] = df_individual['Overlap'].shift(-1)
df_individual['Old Duration'] = df_individual['Activity Duration'].copy()
df_individual['Activity Duration'] = df_individual['Old Duration'] - df_individual['Overlap'].fillna(0)
df_individual['Exceed Day'] = df_individual['With Overlap'] < df_individual['Start Time']
if b_check_exceeded:
if df_individual['Exceed Day'].any() == True:
print('participant ', participant, 'activity duration exceeded day')
#Build daily frame: sum up all activity durations
df_individual['Activity Duration'] = df_individual['Activity Duration'].astype(int)
df_individual = df_individual[['Date', 'Activity Duration', 'Start Time']]
df_individual = df_individual.reset_index(drop=True)
df_individual = df_individual.set_index('Date')
df_individual = df_individual.resample('D')
df_individual = df_individual.sum().fillna(0)
df_individual = df_individual.reset_index()
df_individual['Subject ID'] = participant
df_individual['Date'] = df_individual['Date'].astype(str)
df_individual = df_individual.set_index(indices)
frames.append(df_individual)
df_activity = pd.concat(frames)
df_activity = df_activity.sort_index(level='Subject ID')
df_activity.name = 'Activity Logs'
return df_activity
def get_score_mapping(df_score):
df_score = df_score.rename(columns={'study_id':'Subject ID'})
score_mapping = {}
for score_name in list(df_score.columns):
if score_name != 'Subject ID':
score_mapping[score_name] = {}
for i, subject_id in enumerate(df_score['Subject ID'].values):
score_mapping[score_name][str(subject_id)] = df_score[score_name][i]
return score_mapping
def combine_with_score(df1, df_score, b_display_mapping=False):
score_mapping = get_score_mapping(df_score)
if b_display_mapping:
for k,v in score_mapping.items():
print(k, '->', v, '\n')
df_combined = df1.copy()
df_combined = df_combined.reset_index()
for score_name in score_mapping:
participants = list(score_mapping[score_name].keys())
df_combined[score_name] = df_combined['Subject ID'].map(lambda x: score_mapping[score_name][str(x)] if str(x) in participants else np.NaN)
df_combined = df_combined.set_index(['Subject ID','Date'])
return df_combined
def concatenate_mood_fitbit_minute(df1, df_fitbit, participant):
#Concatenate df1, df_fitbit using outer join by 'Date'
#The start date is the first date of df1
df1 = df1.reset_index()
df_fitbit = df_fitbit.reset_index()
df1['Date'] = pd.to_datetime(df1['Date'])
df_fitbit['Date'] = pd.to_datetime(df_fitbit['Date'])
#v1.0
#v0.9 - All research graph via menu & mouse click
#v0.8 - Candlestick graphs
#v0.7 - Base version with all graphs and bug fixes
#v0.6
import pandas as pd
from pandas import DataFrame
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
class PrepareTestData():
def __init__(self, argFolder=None, argOutputSize='compact'):
super().__init__()
#argFolder='./scriptdata'
self.folder = argFolder + '/'
self.outputsize = argOutputSize.lower()
def loadDaily(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'daily_compact_'+argScript+'.csv'
else:
filename=self.folder + 'daily_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadIntra(self, argScript):
try:
if(self.outputsize == 'compact'):
filename=self.folder + 'intraday_5min_compact_'+argScript+'.csv'
else:
filename=self.folder + 'intraday_5min_full_'+argScript+'.csv'
csvdf = pd.read_csv(filename)
csvdf=csvdf.rename(columns={'open':'1. open', 'high':'2. high', 'low':'3. low', 'close':'4. close', 'volume': '5. volume'})
convert_type={'1. open':float, '2. high':float, '3. low':float, '4. close':float, '5. volume':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('timestamp', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadSMA(self, argScript='', argPeriod=20):
try:
#if(argPeriod == 0):
# csvdf = pd.read_csv(self.folder + 'SMA_'+argScript+'.csv')
#else:
csvdf = pd.read_csv(self.folder + 'SMA_'+str(argPeriod)+ '_'+argScript+'.csv')
convert_type={'SMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_sma(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadEMA(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'EMA_'+argScript+'.csv')
convert_type={'EMA':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadVWMP(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'VWAP_'+argScript+'.csv')
convert_type={'VWAP':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadRSI(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'RSI_'+argScript+'.csv')
convert_type={'RSI':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadStochasticOscillator(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'STOCH_'+argScript+'.csv')
convert_type={'SlowD':float, 'SlowK':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadMACD(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'MACD_'+argScript+'.csv')
convert_type={'MACD':float, 'MACD_Hist':float, 'MACD_Signal':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadAROON(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'AROON_'+argScript+'.csv')
convert_type={'Aroon Down':float, 'Aroon Up':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadBBands(self, argScript):
try:
csvdf = pd.read_csv(self.folder + 'BBANDS_'+argScript+'.csv')
convert_type={'Real Lower Band':float, 'Real Middle Band':float, 'Real Upper Band':float}
csvdf = csvdf.astype(convert_type)
csvdf.set_index('time', inplace=True)
csvdf.index = pd.to_datetime(csvdf.index)
csvdf.index.names = ['date']
#ti = TechIndicators('XXXX', output_format='pandas')
#padf, pameta = ti.get_ema(argScript)
except Exception as e:
csvdf = DataFrame()
return csvdf
def loadADX(self, argScript):
try:
csvdf =
|
pd.read_csv(self.folder + 'ADX_'+argScript+'.csv')
|
pandas.read_csv
|
import os, random, time
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, roc_auc_score, classification_report
# Auxiliary functions
def color_map(val):
if type(val) == float:
if val <= 0.2:
color = 'red'
elif val <= 0.3:
color = 'orange'
elif val >= 0.8:
color = 'green'
else:
color = 'black'
else:
color = 'black'
return 'color: %s' % color
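# Minimal usage sketch (not from the original notebook): color_map is written for cell-wise
# Styler formatting, so it would typically be applied like this. `metrics_df` is a
# placeholder name for any numeric summary frame.
def _example_color_map_usage(metrics_df):
    # colours float cells red/orange/green according to the thresholds in color_map
    return metrics_df.style.applymap(color_map)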
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
def set_up_strategy():
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
return strategy, tpu
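# Hedged usage sketch (not part of the original notebook): the returned strategy is meant to
# wrap model construction; `build_model` is a placeholder callable.
def _example_strategy_usage(build_model):
    strategy, tpu = set_up_strategy()
    with strategy.scope():
        model = build_model()
    return model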
# Model evaluation
def evaluate_model(k_fold, n_folds=1, label_col='toxic'):
metrics_df = pd.DataFrame([], columns=['Metric', 'Train', 'Valid', 'Var'])
metrics_df['Metric'] = ['ROC AUC', 'Accuracy', 'Precision', 'Recall', 'F1-score', 'Support']
for n_fold in range(n_folds):
rows = []
train_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'train']
validation_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'validation']
train_report = classification_report(train_set[label_col], train_set['pred_%d' % (n_fold+1)], output_dict=True)
valid_report = classification_report(validation_set[label_col], validation_set['pred_%d' % (n_fold+1)], output_dict=True)
rows.append([roc_auc_score(train_set[label_col], train_set['pred_%d' % (n_fold+1)]),
roc_auc_score(validation_set[label_col], validation_set['pred_%d' % (n_fold+1)])])
rows.append([train_report['accuracy'], valid_report['accuracy']])
rows.append([train_report['1']['precision'], valid_report['1']['precision']])
rows.append([train_report['1']['recall'], valid_report['1']['recall']])
rows.append([train_report['1']['f1-score'], valid_report['1']['f1-score']])
rows.append([train_report['1']['support'], valid_report['1']['support']])
metrics_df = pd.concat([metrics_df, pd.DataFrame(rows, columns=['Train_fold_%d' % (n_fold+1),
'Valid_fold_%d' % (n_fold+1)])], axis=1)
metrics_df['Train'] = metrics_df[[c for c in metrics_df.columns if c.startswith('Train_fold')]].mean(axis=1)
metrics_df['Valid'] = metrics_df[[c for c in metrics_df.columns if c.startswith('Valid_fold')]].mean(axis=1)
metrics_df['Var'] = metrics_df['Train'] - metrics_df['Valid']
return metrics_df.set_index('Metric')
def evaluate_model_single_fold(k_fold, n_fold=1, label_col='toxic'):
metrics_df = pd.DataFrame([], columns=['Metric', 'Train', 'Valid', 'Var'])
metrics_df['Metric'] = ['ROC AUC', 'Accuracy', 'Precision', 'Recall', 'F1-score', 'Support']
rows = []
fold_col = f'fold_{n_fold}'
pred_col = f'pred_{n_fold}'
train_set = k_fold[k_fold[fold_col] == 'train']
validation_set = k_fold[k_fold[fold_col] == 'validation']
train_report = classification_report(train_set[label_col], train_set[pred_col], output_dict=True)
valid_report = classification_report(validation_set[label_col], validation_set[pred_col], output_dict=True)
rows.append([roc_auc_score(train_set[label_col], train_set[pred_col]),
roc_auc_score(validation_set[label_col], validation_set[pred_col])])
rows.append([train_report['accuracy'], valid_report['accuracy']])
rows.append([train_report['1']['precision'], valid_report['1']['precision']])
rows.append([train_report['1']['recall'], valid_report['1']['recall']])
rows.append([train_report['1']['f1-score'], valid_report['1']['f1-score']])
rows.append([train_report['1']['support'], valid_report['1']['support']])
metrics_df = pd.concat([metrics_df, pd.DataFrame(rows, columns=['Train_' + fold_col,
'Valid_' + fold_col])], axis=1)
metrics_df['Train'] = metrics_df[[c for c in metrics_df.columns if c.startswith('Train_fold')]].mean(axis=1)
metrics_df['Valid'] = metrics_df[[c for c in metrics_df.columns if c.startswith('Valid_fold')]].mean(axis=1)
metrics_df['Var'] = metrics_df['Train'] - metrics_df['Valid']
return metrics_df.set_index('Metric')
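# Hedged sketch (synthetic data, not from the original notebook): the smallest k_fold frame
# the single-fold evaluator above expects -- a label column, a 'fold_1' split marker and a
# 'pred_1' column of hard predictions.
def _example_single_fold_eval():
    k_fold = pd.DataFrame({
        'toxic':  [0, 1, 0, 1, 0, 1, 0, 1],
        'fold_1': ['train'] * 4 + ['validation'] * 4,
        'pred_1': [0, 1, 1, 1, 0, 1, 0, 0],
    })
    return evaluate_model_single_fold(k_fold, n_fold=1, label_col='toxic')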
def evaluate_model_lang(df, n_folds, label_col='toxic', pred_col='pred'):
metrics_df =
|
pd.DataFrame([], columns=['Lang / ROC AUC', 'Mean'])
|
pandas.DataFrame
|
from decimal import Decimal
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestDataFrameUnaryOperators:
# __pos__, __neg__, __inv__
@pytest.mark.parametrize(
"df,expected",
[
(pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})),
(pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})),
(
pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}),
),
],
)
def test_neg_numeric(self, df, expected):
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df, expected",
[
(np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]),
],
)
def test_neg_object(self, df, expected):
# GH#21380
df = pd.DataFrame({"a": df})
expected = pd.DataFrame({"a": expected})
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": ["a", "b"]}),
pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])}),
],
)
def test_neg_raises(self, df):
msg = (
"bad operand type for unary -: 'str'|"
r"Unary negative expects numeric dtype, not datetime64\[ns\]"
)
with pytest.raises(TypeError, match=msg):
(-df)
with pytest.raises(TypeError, match=msg):
(-df["a"])
def test_invert(self, float_frame):
df = float_frame
tm.assert_frame_equal(-(df < 0), ~(df < 0))
def test_invert_mixed(self):
shape = (10, 5)
df = pd.concat(
[
pd.DataFrame(np.zeros(shape, dtype="bool")),
pd.DataFrame(np.zeros(shape, dtype=int)),
],
axis=1,
ignore_index=True,
)
result = ~df
expected = pd.concat(
[
pd.DataFrame(np.ones(shape, dtype="bool")),
pd.DataFrame(-np.ones(shape, dtype=int)),
],
axis=1,
ignore_index=True,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [-1, 1]}),
pd.DataFrame({"a": [False, True]}),
pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
],
)
def test_pos_numeric(self, df):
# GH#16073
tm.assert_frame_equal(+df, df)
tm.assert_series_equal(+df["a"], df["a"])
@pytest.mark.parametrize(
"df",
[
# numpy changing behavior in the future
pytest.param(
pd.DataFrame({"a": ["a", "b"]}),
marks=[pytest.mark.filterwarnings("ignore")],
),
pd.DataFrame({"a": np.array([-1, 2], dtype=object)}),
pd.DataFrame({"a": [Decimal("-1.0"), Decimal("2.0")]}),
],
)
def test_pos_object(self, df):
# GH#21380
tm.assert_frame_equal(+df, df)
tm.assert_series_equal(+df["a"], df["a"])
@pytest.mark.parametrize(
"df", [pd.DataFrame({"a":
|
pd.to_datetime(["2017-01-22", "1970-01-01"])
|
pandas.to_datetime
|
#
# Copyright 2019 <NAME> Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
import requests
import lxml
import urllib
from urllib.parse import quote
import spectrumpy
import json
import shapely
import pandas as pd
import geopandas as gpd
import colour
class SpatialServer:
def __init__(self, spectrum):
''' Constructor for this class. '''
self.spectrum = spectrum
self.featureService=FeatureService(self, spectrum)
self.geometryOperations=Geometry(self, spectrum)
self.thematics=Thematics(self, spectrum)
self.namedResourceService=NamedResourceService(self, spectrum)
def Spectrum(self):
"""Return the Spectrum server. """
return self.spectrum
def NamedResourceService(self):
"""Return the Named Resource Service for this server. """
return self.namedResourceService
def FeatureService(self):
"""Return the Feature Service for this server. """
return self.featureService
def GeometryOperations(self):
"""Return the Geometry Service for this server. """
return self.geometryOperations
def Thematics(self):
"""Return the Thematics Service for this server. """
return self.thematics
class NamedResourceService:
def __init__(self, spatialserver, spectrum):
''' Constructor for this class. '''
self.spatialserver=spatialserver
self.spectrum=spectrum
self.service=self.spectrum.getSoapService("soap/NamedResourceService?wsdl")
def listNamedResources(self, path):
"""Lists the named resosurces at this server within the specified path. Use '/'for the root to return all resources. """
return self.service.service.listNamedResources(path)['NamedResource']
def does_exist(self, path, name):
"""Indicates True/False if the specified named resource exists. """
try:
resources=self.service.service.listNamedResources(path)['NamedResource']
for resource in resources:
if resource["Path"] == path + "/" + name:
return True
except:
# listing failed; treat the resource as missing
return False
return False
def upsert(self, path, name, sz_resource):
"""Inserts or updates the named resource with the specified contents. """
resource = lxml.etree.fromstring(sz_resource)
if self.does_exist(path, name):
#Update
self.service.service.updateNamedResource(Resource=resource, Path=path + "/" + name)
else:
#Add
self.service.service.addNamedResource(Resource=resource, Path=path + "/" + name)
class FeatureService:
def __init__(self, spatialserver, spectrum):
''' Constructor for this class. '''
self.spatialserver=spatialserver
self.spectrum=spectrum
self.service='rest/Spatial/FeatureService'
def listTables(self):
try:
response = self.spectrum.get(self.service + '/listTableNames.json')
python_obj = json.loads(response.content)
return python_obj["Response"]["table"]
except requests.exceptions.RequestException as e:
print (e)
def describeTable(self,table):
print ("TABLE:" + table)
print ("------------------------------------------------------------------------------------")
try:
response = self.spectrum.get(self.service + '/tables' + table + '/metadata.json')
metadata = json.loads(response.content)
maxw = 10
for i in range(len(metadata["Metadata"])):
if (len(metadata["Metadata"][i]["name"]) > maxw):
maxw = len(metadata["Metadata"][i]["name"])
for i in range(len(metadata["Metadata"])):
w = maxw - len(metadata["Metadata"][i]["name"])
print (metadata["Metadata"][i]["name"], end='')
for j in range(w):
print(' ',end='')
print ("\t", end='')
print (metadata["Metadata"][i]["type"], end='')
if "totalDigits" in metadata["Metadata"][i] and "fractionalDigits" in metadata["Metadata"][i]:
print ("\t", end='')
print (" (" + str(metadata["Metadata"][i]["totalDigits"]) + "," + str(metadata["Metadata"][i]["fractionalDigits"]) + ")", end='')
print ("")
print ("")
except requests.exceptions.RequestException as e: # This is the correct syntax
print (e)
def createViewTable(self, query, path, viewName, refTables):
sz=''
sz=sz+'<NamedDataSourceDefinition version="MXP_NamedResource_1_5" xmlns="http://www.mapinfo.com/mxp" >'
sz=sz+' <ConnectionSet/>'
sz=sz+' <DataSourceDefinitionSet>'
if refTables is not None:
for refTable in refTables:
sz=sz+' <NamedDataSourceDefinitionRef id="id2" resourceID="'+refTable+'"/> '
sz=sz+' <MapinfoSQLDataSourceDefinition id="id3" readOnly="true">'
sz=sz+' <DataSourceName>'+viewName+'</DataSourceName>'
sz=sz+' <MapinfoSQLQuery>'
sz=sz+' <Query>'+query+'</Query>'
sz=sz+' </MapinfoSQLQuery>'
sz=sz+' </MapinfoSQLDataSourceDefinition>'
sz=sz+' </DataSourceDefinitionSet>'
sz=sz+' <DataSourceRef ref="id3"/>'
sz=sz+'</NamedDataSourceDefinition>'
self.spatialserver.NamedResourceService().upsert(path,viewName,sz)
def query(self, q, debug=False, pageLength=0):
class FeatureStream:
def __init__ (self, service, spatialserver, spectrum, q, pageLen, paging, dbg):
self.spatialserver=spatialserver
self.service=service
self.spectrum=spectrum
self.pageNum=0
self.pglen=pageLen
self.featureCollection=None
self.q=q
self.first=True
self.done=False
self.paging=paging
self.iter_numReturned=0
self.total=0
self.debug=dbg
def __iter__(self):
return self
def __querynext__(self):
try:
self.iter_numReturned = 0
done=False
while not done:
self.iter_numReturned=0
done=True
url = self.service + '/tables/features.json?'
if self.pglen > 0:
url = url + 'pageLength=' + str(self.pglen) + '&page=' + str(self.pageNum) + '&'
url = url +'q=' + self.q
if self.debug:
print (url)
response = self.spectrum.get(url)
fc = response.json()
if fc is None:
fc = {'features':[]}
if 'features' not in fc:
fc['features']=[]
self.iter_numReturned+=len(fc['features'])
if self.first:
self.first=False
self.featureCollection=fc
elif self.paging:
self.featureCollection=fc
else:
for feature in fc['features']:
self.featureCollection['features'].append(feature)
if self.iter_numReturned == self.pglen and not self.paging:
self.pageNum+=1
done=False
except requests.exceptions.RequestException as e: # This is the correct syntax
print (e)
return self.featureCollection
def __next__(self):
if self.done:
raise StopIteration
else:
self.pageNum += 1
fc = self.__querynext__()
if fc is None or self.iter_numReturned == 0:
raise StopIteration
self.total+=self.iter_numReturned
return fc
paging = pageLength > 0
if pageLength == 0:
pageLength = 1000
fs = FeatureStream(self.service, self.spatialserver, self.spectrum, urllib.parse.quote(q), pageLength, paging, debug)
if not paging:
fc = fs.__next__()
return fc
else:
return fs
def get(self, path):
try:
response = self.spectrum.get(self.service + path)
return response
except requests.exceptions.RequestException as e:
print (e)
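# Hedged usage sketch (not part of the library): how the classes above are typically wired
# together. The Spectrum connection object and the table path in the query are placeholders,
# not documented values.
def _example_feature_service_usage(spectrum):
    server = SpatialServer(spectrum)
    fs = server.FeatureService()
    tables = fs.listTables()                                            # available table names
    fc = fs.query('SELECT * FROM "/Samples/NamedTables/WorldTable"')    # GeoJSON-like dict
    gdf = server.GeometryOperations().GeoJSON2GeoDataFrame(fc)          # shapely/GeoPandas frame
    return tables, gdf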
class Geometry:
def __init__(self, spatialserver, spectrum):
self.spatialserver=spatialserver
self.spectrum=spectrum
def __coordinateArray2tupleArray(self, coordinates):
tuple_array=[]
for i in range(len(coordinates)):
tuple_array.append((coordinates[i][0],coordinates[i][1]))
return tuple_array
def __arrayOfCoordinateArray2arrayOfTupleArray(self, coordinateArray):
arrayOfTupleArray=[]
for i in range(len(coordinateArray)):
arrayOfTupleArray.append(self.__coordinateArray2tupleArray(coordinateArray[i]))
return arrayOfTupleArray
def __arrayOfArrayOfCoordinateArray2arrayOfArrayOfTupleArray(self, coordinateArray):
arrayOfArrayOfTupleArray=[]
for i in range(len(coordinateArray)):
arrayOfArrayOfTupleArray.append(self.__arrayOfCoordinateArray2arrayOfTupleArray(coordinateArray[i]))
return arrayOfArrayOfTupleArray
def __ToPolygon(self, coordinates):
ext=[]
ints=[]
for i in range(len(coordinates)):
if i == 0:
ext = self.__coordinateArray2tupleArray(coordinates[i])
else:
ints.append(self.__coordinateArray2tupleArray(coordinates[i]))
return shapely.geometry.Polygon(ext, ints)
def __ToMultiPolygon(self, coordinates):
polys=[]
for i in range(len(coordinates)):
polys.append(self.__ToPolygon(coordinates[i]))
return shapely.geometry.MultiPolygon(polys)
def __ToPoint(self, coordinates):
shape=shapely.geometry.Point(coordinates[0],coordinates[1])
return shape
def __ToMultiPoint(self, coordinates):
return shapely.geometry.MultiPoint(self.__coordinateArray2tupleArray(coordinates))
def __ToLineString(self, coordinates):
return shapely.geometry.LineString(self.__coordinateArray2tupleArray(coordinates))
def __ToMultiCurve(self, coordinates):
lines=[]
for i in range(len(coordinates)):
lines.append(self.__ToLineString(coordinates[i]))
return shapely.geometry.MultiLineString(lines)
def ToGeometry(self, geometry):
if geometry is None:
return None
# TODO: Set the crs
gtype = geometry['type']
coords = geometry['coordinates']
if gtype == 'MultiPolygon':
return self.__ToMultiPolygon(coords)
elif gtype == 'Point':
return self.__ToPoint(coords)
elif gtype == 'MultiPoint':
return self.__ToMultiPoint(coords)
elif gtype == 'MultiLineString':
return self.__ToMultiCurve(coords)
# elif gtype == 'Collection': TODO
def GeoJSON2GeoDataFrame(self, fc):
hasGeometry=False
column_list=[]
data_list=[]
if fc['features'] is not None:
if len(fc['features']) > 0:
for propset in fc['features'][0]['properties']:
column_list.append(propset)
if fc['features'][0]['geometry'] is not None:
column_list.append('geometry')
hasGeometry=True
for feature in fc['features']:
record=[]
for prop in feature['properties']:
record.append(feature['properties'][prop])
if feature['geometry'] is not None:
record.append(self.ToGeometry(feature['geometry']))
if not hasGeometry or feature['geometry'] is not None:
data_list.append(record)
if not hasGeometry:
gdf=pd.DataFrame(data_list,columns=column_list)
else:
gdf=gpd.GeoDataFrame(data_list,columns=column_list,crs={'init': 'epsg:4326'})
return gdf
class Thematics:
def __init__(self, spatialserver, spectrum):
self.spatialserver=spatialserver
self.spectrum=spectrum
def generate_range_theme_buckets(self, data_series, n_bins, start_color, end_color):
quantiles =
|
pd.qcut(data_series, n_bins, retbins=True)
|
pandas.qcut
|
import sys
sys.path.append('.')
# stdlib
import os
from glob import glob
from tqdm.auto import tqdm
import json
import pickle
from collections import defaultdict
import time
import argparse
# numlib
import numpy as np
import pandas as pd
from ensemble_boxes import nms, weighted_boxes_fusion
#from include import *
from utils.file import Logger
from utils.him import downsize_boxes, upsize_boxes
def am_mean(data, ws):
return np.sum([d*w for d, w in zip(data, ws)])/np.sum(ws)
def gm_mean(data, ws):
return np.prod([d**w for d, w in zip(data, ws)])**(1./np.sum(ws))
def am_gm_mean(data, ws):
return 0.5*(am_mean(data, ws) + gm_mean(data, ws))
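# Hedged numeric sketch (placeholder values): weighted arithmetic vs geometric mean of two
# scores 0.8 and 0.6 with weights 2 and 1.
def _example_weighted_means():
    data, ws = [0.8, 0.6], [2, 1]
    # approx (0.733, 0.727, 0.730)
    return am_mean(data, ws), gm_mean(data, ws), am_gm_mean(data, ws)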
def str2boxes_image(s, with_none=False):
"""
output: [prob x_min y_min x_max y_max]
range x,y: [0, +inf]
"""
s = s.strip().split()
s = np.array([s[6*idx+1:6*idx+6] for idx in range(len(s)//6) \
if s[6*idx] == 'opacity' or with_none]).astype(np.float32)
if len(s) == 0: print('Warning: image without box!')
return s
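# Hedged sketch (made-up prediction string): 'opacity' entries are parsed into
# [prob, x_min, y_min, x_max, y_max]; 'none' entries are dropped unless with_none=True.
def _example_str2boxes():
    s = 'opacity 0.9 10 20 110 220 none 0.1 0 0 1 1'
    return str2boxes_image(s)   # -> array([[0.9, 10., 20., 110., 220.]], dtype=float32)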
def str2boxes_df(df, with_none=False):
return [str2boxes_image(row['PredictionString'], with_none=with_none) \
for _, row in df.iterrows()]
def boxes2str_image(boxes):
if len(boxes) == 0:
return ''
return ' '.join(np.concatenate([[['opacity']]*len(boxes), boxes], \
axis=1).reshape(-1).astype('str'))
def boxes2str_df(boxes, image_ids=None):
strs = [boxes2str_image(bs) for bs in boxes]
if image_ids is None:
return strs
return pd.DataFrame({'id': image_ids, 'PredictionString': strs})
def check_num_boxes_per_image(df=None, csv_path=None, filter_rows=True):
assert df is not None or csv_path is not None
if df is None:
df = pd.read_csv(csv_path)
if filter_rows:
df_image = df[df['id'].apply(lambda x: x.endswith('image'))].reset_index(drop=True)
else:
df_image = df
all_boxes = str2boxes_df(df_image, with_none=False)
all_boxes = [boxes for boxes in all_boxes if len(boxes) > 0 ]
return np.concatenate(all_boxes).shape[0] / len(df_image)
def extract_none_probs(opacity_probs):
none_probs = []
for image_probs in opacity_probs:
none_prob = np.prod(1 - np.array(image_probs))
none_probs.append(none_prob)
return none_probs
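# Hedged sketch (placeholder probabilities): the "none" probability of an image is the product
# of (1 - p) over its opacity boxes, e.g. boxes at 0.8 and 0.5 give (0.2 * 0.5) = 0.1.
def _example_none_prob():
    return extract_none_probs([[0.8, 0.5]])   # -> [~0.1]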
def filter_rows(df, mode):
assert mode in ['study', 'image']
df = df.copy()
df = df[df['id'].apply(lambda x: x.endswith(mode))].reset_index(drop=True)
return df
def ensemble_image(dfs, df_meta, mode='wbf', \
iou_thr=0.5, skip_box_thr=0.001, weights=None, do_filter=True):
# the flag is renamed from `filter_rows` so it no longer shadows the module-level
# filter_rows() helper that is called just below
if do_filter:
df_meta = filter_rows(df_meta, mode='image')
dfs = [filter_rows(df, mode='image') for df in dfs]
image_ids, prediction_strings, all_scores = [], [], []
num_boxes = 0
for i, row in tqdm(df_meta.iterrows(), total=len(df_meta)):
image_id = row['id']
s = []
for df in dfs:
if np.sum(df['id']==image_id) > 0:
ss = df.loc[df['id']==image_id, 'PredictionString'].values[0]
if type(ss) == str:
s.append(ss)
else:
s.append('')
else:
s.append('')
boxes, scores, labels = [], [], []
for ss in s:
boxes_, scores_, labels_ = [], [], []
ss = str2boxes_image(ss, with_none=False)
if len(ss) > 0:
labels_ = [0]*len(ss)
scores_ = ss[:, 0].tolist()
boxes_ = downsize_boxes(ss[:, 1:], row['w'], row['h'])
labels.append(labels_)
boxes.append(boxes_)
scores.append(scores_)
if mode == 'wbf':
boxes, scores, labels = weighted_boxes_fusion(boxes,
scores,
labels,
iou_thr=iou_thr,
weights=weights,
skip_box_thr=skip_box_thr)
elif mode == 'nms':
boxes_, scores_, labels_, weights_ = [], [], [], []
for j, b in enumerate(boxes):
if len(b) > 0:
boxes_.append(b)
scores_.append(scores[j])
labels_.append(labels[j])
if weights is not None:
weights_.append(weights[j])
if weights is None:
weights_ = None
boxes, scores, labels = nms(boxes_,
scores_,
labels_,
iou_thr=iou_thr,
weights=weights_)
if len(boxes) == 0:
image_ids.append(image_id)
prediction_strings.append('')
print('Warning: no box found after boxes fusion!')
continue
num_boxes += len(boxes)
all_scores.append(scores)
boxes = upsize_boxes(boxes, row['w'], row['h'])
s = []
for box, score, label in zip(boxes, scores, labels):
s.append(' '.join(['opacity', str(score), ' '.join(box.astype(str))]))
image_ids.append(image_id)
prediction_strings.append(' '.join(s))
df_pred = pd.DataFrame({'id': image_ids, 'PredictionString': prediction_strings})
return df_pred, num_boxes, np.concatenate(all_scores).tolist()
def ensemble_study(dfs, weights=None, mean='am'):
dfs = [filter_rows(df, mode='study') for df in dfs]
study_ids = dfs[0]['id'].values
if weights is None:
weights = [1.] * len(dfs)
weights = np.array(weights) / np.sum(weights)
ens_probs_am = np.zeros((len(study_ids), 4), dtype=np.float32)
ens_probs_gm = np.ones((len(study_ids), 4), dtype=np.float32)
for df, w in zip(dfs, weights):
df = df[df['id'].apply(lambda x: x.endswith('study'))].reset_index(drop=False)
for i, id_ in enumerate(study_ids):
s = df.loc[df['id']==id_, 'PredictionString'].values[0]
preds = s.strip().split()
for idx in range(len(preds)//6):
ens_probs_am[i, cls_map[preds[6*idx]]] += float(preds[6*idx + 1]) * w
ens_probs_gm[i, cls_map[preds[6*idx]]] *= float(preds[6*idx + 1]) ** w
# apply different ensemble methods
if mean == 'am':
ens_probs = ens_probs_am
elif mean == 'gm':
ens_probs = ens_probs_gm
elif mean == 'am_gm':
ens_probs = 0.5*(ens_probs_am + ens_probs_gm)
df = pd.DataFrame({'id': study_ids})
df[class_names] = ens_probs
df['PredictionString'] = df.apply(lambda row: \
f'negative {row["negative"]} 0 0 1 1 typical {row["typical"]} 0 0 1 1 \
indeterminate {row["indeterminate"]} 0 0 1 1 atypical {row["atypical"]} 0 0 1 1', \
axis=1)
df = df[['id', 'PredictionString']]
return df
def extract_negative_prob(df, std2img):
"""
Args:
df: study-level df
std2img: dict maps from study_id to image_id
Returns:
df with image-level ids and mapped negative probabilities
"""
df = filter_rows(df, mode='study')
image_ids, negative_probs = [], []
for study_id, img_ids in std2img.items():
s = df.loc[df['id']==study_id + '_study', 'PredictionString'].values[0]
s = s.strip().split()
for idx in range(len(s)//6):
if s[6*idx] == 'negative':
neg_prob = float(s[6*idx + 1])
break
image_ids.extend([img_id + '_image' for img_id in img_ids])
negative_probs.extend([neg_prob]*len(img_ids))
return pd.DataFrame({'id': image_ids, 'negative': negative_probs})
def postprocess_image(df_image, df_study, std2img, df_none=None, \
none_cls_w=0., none_dec_w=0.5, neg_w=0.5, \
detect_w=0.84, clsf_w=0.84):
df_image = filter_rows(df_image, mode='image')
df_study = filter_rows(df_study, mode='study')
if df_none is None:
none_cls_w = 0.
none_cls_w, none_dec_w, neg_w = \
none_cls_w/(none_cls_w + none_dec_w + neg_w), \
none_dec_w/(none_cls_w + none_dec_w + neg_w), \
neg_w/(none_cls_w + none_dec_w + neg_w)
detect_w, clsf_w = \
detect_w/(detect_w + clsf_w), \
clsf_w/(detect_w + clsf_w)
df_negative = extract_negative_prob(df_study, std2img)
df_image = df_image.merge(df_negative, on='id', how='left')
if none_cls_w > 0.:
df_image = df_image.merge(df_none, on='id', how='left')
new_nones = []
for i, row in df_image.iterrows():
if row['PredictionString'] == 'none 1 0 0 1 1' \
or row['PredictionString'] == '' \
or type(row['PredictionString']) != str:
df_image.loc[i, 'PredictionString'] = f'none {row["none"]} 0 0 1 1'
#df_image.loc[i, 'new_none'] = row["none"]
new_nones.append(row["none"])
print('no opacity found!')
continue
else:
# extract none probabilities
none_dec_prob = 1.
bboxes = row['PredictionString'].strip().split()
for idx in range(len(bboxes)//6):
if bboxes[6*idx] == 'opacity':
none_dec_prob *= 1 - float(bboxes[6*idx + 1])
# modify opacity boxes
if none_cls_w > 0.:
post_none_prob = none_cls_w*row["none"] + none_dec_w*none_dec_prob + neg_w*row["negative"]
else:
post_none_prob = none_dec_w*none_dec_prob + neg_w*row["negative"]
for idx in range(len(bboxes)//6):
if bboxes[6*idx] == 'opacity':
bboxes[6*idx + 1] = str(float(bboxes[6*idx + 1])**detect_w * (1 - post_none_prob)**clsf_w)
df_image.loc[i, 'PredictionString'] = ' '.join(bboxes)
# add none boxes
df_image.loc[i, 'PredictionString'] += f' none {post_none_prob} 0 0 1 1'
# act none probability for ensemble with negative in study-level
if none_cls_w > 0.:
new_nones.append(none_cls_w/(none_cls_w + none_dec_w)*row["none"] + \
(none_dec_w/(none_cls_w + none_dec_w))*none_dec_prob)
else:
new_nones.append(none_dec_prob)
df_none = pd.DataFrame({'id': df_image['id'].values, 'none': new_nones})
return df_image, df_none
def postprocess_study(df, df_none, std2img, neg_w=0.7, none_w=0.3):
"""
Args:
df: study-level prediction
df_none: image-level none probability
std2img: dict maps from study_id to image_id
"""
df = filter_rows(df, mode='study')
df_none = filter_rows(df_none, mode='image')
neg_w, none_w = \
neg_w/(neg_w + none_w), \
none_w/(neg_w + none_w)
# extract none probability for each study
study_ids, none_probs = [], []
for study_id, image_ids in std2img.items():
image_ids_ = [img_id + '_image' for img_id in image_ids]
study_none_prob = df_none.loc[df_none['id'].isin(image_ids_), 'none'].mean()
study_ids.append(study_id + '_study')
none_probs.append(study_none_prob)
df_study_none = pd.DataFrame({'id': study_ids, 'none': none_probs})
df = pd.merge(df, df_study_none, on='id', how='left')
for i, row in df.iterrows():
#----modify negative probability
bboxes = row['PredictionString'].strip().split()
for idx in range(len(bboxes)//6):
if bboxes[6*idx] == 'negative':
bboxes[6*idx + 1] = str(neg_w*float(bboxes[6*idx + 1]) + none_w*float(row['none']))
break
df.loc[i, 'PredictionString'] = ' '.join(bboxes)
df = df[['id', 'PredictionString']]
return df
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('-study-csv', type=str, nargs='+', help='paths to study-level csv', required=True)# study-level paths
parser.add_argument('-image-csv', type=str, nargs='+', help='paths to image-level csv', required=True)# image-level paths
parser.add_argument('-sw', type=float, nargs='+', help='study-level ensemble weights', required=True)# study-level weights
parser.add_argument('-iw', type=float, nargs='+', help='image-level ensemble weights', required=True)# img-level weights
parser.add_argument('-std2img', type=str, help='path to study2image pickle', required=True)# std2img dict
parser.add_argument('-img2shape', type=str, help='path to image2shape pickle', required=True)# meta data path
parser.add_argument('--iou-thr', type=float, default=0.6, help='boxes fusion iou threshold')# iou thres
parser.add_argument('--conf-thr', type=float, default=0.0001, help='boxes fusion skip box threshold')# conf thres
parser.add_argument('--none-csv', type=str, help='path to none csv in case of using separate none probability')
return parser.parse_args()
def main():
t0 = time.time()
opt = parse_opt()
assert len(opt.study_csv) == len(opt.sw), f'len(study_csv) == {len(opt.study_csv)} != len(sw) == {len(opt.sw)}'
assert len(opt.image_csv) == len(opt.iw), f'len(image_csv) == {len(opt.image_csv)} != len(iw) == {len(opt.iw)}'
# logging
log = Logger()
os.makedirs('../logging', exist_ok=True)
log.open(os.path.join('../logging', 'post_processing.txt'), mode='a')
log.write('STUDY-LEVEL\n')
log.write('weight\tpath\n')
for p, w in zip(opt.study_csv, opt.sw):
log.write('%.2f\t%s\n'%(w, p))
log.write('\n')
log.write('IMAGE-LEVEL\n')
log.write('weight\tpath\n')
for p, w in zip(opt.image_csv, opt.iw):
log.write('%.2f\t%s\n'%(w, p))
log.write('\n')
log.write('iou_thr=%.4f,skip_box_thr=%.4f\n'%(opt.iou_thr, opt.conf_thr))
# prepare data
dfs_study = [pd.read_csv(df_path) for df_path in opt.study_csv]
dfs_image = [pd.read_csv(df_path) for df_path in opt.image_csv]
with open(opt.std2img, 'rb') as f:
std2img = pickle.load(f)
with open(opt.img2shape, 'rb') as f:
img2shape = pickle.load(f)
ids, hs, ws = [], [], []
for k, v in img2shape.items():
ids.append(k + '_image')
hs.append(v[0])
ws.append(v[1])
df_meta = pd.DataFrame({'id': ids, 'w': ws, 'h': hs})
# post-process
df_study = ensemble_study(dfs_study, weights=opt.sw)
df_image = ensemble_image(dfs_image, df_meta, mode='wbf', \
iou_thr=opt.iou_thr, skip_box_thr=opt.conf_thr, weights=opt.iw)[0]
df_image, df_none = postprocess_image(df_image, df_study, std2img)
df_study = postprocess_study(df_study, df_none, std2img)
df_sub =
|
pd.concat([df_study, df_image], axis=0, ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 11:04:14 2017
@author: rdk10
"""
import numpy as np
import pandas as pd
import sitchensis.Functions as f
import pdb #for debugging
import numbers as nu
def interpTrunks(trunkDat, logFileName, interpCol = 'dist'):
""" This function brings in a subset of trunk data with missing values of distance or azimuth and outputs interpolated values based on nighboring measurements"""
#INPUTS:
#trunkDat = trunk data from raw field data
#interpCol = defaults to dist but can also be set to azi to interp azimuths
#locate indices where there is a number in the azi or dist column
t1 = trunkDat['{0}'.format(interpCol)].astype(object) != 'interp'
t2 = trunkDat['{0}'.format(interpCol)].notnull()
test = f.vectorBool(t1,t2,'and')
refInd = trunkDat[test].index.tolist()
#Tests to see if values needing interpolation are at the base or top of a trunk, If so a warning is issued (there is nothing to calc from)
if all([ind < trunkDat.index.max() for ind in refInd]):
f.print2Log(logFileName, 'The lowest measurement in the tree needs to be interpolated but there is nothing to interpolate to')
elif all([trunkDat.index.min() < ind for ind in refInd]):
f.print2Log(logFileName, 'The highest measurement in the tree needs to be interpolated but there is nothing to interpolate to')
if all(test) == True:
return(trunkDat)
#refInd = trunkDat[trunkDat['{0}'.format(interpCol)] != 'interp'].index.tolist() #locate indices where dist or azi are NA, located bordering numbers, interpolate
else:
#Convert distances to number if still string after operation, convert azis to int
for i in range(len(refInd)-1):
rowCount = refInd[i]-refInd[i+1]-1 # number of rows needing calculation within each bounded section of interp heights
if rowCount < 1:
pass
else:
interpInd = [ x + 1 + refInd[i+1] for x in list(range(rowCount))] #Indices of current rows to calc
baseInd = refInd[i]
topInd = refInd[i+1]
for j in interpInd:
baseDist = trunkDat['{0}'.format(interpCol)].loc[baseInd] #ref
baseHt = trunkDat['height'].loc[baseInd] #ref
topDist = trunkDat['{0}'.format(interpCol)].loc[topInd] #ref
topHt = trunkDat['height'].loc[topInd] #ref
targetHt = trunkDat['height'].loc[j] #calcRow
trunkDat.loc[j,'{0}'.format(interpCol)] = (targetHt-topHt)*(baseDist-topDist)/(baseHt-topHt)+topDist #interpolate
trunkDat.loc[j,'ref type'] = 'p2p'
if interpCol == 'dist':
trunkDat = trunkDat.assign(dist = trunkDat['{0}'.format(interpCol)].astype(np.float64))
else:
trunkDat = trunkDat.assign(azi = trunkDat['{0}'.format(interpCol)].astype(np.float64))
return(trunkDat)
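# Hedged numeric sketch (placeholder heights/distances): the interpolation above is plain
# linear interpolation of the measured column against height between the bounding rows.
def _example_trunk_interp():
    baseHt, baseDist = 10.0, 2.0
    topHt, topDist = 20.0, 1.0
    targetHt = 15.0
    return (targetHt - topHt) * (baseDist - topDist) / (baseHt - topHt) + topDist   # -> 1.5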
def trunkRoutine(trunkDat, custRefs, logFileName):
##Inputs are all the trunk data, and any custom reference data
##Output is the trunk data with all calculations of X,Y,Z positions.
#Height-sorted Dictionary of dataframes, one for each trunk
#1)Create subsets of data based on trunk name.
mainTrunks = f.breakDataByName(trunkDat)
#interpolate values that need it. (dist, azi), locate integers between indexes where we have info and calc
for trunk in mainTrunks:
trunkDat = mainTrunks[trunk]
#first change all p2f to p2p then interp where there is a value in dist
tempTrunk = trunkDat[pd.notnull(trunkDat['dist'])].copy() #Copy to avoid chained indexing error
for i in tempTrunk.index:
if trunkDat.at[i,'ref type'] == 'p2f': #if p2f convert to p2p
trunkDat.at[i,'dist'] = trunkDat.at[i,'dist'] + trunkDat.at[i,'diam']/200 #Add in the radius in m from cm
trunkDat.at[i,'ref type'] = 'p2p' #Change all the labels
mainTrunks[trunk] = interpTrunks(trunkDat, logFileName,'dist')
mainTrunks[trunk] = interpTrunks(trunkDat,logFileName,'azi')
#Bring the dataframe back together and add columns
reorder = trunkDat.columns.values
trunkDat = pd.concat(list(mainTrunks.values()))
trunkDat = trunkDat[reorder]
trunkDat = pd.concat([trunkDat, pd.DataFrame(columns = ['ref x', 'ref y', 'ref z', 'ref radius','x','y','z'])])
reorder = np.append(reorder[reorder!='notes'],['ref x', 'ref y', 'ref z', 'ref radius','x','y','z','notes'])
trunkDat = trunkDat[reorder] #resort columns
#Seperate out the calculation values we'll need
calcVals = trunkDat.loc[:,['dist','azi', 'radius','ref type']]
#First calculate where refs == Ground or where refs = a custom ref, then calculate the rest in height order
if len(trunkDat.index[trunkDat['ref'] == 'G'].tolist())>0:
grdInd = trunkDat.index[trunkDat['ref'] == 'G'].tolist()
refVals = [0,0,0]
trunkDat = calcTrunkVals(trunkDat, refVals, calcVals, grdInd)
#This is for all custom references
if len(custRefs) > 0:
for matchRef in custRefs['name']:
if all(trunkDat['ref'].str.contains(matchRef) == False):
print('\nMake sure the reference name in the cust refs tab and in the main trunk tab are spelled the same. There is no match for "{0}" in references for the main trunks'.format(matchRef))
f.print2Log(logFileName, '\nMake sure the reference name in the cust refs tab and in the main trunk tab are spelled the same. There is no match for "{0}" in references for the main trunks'.format(matchRef))
else:
Ind = trunkDat.index[trunkDat['ref'] == matchRef]
refVals = [custRefs['x'].iloc[0],custRefs['y'].iloc[0],custRefs['radius'].iloc[0]] #custom reference x, y, and radius
trunkDat = calcTrunkVals(trunkDat, refVals, calcVals, Ind)
#Now locate other references and try to calculate them.
#1) get indices of references with @ symbol
if any(pd.isnull(trunkDat['ref'])):
print('There are missing references for the main trunk, these must be filled in prior to running the program.')
f.print2Log(logFileName, 'There are missing references for the main trunk, these must be filled in prior to running the program.')
trunkSub = trunkDat[trunkDat['ref'].str.contains("@")]
trunkSub1 = trunkSub['ref'].str.upper()
trunkSub1 = trunkSub1.drop_duplicates()
#2)Seperate out the ref from the height
refNames = trunkSub1.str.split('@').str.get(0) #get the top names
refHeights = trunkSub1.str.split('@').str.get(1) #get the height
refNames = refNames.replace(' ','')
refHeights = refHeights.replace(' ','')
refHeights = refHeights.astype(float)
#################
# break if more than 10 iterations and post error message
# while condition might is that there are still uncalculated x and y vals for the ref locations.
numNan = trunkSub.loc[:,['x','y']].isnull().sum().sum() #number of uncalculated values in the xy cols of references to other heights (e.g. M@50)
counter = 0
while numNan > 0 and counter < numNan/2+1: #this loops through all the refNames and calculates the x,y for each row where it matches the ref
#i = 0
####################
for i in range(len(refNames)):
#3)locate the x,y,z of the reference
ref = refNames.iloc[i]
ht = refHeights.iloc[i]
if ref not in mainTrunks:
f.print2Log(logFileName, 'Double check references, reference {0} is not part of the main trunk'.format(ref))
#pass
elif len(mainTrunks[ref][mainTrunks[ref]['height']== ht]) == 0:
f.print2Log(logFileName,'There is no {0} m height on trunk {1}, double check your reference to {1}@{0}'.format(ht,ref))
#pass
else:
ind = mainTrunks[ref][mainTrunks[ref]['height']== ht].index[0] #index of reference row
refVals = [trunkDat['x'].loc[ind],trunkDat['y'].loc[ind],trunkDat['radius'].loc[ind]] #reference values x,y, radius
#test if the ref values are calculated yet, if not get them on the next go.
if any(np.isnan(refVals)):
pass
else:
##OK, this is the row I need, now I just need to save the info and use code from custom refs.
#4) calculate
refName = trunkDat['ref'].str.split('@').str.get(0)
calcInd = list(trunkDat[refName==ref].index) #Matching Indices in calc data with ref data
trunkDat = calcTrunkVals(trunkDat, refVals, calcVals, calcInd)
#i = i + 1
counter = counter + 1
#print(ref + '@' + ht)
trunkSub = trunkDat[trunkDat['ref'].str.contains("@")]
numNan = trunkSub.loc[:,['x','y']].isnull().sum().sum() #caluclates the number of missing calcs
#print(counter)
#print(numNan)
#print(counter)
#print(numNan)
#print(trunkDat.loc[:, ['name','x','y','dist','height']])
if counter == 10 and numNan > 0:
print("There is at least one trunk references using the @ symbol that was not calculateable, double check the input file for correct referencing")
return(trunkDat)
#Break up dataset into dictionary
def arrangeTrunkData(trunkDat, logFileName):
"""Brings in a dataframe of the trunk format as collected in the field and returns a dictionary of dataframes reorganized into the segment format"""
mainTrunks = f.breakDataByName(trunkDat)
for trunk in mainTrunks:
#For each dataframe (trunk Name) I need to copy indices 1:n and cbind to indices 0:n-1, I also need to add a 'base', or 'top' to each col header
#mainTrunks[trunk].columns
trunkSub = mainTrunks[trunk].sort_values(by = 'height', ascending = False)
rows = len(trunkSub)
if rows == 1: #Check to make sure all trunks have at least two rows.
delTrunk = trunk
else:
renameCols = ['height','diam','radius','dist','azi','ref','ref radius', 'ref type','ref x', 'ref y', 'ref z','x','y', 'z']
colIndices = [trunkSub.columns.get_loc(s) for s in renameCols]
#setup and fill base information
baseTrunks = trunkSub.iloc[1:rows]
#renameColumns that need it
newNames = ['base '+ name for name in renameCols]
cvals = baseTrunks.columns.values
cvals[colIndices]=newNames
baseTrunks.columns = cvals
###Sometimes there are no notes, so I need to take that into account
ind1 = baseTrunks['notes'].index[0]
if type(baseTrunks['notes'].iloc[0])==str and type(trunkSub['notes'].iloc[0])==str:
baseTrunks.at[ind1,'notes'] = 'Base Note: ' + baseTrunks['notes'].iloc[0] + ' Top note: ' + trunkSub['notes'].iloc[0]
elif type(baseTrunks['notes'].iloc[0])!=str and type(trunkSub['notes'].iloc[0])==str:
baseTrunks.at[ind1,'notes'] = 'Top note: ' + trunkSub['notes'].iloc[0]
elif type(baseTrunks['notes'].iloc[0])!=str and type(trunkSub['notes'].iloc[0])!=str:
pass
#Setup and fill top information
topTrunks = trunkSub.iloc[0:rows-1,colIndices]
newNames = ['top '+name for name in renameCols]
topTrunks.columns = newNames
#combind datasets
reorderBase = baseTrunks.columns.values
reorderTop = topTrunks.columns.values
reorder = np.append(reorderBase[reorderBase!='notes'],np.append(reorderTop,'notes'))
mainTrunks[trunk] = pd.concat([baseTrunks.reset_index(drop = True),topTrunks.reset_index(drop = True)], axis = 1)
mainTrunks[trunk] = mainTrunks[trunk][reorder]
#trunkDat = trunkDat[reorder]
if 'delTrunk' in locals():
mainTrunks.pop(delTrunk, None)
f.print2Log(logFileName,'Warning: trunk "{0}" only had one line of data so will be deleted\n'.format(delTrunk))
return(mainTrunks)
def calcTrunkVals (trunkDat, refVals, calcVals, indices):
###This function brings in a data.frame, reference values, calculation values and the indices that need calculating and outputs a data.frame with the values calculated.
##there is a description of what the reference values, calculation values are in the calcPosition function.
##refVals is a list, calcVals is a data.frame
for ind in indices:
cVals = calcVals.loc[ind].values.tolist()
position = f.calcPosition(refVals, cVals, calcType = 'trunk')
# trunkDat.set_value(ind,'ref x',refVals[0])
trunkDat.at[ind,'ref x'] = refVals[0]
# trunkDat.set_value(ind,'ref y',refVals[1])
trunkDat.at[ind,'ref y'] = refVals[1]
# trunkDat.set_value(ind,'ref radius',refVals[2])
trunkDat.at[ind,'ref radius'] = refVals[2]
# trunkDat.set_value(ind,'x',position['x'])
trunkDat.at[ind,'x'] = position['x']
# trunkDat.set_value(ind,'y',position['y'])
trunkDat.at[ind,'y'] = position['y']
# trunkDat.set_value(ind,'z',trunkDat['height'].loc[ind])
trunkDat.at[ind,'z'] = trunkDat['height'].loc[ind]
return(trunkDat)
def segs2custRefs(segs, custRefs, logFileName, error = False):
"""Brings in segment frame, extracts rows that match any in the custom references, utilizes them to match with cust ref info to copy over to segs"""
#loop through bases and tops
for j in range(2):
if j == 0:
string = 'base'
else:
string = 'top'
#Extract rows where ref is in cust refs
calcSegs = segs[segs['{0} ref'.format(string)].isin(custRefs['name'])].copy() #use a copy to avoid chained indexing, calcSegs is temporary anyway
#Maps the values x,y,radius of custRefs to calcSegs with matching names in base or top ref
calcSegs['{0} ref x'.format(string)] = calcSegs['{0} ref'.format(string)].map(custRefs.set_index(custRefs['name'])['x'])
calcSegs['{0} ref y'.format(string)] = calcSegs['{0} ref'.format(string)].map(custRefs.set_index(custRefs['name'])['y'])
calcSegs['{0} ref radius'.format(string)] = calcSegs['{0} ref'.format(string)].map(custRefs.set_index(custRefs['name'])['radius'])
#Go through all the needed rows and do the calculations
for i in calcSegs.index:
#Calculate positional information for this row and column.
refVals = [calcSegs.loc[i,'{0} ref x'.format(string)], calcSegs.loc[i,'{0} ref y'.format(string)], calcSegs.loc[i,'{0} ref radius'.format(string)]]
positionMeasures = [calcSegs.loc[i,'{0} dist'.format(string)], calcSegs.loc[i,'{0} azi'.format(string)], calcSegs.loc[i,'{0} radius'.format(string)], calcSegs.loc[i,'{0} ref type'.format(string)]]
if 'int' in positionMeasures or any([item!=item for item in positionMeasures]): #look for 'int' or Nan
print('There are missing values of dist, azi, or radius for segments referenced to custom references. You must provide these numbers.')
f.print2Log(logFileName, 'There are missing values of dist, azi, or radius for segments referenced to custom references. You must provide these numbers.')
calcs = f.calcPosition(refVals, positionMeasures,calcType = 'segment')
#assign to original data.frame
segs.loc[i,'{0} x'.format(string)] = calcs['x']
segs.loc[i,'{0} y'.format(string)] = calcs['y']
segs.loc[i,'{0} ref x'.format(string)] = calcSegs.loc[i,'{0} ref x'.format(string)]
segs.loc[i,'{0} ref y'.format(string)] = calcSegs.loc[i,'{0} ref y'.format(string)]
segs.loc[i,'{0} ref radius'.format(string)] = calcSegs.loc[i,'{0} ref radius'.format(string)]
return(segs)
def segs2trunks(segmentFrame,referenceFrame, custRefs, logFileName, error = False):
"""Brings in the segment dataframe and a dictionary of reference data.frames broken up into seperate data.frames for each reference trunk name.
The code then looks at the reference name and matches that with the name of a reference. It then finds where a reference has a height
on either side of the node needs reference calcuations and interpolates the reference x, y, and radius at the height of the node
in question. There is some error logging here that opens the error log file and ourputs any errors encountered. A boolean error variable is passed
in from the main script that is set to False if there are no previous errors. The log file is a string of the log file name to append error information to."""
for j in range(2): #1 = base loop; 2 = top loop
for i in range(len(segmentFrame.index)): #i = all row indices
if j == 0: #set this variable to determine if we calculate reference coordinates for base or top of segment
string = 'base'
else:
string = 'top'
#get name of base or top ref depending on value of "j"
refName = str(segmentFrame['{0} ref'.format(string)].iloc[i])
refName = refName.replace(" ","")
refHt = segmentFrame['{0} z'.format(string)].iloc[i]
if string == 'base':
dashes = segmentFrame['name'].iloc[i].count('-') #More than two dashes in the base name means this is a mid-segment segment
else:
dashes = 1 #if it is the top of a segment is shouldn't matter so long as the reference if correct
if dashes >2 and refName.isalpha():
f.print2Log(logFileName, "Careful there buddy: the reference for mid-segment {0} is {1} and should probably be the segment base".format(segmentFrame['name'].iloc[i],refName))
#This operates on rows with a letter ref that does not equal 'calc', is not a mid-segment segment, that does not have @ notation and that is not in cust refs
if len(custRefs)>0: #Are there cust refs to test against?
test = (refName.isalpha() and refName.lower() != 'calc' and dashes < 2 and not any(custRefs['name'].isin([refName]))) or (refName.count('@') > 0)
else:
test = (refName.isalpha() and refName.lower() != 'calc' and dashes < 2) or (refName.count('@') > 0)
if test:
#if refname uses the @ notation
if refName.count('@') > 0:
rn = refName
refName = rn.split('@')[0]
refHt = float(rn.split('@')[1])
#if refName is 'Mtop' then refHt is highest top in main trunks
elif ('M' in refName and 'top' in refName):
tempFrame = referenceFrame['M']
refHt = round(float(tempFrame['top z'].max()),2)
refName = 'M'
elif isinstance(refHt,np.float64) or isinstance(refHt, np.int64): #convert numpy.floats to native floats, not sure why but several heights are imported as numpy.float not float
refHt = refHt.item()
if type(refHt) != float and type(refHt) != int:
print("Check the {0} reference height for segment {1}, it is not a number.".format(string,segmentFrame['name'].iloc[i]))
f.print2Log(logFileName,"Check the {0} reference height for segment {1}, it is not a number.".format(string,segmentFrame['name'].iloc[i]))
error = True
if all([refName != key for key in referenceFrame]):
print('The refName "{0}" for the {1} of segment "{2}" does not match any of the trunk names'.format(refName, string, segmentFrame.loc[i,'name']))
f.print2Log(logFileName, 'The refName "{0}" for the {1} of segment "{2}" does not match any of the trunk names'.format(refName, string, segmentFrame.loc[i,'name']))
#Call appropriate rows from mainT or segment data from the reference data.frame provided
tRows = referenceFrame['{0}'.format(refName)]
x=refHt >= tRows['base z']
y=refHt <= tRows['top z']
interpRow = tRows[[a and b for a, b in zip(x, y)]][:1] #intersection of boolean vectors is correct index
#Interp x,y,r
if interpRow.empty:
print("There are no main trunk sections surrounding the {0} height of the segment: {1}".format(string,segmentFrame['name'].iloc[i]))
f.print2Log(logFileName, "There are no main trunk sections surrounding the {0} height of the segment: {1}, if referencing the main below this segment use M@height for the ref\n".format(string,segmentFrame['name'].iloc[i]))
error = True
else:
interpVals = f.linearInterp(interpRow,refHt) #dictionary of interpolated values under 'ref_X','ref_Y', and 'ref_R'
if interpVals['errors']:
f.print2Log(logFileName, "Target height of {0} of segment {1} is not between reference heights".format(string, segmentFrame['name'].iloc[i]))
error = True
#Copy x,y,r to sement row
segmentFrame.at[i,'{0} ref x'.format(string)] = interpVals['ref_X'].iloc[0]
segmentFrame.at[i,'{0} ref y'.format(string)] = interpVals['ref_Y'].iloc[0]
segmentFrame.at[i,'{0} ref radius'.format(string)] = interpVals['ref_R'].iloc[0]
#Calculate positional information for this row and column as well.
refVals = [interpVals['ref_X'].iloc[0],interpVals['ref_Y'].iloc[0], interpVals['ref_R'].iloc[0]]
positionMeasures = [segmentFrame['{0} dist'.format(string)].iloc[i],segmentFrame['{0} azi'.format(string)].iloc[i],
segmentFrame['{0} radius'.format(string)].iloc[i],segmentFrame['{0} ref type'.format(string)].iloc[i]]
calcs = f.calcPosition(refVals, positionMeasures,calcType = 'segment')
segmentFrame.at[i,'{0} x'.format(string)] = calcs['x']
segmentFrame.at[i,'{0} y'.format(string)] = calcs['y']
#pdb.set_trace()
if calcs['error'] == True:
f.print2Log(logFileName,'Segment {0} reference assumed to be face to pith (reference to target)'.format(segmentFrame['name'].iloc[i]))
error = calcs['error']
f.closingStatement(logFileName, error)
return (segmentFrame)
def segs2nodes(segs, logFileName, error = False):
"""inputs a segment dataframe, brings in references that were calculated, and calculates x,y, and z values for base and top"""
""" If no rows need calculating the code skips the calculation process and outputs the input dataframe, this is to be efficient when while looping through all the routines"""
#This tests for rows that need calculating
calcSegs = f.isnumber(segs) #Segments referenced to nodes that may need calculations
if len(calcSegs) == 0: # There must be some rows that need calculation otherwise nothing is calculated
return(segs)
#only do calculations if we need to (there must be uncalculated values in the base or top of the segment)
elif calcSegs['baseTest'].sum() + calcSegs['topTest'].sum() > 0:
#If base x, y, or z = NA AND ref is numeric and only one number
refSegs = segs[pd.isnull(segs['top x'])==False] #segments that have calculations for referencing
#Move references over to ref cols and calculate x,y positions, copy complete rows to refSegs and repeat
for i in calcSegs.index: #for each row that needs a calculation if referenced to nodes
for j in range(2): #this is for base vs top
if j == 0:
string = 'base'
else:
string = 'top'
#If this is a node that needs calculating (is a node number depending on if base or top)
if calcSegs.loc[i,'{0}Test'.format(string)] : #test variable asks if node is numeric using column created by f.isnumber above
findName = calcSegs.loc[i,'{0} ref'.format(string)]
if type(findName)!=str: #sometimes import from excel produces mixed variable types, need to convert to string
findName = str(findName)
calcSegs.at[i,'{0} ref'.format(string)] = findName
if type(findName)==str:
print("Reference variable at {0} of segment {1} converted to string".format(string,calcSegs.loc[i,'name']))
f.print2Log(logFileName,"\nReference variable at {0} of segment {1} converted to string".format(string,calcSegs.loc[i,'name']))
error = True
else:
print("Attempeted and failed to convert {0} of segment {1} to string".format(string,calcSegs.loc[i,'name']))
f.print2Log(logFileName,"\nAttempeted and failed to convert {0} of segment {1} to string".format(string,calcSegs.loc[i,'name']))
error = True
nodeRow = refSegs[refSegs['top name'] == findName]
if len(nodeRow) != 0: #skip if there is not matching node row and get it on the next pass
if len(nodeRow)==1:
nodeRow = nodeRow
elif len(nodeRow) >1:
#If the names all match and at least one row is a 'mid' supplemental measurement, use the most distal supplemental row
if all(nodeRow['name'] == nodeRow['name'].iloc[0]) and sum(nodeRow['position']=='mid') > 0:
midSegOuts = f.midSegTopLocator(nodeRow, logFileName, error) #get the most distal of the midsegment rows
nodeRow = midSegOuts[0]
error = midSegOuts[2]
if len(nodeRow)==0:
f.print2Log(logFileName,'Make sure that you labelled supplemental measurements "mid" in the position column for segment {0}.'.format(findName))
#If the node names do not match
else:
nodeRow = nodeRow.iloc[0]
f.print2Log(logFileName,'\nWarning: There was more than one node matching the ref "{2}" for the {0} of segment {1}, so the first was used. If referencing a segment with a supplemental measurement, make sure the position column says "mid" for supplemental rows'.format(string, calcSegs['name'].loc[i], nodeRow['top name'])) #.values
error = True
#Assign Referenece values
RefX = float(nodeRow['top x'])
RefY = float(nodeRow['top y'])
RefRad = float(nodeRow['top radius'] )
#set refs and position to node location based on top node of origin segment
segs.loc[i,['{0} ref x'.format(string),'{0} x'.format(string)]] = RefX
segs.loc[i,['{0} ref y'.format(string),'{0} y'.format(string)]] = RefY
segs.at[i,'{0} ref radius'.format(string)] = RefRad
#Calc x and y based on refs, dist, and azi
posMeasures = [segs.loc[i,'{0} dist'.format(string)],segs.loc[i,'{0} azi'.format(string)],
segs.loc[i,'{0} radius'.format(string)],segs.loc[i,'{0} ref type'.format(string)]]
calcs = f.calcPosition(refVals = (RefX, RefY, RefRad), calcVals = posMeasures, calcType = 'segment')
segs.at[i,'{0} x'.format(string)] = calcs['x']
segs.at[i,'{0} y'.format(string)] = calcs['y']
Z_offset = 0
if string == 'base' and isinstance(calcSegs['notes'].loc[i], str): #If there is a note
if 'from top' in calcSegs['notes'].loc[i]:
Z_offset = float(RefRad)
elif 'from bot' in calcSegs['notes'].loc[i]:
Z_offset = -float(RefRad)
segs.at[i,'base z'] = segs.loc[i,'base z'] + Z_offset
if calcs['error'] == True:
f.print2Log(logFileName,'Segment {0} reference assumed to be face to pith (reference to target)'.format(segs['name'].iloc[i]))
error = calcs['error']
f.closingStatement(logFileName, error)
return(segs)
def segs2reits(segs, logFileName, error = False):
#Get values for interpolation
for j in range(2):
if j == 0: #set this variable to determine if we calculate reference coordinates for base or top of segment
string = 'base'
else:
string = 'top'
refs = segs['{0} ref'.format(string)]
names = segs['name']
heights = segs['{0} z'.format(string)]
types = segs['type']
##Anything referenced to a trunk segment###
segsFromReitIndex = f.refSegType(refs, names, heights, types,'t').tolist()
##Only do the routine if there are rows that need calculations
calcSegs = segs.loc[segsFromReitIndex]
# pdb.set_trace()
if np.isnan(calcSegs.loc[:,['{0} x'.format(string), '{0} y'.format(string)]]).sum().sum() > 0: #if there are any empty cells
for i in calcSegs.index: #find indices of test and cycle over
refName = calcSegs['{0} ref'.format(string)].loc[i]
refRow = segs[segs['name']==refName] #Index of segs to look in for base and top information
#Deals with supplemental rows and extracts correct row
if len(refRow)==0:
                    f.print2Log(logFileName,'There are no reiterated trunks matching the origin of {0} for segment {1}'.format(refName,calcSegs.loc[i,'name']))
error = True
#Need to locate correct row if we have multiple rows
elif len(refRow)> 1:
refHt = calcSegs.loc[i,'{0} z'.format(string)]
if isinstance(refHt,np.float64) or isinstance(refHt, np.int64): #convert numpy.floats to native floats, not sure why but several heights are imported as numpy.float not float
refHt = refHt.item()
x=refHt >= refRow['base z']
y=refHt <= refRow['top z']
refRow = refRow[[a and b for a, b in zip(x, y)]][:1] #intersection of boolean vectors is correct index
if len(refRow) == 0:
                        # pdb.set_trace()  # debug only
print("The height of the {0} of segment {1} is not between the reiterated trunk {2} heights".format(string, calcSegs['name'].loc[i], refName))
                        f.print2Log(logFileName,"The height of the {0} of segment {1} is not between the reiterated trunk {2} heights".format(string, calcSegs['name'].loc[i], refName))
#Calculate ref vals, pass to function to calc final x,y, and radius
refValues = f.linearInterp(refRow, calcSegs['{0} z'.format(string)].loc[i], logFileName) #pass in the reference row and the reference Height
refVals = [None]*3
refVals[0] = refValues['ref_X']
refVals[1] = refValues['ref_Y']
refVals[2] = refValues['ref_R']
posMeasures = [None]*4
posMeasures [0] = calcSegs['{0} dist'.format(string)].loc[i]
posMeasures [1] = calcSegs['{0} azi'.format(string)].loc[i]
posMeasures [2] = calcSegs['{0} radius'.format(string)].loc[i]
posMeasures [3] = calcSegs['{0} ref type'.format(string)].loc[i]
calcs = f.calcPosition(refVals, posMeasures, 'segment')
segs.at[i,'{0} ref x'.format(string)] = refVals[0]
segs.at[i,'{0} ref y'.format(string)] = refVals[1]
segs.at[i,'{0} ref radius'.format(string)] = refVals[2]
segs.at[i,'{0} x'.format(string)] = calcs['x']
segs.at[i,'{0} y'.format(string)] = calcs['y']
if calcs['error'] == True:
f.print2Log(logFileName,'Segment {0} reference assumed to be face to pith (reference to target)'.format(segs['name'].iloc[i]))
error = calcs['error']
elif refValues['errors'] == True:
f.print2Log(logFileName, "Target height of {0} of segment {1} is not between reference heights".format(string, calcSegs['name'].loc[i]))
error = True
else:
error = False
f.closingStatement(logFileName, error)
return(segs)
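# Illustrative sketch only: f.linearInterp lives elsewhere in this project. Based on how it is
# called above, it interpolates a reference x, y, and radius at a target height between the base
# and top of a reiterated trunk row. A minimal stand-alone version of that idea (names are mine,
# not the project's) might look like this:
def _sketch_linear_interp(base_z, top_z, base_val, top_val, target_z):
    """Linearly interpolate a value at target_z between (base_z, base_val) and (top_z, top_val)."""
    if top_z == base_z:
        return base_val
    frac = (target_z - base_z) / (top_z - base_z)
    return base_val + frac * (top_val - base_val)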
def segs2midsegs(segs, logFileName, error = False):
#Test for references to segments without height measurements
    test1 = segs['base name'].str.contains('-') & segs['base z'].map(lambda x: not isinstance(x, (int, float))) #This works for the T2 and prez (calc instead of missing), doesn't work for missing numbers
test2 = segs['base name'].str.contains('-') & pd.isnull(segs['base z']) #Works for fin, runs for T2, runs for prez
test = f.vectorBool(test1,test2,'or')
#test = segs['base ref'].str.contains('-') & [not i for i in segs['base z'].map(np.isreal)] #Test for references to segments without height measurements
calcSegs = segs[test]
if sum(test) > 0: #Only run through this if there are rows that even need it.
#Separate name into node names for later searching.
#nodeNames = f.splitName(segs['name'])
for i in calcSegs.index:
if isinstance(calcSegs['base azi'].loc[i],nu.Number): #Sometimes the data will say "calc" here, this tests for that
azi = calcSegs['base azi'].loc[i] #use base azi if there is one specified
elif isinstance(calcSegs['top azi'].loc[i],nu.Number):
azi = calcSegs['top azi'].loc[i] #otherwise use the top azi
                print("Warning: There is a missing base azimuth for segment {0}, top azi taken as base".format(calcSegs['name'].loc[i]))
                f.print2Log(logFileName,"Warning: There is a missing base azimuth for segment {0}, top azi taken as base".format(calcSegs['name'].loc[i]))
error = True
dist2pt = calcSegs['midsegment dist'].loc[i]
#if dist2pt is nan assign to middle of segment
if np.isnan(dist2pt):
dist2pt = 'mid'
f.print2Log(logFileName, "Warning: No length to node given for midsegment {0}, origin assumed to be from middle of {1}".format(calcSegs['name'].loc[i],segs['base name'].loc[i]))
error = True
#Assign ref4Dist2pt, if there is one recorded use it otherwise use the top node from origin segment, will calc to center of orig segment
            if pd.notnull(calcSegs['midsegment ref'].loc[i]):  # api: pandas.notnull
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
        repr(DateOffset(2))  # api: pandas.core.datetools.DateOffset
import json
import logging
import os
import subprocess
import pandas as pd
from csgo.utils import check_go_version
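# Typical usage sketch (the demo path below is a placeholder, not part of this module):
#   parser = DemoParser(demofile="path/to/match.dem", parse_rate=128)
#   data = parser.parse(return_type="df")   # or return_type="json"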
class DemoParser:
"""DemoParser can parse, load and clean data from a CSGO demofile. Can be instantiated without a specified demofile.
Attributes:
demofile (string): A string denoting the path to the demo file, which ends in .dem
log (boolean): A boolean denoting if a log will be written. If true, log is written to "csgo_parser.log"
demo_id (string): A unique demo name/game id. Default is inferred from demofile name
parse_rate (int): One of 128, 64, 32, 16, 8, 4, 2, or 1. The lower the value, the more frames are collected. Indicates spacing between parsed demo frames in ticks. Default is 128.
parse_frames (bool): Flag if you want to parse frames (trajectory data) or not
trade_time (int): Length of the window for a trade (in seconds). Default is 5.
dmg_rolled (bool): Boolean if you want damages rolled up (since multiple damages for a player can happen in 1 tick from the same weapon.)
buy_style (string): Buy style string, one of "hltv" or "csgo"
use_exe_parser (bool): Flag if you want to parse demo on Windows without installing Go lang
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.14
"""
def __init__(
self,
demofile="",
outpath=None,
log=False,
demo_id=None,
parse_rate=128,
parse_frames=True,
trade_time=5,
dmg_rolled=False,
buy_style="hltv",
use_exe_parser=None
):
# Set up logger
if log:
logging.basicConfig(
filename="csgo_demoparser.log",
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
)
self.logger = logging.getLogger("CSGODemoParser")
self.logger.handlers = []
fh = logging.FileHandler("csgo_demoparser.log")
fh.setLevel(logging.INFO)
self.logger.addHandler(fh)
else:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
)
self.logger = logging.getLogger("CSGODemoParser")
# Handle demofile and demo_id name. Finds right most '/' in case demofile is a specified path.
self.demofile = os.path.abspath(demofile)
self.logger.info("Initialized CSGODemoParser with demofile " + self.demofile)
if (demo_id is None) | (demo_id == ""):
self.demo_id = demofile[demofile.rfind("/") + 1 : -4]
else:
self.demo_id = demo_id
if outpath is None:
self.outpath = os.path.abspath(os.getcwd())
else:
self.outpath = os.path.abspath(outpath)
self.logger.info("Setting demo id to " + self.demo_id)
# Handle parse rate. If the parse rate is less than 64, likely to be slow
if parse_rate < 1 or type(parse_rate) is not int:
self.logger.warning(
"Parse rate of "
+ str(parse_rate)
+ " not acceptable! Parse rate must be an integer greater than 0."
)
parse_rate = 128
self.parse_rate = parse_rate
if parse_rate < 64 and parse_rate > 1:
self.logger.warning(
"A parse rate lower than 64 may be slow depending on the tickrate of the demo, which is usually 64 for MM and 128 for pro demos."
)
self.parse_rate = parse_rate
elif parse_rate >= 256:
self.logger.warning(
"A high parse rate means very few frames. Only use for testing purposes."
)
self.parse_rate = parse_rate
else:
self.parse_rate = parse_rate
self.logger.info("Setting parse rate to " + str(self.parse_rate))
# Handle trade time
if trade_time <= 0:
self.logger.warning(
"Trade time can't be negative, setting to default value of 5 seconds."
)
self.trade_time = 5
        elif trade_time > 7:
            self.logger.warning(
                "Trade time of "
                + str(trade_time)
                + " is rather long. Consider a value between 4-7."
            )
            self.trade_time = trade_time
        else:
            self.trade_time = trade_time
self.logger.info("Setting trade time to " + str(self.trade_time))
# Handle buy style
if buy_style not in ["hltv", "csgo"]:
self.logger.warning(
"Buy style specified is not one of hltv, csgo, will be set to hltv by default"
)
self.buy_style = "hltv"
else:
self.buy_style = buy_style
self.logger.info("Setting buy style to " + str(self.buy_style))
self.dmg_rolled = dmg_rolled
self.parse_frames = parse_frames
self.logger.info("Rollup damages set to " + str(self.dmg_rolled))
self.logger.info("Parse frames set to " + str(self.parse_frames))
self.logger.info("Setting demo id to " + self.demo_id)
if (use_exe_parser is None) | (not use_exe_parser):
self.use_exe_parser = False
else:
self.use_exe_parser = True
# Set parse error to False
self.parse_error = False
def parse_demo(self):
"""Parse a demofile using the Go script parse_demo.go -- this function needs the .demofile to be set in the class, and the file needs to exist.
Returns:
Outputs a JSON file to current working directory.
Raises:
ValueError: Raises a ValueError if the Golang version is lower than 1.14
FileNotFoundError: Raises a FileNotFoundError if the demofile path does not exist.
"""
# Check if Golang version is compatible
if self.use_exe_parser:
self.logger.info("Use exe parser")
else:
acceptable_go = check_go_version()
if not acceptable_go:
self.logger.error(
"Error calling Go. Check if Go is installed using 'go version'. Need at least v1.14.0."
)
raise ValueError(
"Error calling Go. Check if Go is installed using 'go version'. Need at least v1.14.0."
)
else:
self.logger.info("Go version>=1.14.0")
# Check if demofile exists
if not os.path.exists(os.path.abspath(self.demofile)):
self.logger.error("Demofile path does not exist!")
raise FileNotFoundError("Demofile path does not exist!")
path = os.path.join(os.path.dirname(__file__), "")
self.logger.info("Running parser from " + path)
self.logger.info("Looking for file at " + self.demofile)
self.parser_cmd = [os.path.join(os.path.dirname(os.path.abspath(__file__)), 'parse_demo.exe')] if self.use_exe_parser else ["go", "run", "parse_demo.go"]
self.parser_cmd += [
"-demo",
self.demofile,
"-parserate",
str(self.parse_rate),
"-tradetime",
str(self.trade_time),
"-buystyle",
str(self.buy_style),
"-demoid",
str(self.demo_id),
"-out",
self.outpath,
]
if self.dmg_rolled:
self.parser_cmd.append("--dmgrolled")
if self.parse_frames:
self.parser_cmd.append("--parseframes")
proc = subprocess.Popen(
self.parser_cmd,
stdout=subprocess.PIPE,
cwd=path,
)
stdout = proc.stdout.read().splitlines()
self.output_file = self.demo_id + ".json"
if os.path.isfile(self.output_file):
self.logger.info("Wrote demo parse output to " + self.output_file)
self.parse_error = False
else:
self.parse_error = True
self.logger.error("No file produced, error in calling Golang")
self.logger.error(stdout)
def read_json(self, json_path):
"""Reads the JSON file given a JSON path. Can be used to read in already processed demofiles.
Args:
json_path (string): Path to JSON file
Returns:
JSON in Python dictionary form
Raises:
FileNotFoundError: Raises a FileNotFoundError if the JSON path doesn't exist
"""
# Check if JSON exists
if not os.path.exists(os.path.abspath(json_path)):
self.logger.error("JSON path does not exist!")
raise FileNotFoundError("JSON path does not exist!")
# Read in json to .json attribute
with open(json_path, encoding="utf8") as f:
demo_data = json.load(f)
self.json = demo_data
self.logger.info(
"JSON data loaded, available in the `json` attribute to parser"
)
return demo_data
def parse(self, return_type="json"):
"""Wrapper for parse_demo() and read_json(). Use to parse a demo.
Args:
return_type (string): Either "json" or "df"
Returns:
A dictionary of output (which is parsed to a JSON file in the working directory)
Raises:
ValueError: Raises a ValueError if the return_type is not "json" or "df"
AttributeError: Raises an AttributeError if the .json attribute is None
"""
self.parse_demo()
self.read_json(json_path=self.outpath + "/" + self.output_file)
if self.json:
self.logger.info("JSON output found")
if return_type == "json":
return self.json
elif return_type == "df":
demo_data = self.parse_json_to_df()
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error("Parse return_type must be either 'json' or 'df'")
raise ValueError("return_type must be either 'json' or 'df'")
else:
self.logger.error("JSON couldn't be returned")
raise AttributeError("No JSON parsed! Error in producing JSON.")
def parse_json_to_df(self):
"""Returns JSON into dictionary where keys correspond to data frames
Returns:
A dictionary of output
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
demo_data = {}
demo_data["matchID"] = self.json["matchID"]
demo_data["clientName"] = self.json["clientName"]
demo_data["mapName"] = self.json["mapName"]
demo_data["tickRate"] = self.json["tickRate"]
demo_data["playbackTicks"] = self.json["playbackTicks"]
demo_data["rounds"] = self._parse_rounds()
demo_data["kills"] = self._parse_kills()
demo_data["damages"] = self._parse_damages()
demo_data["grenades"] = self._parse_grenades()
demo_data["flashes"] = self._parse_flashes()
demo_data["weaponFires"] = self._parse_weapon_fires()
demo_data["bombEvents"] = self._parse_bomb_events()
demo_data["frames"] = self._parse_frames()
demo_data["playerFrames"] = self._parse_player_frames()
self.logger.info("Returned dataframe output")
return demo_data
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_frames(self):
"""Returns frames as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a frame (game state) in the demo, which is a discrete point of time.
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
frames_dataframes = []
keys = ["tick", "seconds"]
for r in self.json["gameRounds"]:
if r["frames"]:
for frame in r["frames"]:
frame_item = {}
frame_item["roundNum"] = r["roundNum"]
for k in keys:
frame_item[k] = frame[k]
for side in ["ct", "t"]:
if side == "ct":
frame_item["ctTeamName"] = frame["ct"]["teamName"]
frame_item["ctEqVal"] = frame["ct"]["teamEqVal"]
frame_item["ctAlivePlayers"] = frame["ct"][
"alivePlayers"
]
frame_item["ctUtility"] = frame["ct"]["totalUtility"]
else:
frame_item["tTeamName"] = frame["t"]["teamName"]
frame_item["tEqVal"] = frame["t"]["teamEqVal"]
frame_item["tAlivePlayers"] = frame["t"]["alivePlayers"]
frame_item["tUtility"] = frame["t"]["totalUtility"]
frames_dataframes.append(frame_item)
            frames_df = pd.DataFrame(frames_dataframes)
            frames_df["matchID"] = self.json["matchID"]
            frames_df["mapName"] = self.json["mapName"]
            return frames_df
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_player_frames(self):
"""Returns player frames as a Pandas dataframe.
Returns:
A Pandas dataframe where each row is a player's attributes at a given frame (game state).
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
player_frames = []
for r in self.json["gameRounds"]:
if r["frames"]:
for frame in r["frames"]:
for side in ["ct", "t"]:
if frame[side]["players"] is not None and (
len(frame[side]["players"])
> 0 # Used to be == 5, to ensure the sides were equal.
):
for player in frame[side]["players"]:
player_item = {}
player_item["roundNum"] = r["roundNum"]
player_item["tick"] = frame["tick"]
player_item["seconds"] = frame["seconds"]
player_item["side"] = side
player_item["teamName"] = frame[side]["teamName"]
for col in player.keys():
if col != "inventory":
player_item[col] = player[col]
player_frames.append(player_item)
player_frames_df = pd.DataFrame(player_frames)
player_frames_df["matchID"] = self.json["matchID"]
player_frames_df["mapName"] = self.json["mapName"]
            return player_frames_df
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_rounds(self):
"""Returns rounds as a Pandas dataframe
Returns:
A Pandas dataframe where each row is a round
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
rounds = []
cols = [
"roundNum",
"startTick",
"freezeTimeEndTick",
"endTick",
"endOfficialTick",
"tScore",
"ctScore",
"endTScore",
"endCTScore",
"tTeam",
"ctTeam",
"winningSide",
"winningTeam",
"losingTeam",
"roundEndReason",
"tStartEqVal",
"tRoundStartEqVal",
"tRoundStartMoney",
"tBuyType",
"tSpend",
"ctStartEqVal",
"ctRoundStartEqVal",
"ctRoundStartMoney",
"ctBuyType",
"ctSpend",
]
for r in self.json["gameRounds"]:
round_item = {}
for k in cols:
round_item[k] = r[k]
round_item["matchID"] = self.json["matchID"]
round_item["mapName"] = self.json["mapName"]
rounds.append(round_item)
return pd.DataFrame(rounds)
else:
self.logger.error(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
raise AttributeError(
"JSON not found. Run .parse() or .read_json() if JSON already exists"
)
def _parse_kills(self):
"""Returns kills as either a Pandas dataframe
Returns:
A Pandas dataframe where each row is a kill
Raises:
AttributeError: Raises an AttributeError if the .json attribute is None
"""
if self.json:
kills = []
for r in self.json["gameRounds"]:
if r["kills"] is not None:
for k in r["kills"]:
new_k = k
new_k["roundNum"] = r["roundNum"]
new_k["matchID"] = self.json["matchID"]
new_k["mapName"] = self.json["mapName"]
kills.append(new_k)
            return pd.DataFrame(kills)  # api: pandas.DataFrame
import numpy as np
import pandas as pd
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pyqtgraph
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtTest import *
from Model_module import Model_module
from Data_module import Data_module
# from Sub_widget import another_result_explain
class Worker(QObject):
    # Create the signal containers used to emit results #############
train_value = pyqtSignal(object)
# nor_ab_value = pyqtSignal(object)
procedure_value = pyqtSignal(object)
verif_value = pyqtSignal(object)
timer = pyqtSignal(object)
symptom_db = pyqtSignal(object)
shap = pyqtSignal(object)
plot_db = pyqtSignal(object)
display_ex = pyqtSignal(object, object, object)
another_shap = pyqtSignal(object, object, object)
another_shap_table = pyqtSignal(object)
##########################################
@pyqtSlot(object)
def generate_db(self):
        test_db = input('Enter the scenario to run : ')
        print(f'Running the selected scenario : {test_db}.')
        Model_module() # reset the empty matrices inside the model module
        data_module = Data_module()
        db, check_db = data_module.load_data(file_name=test_db) # load the selected test_db
data_module.data_processing() # Min-Max o, 2 Dimension
liner = []
plot_data = []
normal_data = []
compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]}
for line in range(np.shape(db)[0]):
QTest.qWait(0.01)
print(np.shape(db)[0], line)
data = np.array([data_module.load_real_data(row=line)])
liner.append(line)
check_data, check_parameter = data_module.load_real_check_data(row=line)
plot_data.append(check_data[0])
try: normal_data.append(normal_db.iloc[line])
except: pass
try: compare_data['Normal'].append(normal_db.iloc[line])
except: pass
try: compare_data['Ab21-01'].append(ab21_01.iloc[line])
except: pass
try: compare_data['Ab21-02'].append(ab21_02.iloc[line])
except: pass
try: compare_data['Ab20-04'].append(ab20_04.iloc[line])
except: pass
try: compare_data['Ab15-07'].append(ab15_07.iloc[line])
except: pass
try: compare_data['Ab15-08'].append(ab15_08.iloc[line])
except: pass
try: compare_data['Ab63-04'].append(ab63_04.iloc[line])
except: pass
try: compare_data['Ab63-02'].append(ab63_02.iloc[line])
except: pass
try: compare_data['Ab21-12'].append(ab21_12.iloc[line])
except: pass
try: compare_data['Ab19-02'].append(ab19_02.iloc[line])
except: pass
try: compare_data['Ab21-11'].append(ab21_11.iloc[line])
except: pass
try: compare_data['Ab23-03'].append(ab23_03.iloc[line])
except: pass
try: compare_data['Ab60-02'].append(ab60_02.iloc[line])
except: pass
try: compare_data['Ab59-02'].append(ab59_02.iloc[line])
except: pass
try: compare_data['Ab23-01'].append(ab23_01.iloc[line])
except: pass
try: compare_data['Ab23-06'].append(ab23_06.iloc[line])
except: pass
if np.shape(data) == (1, 10, 46):
dim2 = np.array(data_module.load_scaled_data(row=line - 9)) # 2차원 scale
# check_data, check_parameter = data_module.load_real_check_data(row=line - 8)
# plot_data.append(check_data[0])
train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data)
# normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data)
abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2)
abnormal_verif_reconstruction_error, verif_threshold, abnormal_verif_error = model_module.abnormal_procedure_verification(data=data)
self.train_value.emit(train_untrain_error)
# self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0])
self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0])
self.verif_value.emit([abnormal_verif_error, verif_threshold])
self.timer.emit([line, check_parameter])
self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter])
self.shap.emit(shap_add_des)
self.plot_db.emit([liner, plot_data])
self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data)
self.another_shap.emit(shap_value, [liner, plot_data], compare_data)
self.another_shap_table.emit(shap_value)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
class Mainwindow(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP")
self.setGeometry(150, 50, 1700, 800)
        # Initial graph settings
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
#############################################
self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv')
        # GUI part 1 layout (integrated diagnosis section)
        layout_left = QVBoxLayout()
        # Group 0 setup (Time and Power)
        gb_0 = QGroupBox("Training Status") # set the group 0 title
        layout_left.addWidget(gb_0) # add group 0 to the overall frame
        gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout that holds the group 0 contents
        # Group 1 setup
        gb_1 = QGroupBox("Training Status") # set the group 1 title
        layout_left.addWidget(gb_1) # add group 1 to the overall frame
        gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout that holds the group 1 contents
        # Group 2 setup
gb_2 = QGroupBox('NPP Status')
layout_left.addWidget(gb_2)
gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 3 setup
gb_3 = QGroupBox(self)
layout_left.addWidget(gb_3)
gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 4 setup
gb_4 = QGroupBox('Predicted Result Verification')
layout_left.addWidget(gb_4)
gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 5 setup
gb_5 = QGroupBox('Symptom check in scenario')
layout_left.addWidget(gb_5)
gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom)
        # Add a spacer
        # layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # Group 0 contents
self.time_label = QLabel(self)
self.power_label = QPushButton(self)
        # Group 1 contents
        # Trained / Untrained condition label
        self.trained_label = QPushButton('Trained')
        self.Untrained_label = QPushButton('Untrained')
        # Group 2 contents
        self.normal_label = QPushButton('Normal')
        self.abnormal_label = QPushButton('Abnormal')
        # Group 3 contents
self.name_procedure = QLabel('Number of Procedure: ')
self.num_procedure = QLineEdit(self)
self.num_procedure.setAlignment(Qt.AlignCenter)
self.name_scnario = QLabel('Name of Procedure: ')
self.num_scnario = QLineEdit(self)
self.num_scnario.setAlignment(Qt.AlignCenter)
        # Group 4 contents
        self.success_label = QPushButton('Diagnosis Success')
        self.failure_label = QPushButton('Diagnosis Failure')
        # Group 5 contents
self.symptom_name = QLabel(self)
self.symptom1 = QCheckBox(self)
self.symptom2 = QCheckBox(self)
self.symptom3 = QCheckBox(self)
self.symptom4 = QCheckBox(self)
self.symptom5 = QCheckBox(self)
self.symptom6 = QCheckBox(self)
        # Populate group 0
        gb_0_layout.addWidget(self.time_label)
        gb_0_layout.addWidget(self.power_label)
        gb_0.setLayout(gb_0_layout)
        # Populate group 1
        gb_1_layout.addWidget(self.trained_label)
        gb_1_layout.addWidget(self.Untrained_label)
        gb_1.setLayout(gb_1_layout) # put the group 1 layout into the group 1 frame
        # Populate group 2
gb_2_layout.addWidget(self.normal_label)
gb_2_layout.addWidget(self.abnormal_label)
gb_2.setLayout(gb_2_layout)
        # Populate group 3
gb_3_layout.addWidget(self.name_procedure)
gb_3_layout.addWidget(self.num_procedure)
gb_3_layout.addWidget(self.name_scnario)
gb_3_layout.addWidget(self.num_scnario)
gb_3.setLayout(gb_3_layout)
        # Populate group 4
gb_4_layout.addWidget(self.success_label)
gb_4_layout.addWidget(self.failure_label)
gb_4.setLayout(gb_4_layout)
        # Populate group 5
gb_5_layout.addWidget(self.symptom_name)
gb_5_layout.addWidget(self.symptom1)
gb_5_layout.addWidget(self.symptom2)
gb_5_layout.addWidget(self.symptom3)
gb_5_layout.addWidget(self.symptom4)
gb_5_layout.addWidget(self.symptom5)
gb_5_layout.addWidget(self.symptom6)
gb_5.setLayout(gb_5_layout)
        # Place the Start button at the very bottom
self.start_btn = QPushButton('Start')
# layout_part1.addWidget(self.start_btn)
self.tableWidget = QTableWidget(0, 0)
self.tableWidget.setFixedHeight(500)
self.tableWidget.setFixedWidth(800)
        # Plot widgets
self.plot_1 = pyqtgraph.PlotWidget(title=self)
self.plot_2 = pyqtgraph.PlotWidget(title=self)
self.plot_3 = pyqtgraph.PlotWidget(title=self)
self.plot_4 = pyqtgraph.PlotWidget(title=self)
        # Explanation alarm widgets
red_alarm = QGroupBox('Main basis for diagnosis')
red_alarm_layout = QGridLayout()
orange_alarm = QGroupBox('Sub basis for diagnosis')
orange_alarm_layout = QGridLayout()
        # Create the display buttons
self.red1 = QPushButton(self)
self.red2 = QPushButton(self)
self.red3 = QPushButton(self)
self.red4 = QPushButton(self)
self.orange1 = QPushButton(self)
self.orange2 = QPushButton(self)
self.orange3 = QPushButton(self)
self.orange4 = QPushButton(self)
self.orange5 = QPushButton(self)
self.orange6 = QPushButton(self)
self.orange7 = QPushButton(self)
self.orange8 = QPushButton(self)
self.orange9 = QPushButton(self)
self.orange10 = QPushButton(self)
self.orange11 = QPushButton(self)
self.orange12 = QPushButton(self)
        # Insert the widgets into the layouts
red_alarm_layout.addWidget(self.red1, 0, 0)
red_alarm_layout.addWidget(self.red2, 0, 1)
red_alarm_layout.addWidget(self.red3, 1, 0)
red_alarm_layout.addWidget(self.red4, 1, 1)
orange_alarm_layout.addWidget(self.orange1, 0, 0)
orange_alarm_layout.addWidget(self.orange2, 0, 1)
orange_alarm_layout.addWidget(self.orange3, 1, 0)
orange_alarm_layout.addWidget(self.orange4, 1, 1)
orange_alarm_layout.addWidget(self.orange5, 2, 0)
orange_alarm_layout.addWidget(self.orange6, 2, 1)
orange_alarm_layout.addWidget(self.orange7, 3, 0)
orange_alarm_layout.addWidget(self.orange8, 3, 1)
orange_alarm_layout.addWidget(self.orange9, 4, 0)
orange_alarm_layout.addWidget(self.orange10, 4, 1)
orange_alarm_layout.addWidget(self.orange11, 5, 0)
orange_alarm_layout.addWidget(self.orange12, 5, 1)
        # Insert the layouts into their group boxes
red_alarm.setLayout(red_alarm_layout)
orange_alarm.setLayout(orange_alarm_layout)
        # Insert each group box into the parent layout
layout_part1 = QVBoxLayout()
detail_part = QHBoxLayout()
detailed_table = QPushButton('Detail Explanation [Table]')
self.another_classification = QPushButton('Why other scenarios were not chosen')
detail_part.addWidget(detailed_table)
detail_part.addWidget(self.another_classification)
alarm_main = QVBoxLayout()
alarm_main.addWidget(red_alarm)
alarm_main.addWidget(orange_alarm)
layout_part1.addLayout(layout_left)
layout_part1.addLayout(alarm_main)
layout_part1.addLayout(detail_part)
layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # GUI part 2 layout (XAI display)
layout_part2 = QVBoxLayout()
layout_part2.addWidget(self.plot_1)
layout_part2.addWidget(self.plot_2)
layout_part2.addWidget(self.plot_3)
layout_part2.addWidget(self.plot_4)
# layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# layout_part2.addWidget(self.tableWidget)
        # Combine GUI part 1 and part 2
layout_base = QHBoxLayout()
layout_base.addLayout(layout_part1)
layout_base.addLayout(layout_part2)
        # Final GUI assembly (so the start button sits at the bottom)
total_layout = QVBoxLayout()
total_layout.addLayout(layout_base)
total_layout.addWidget(self.start_btn)
        self.setLayout(total_layout) # setLayout : determines the final GUI screen
# Threading Part##############################################################################################################
        # Run the data-processing part in a separate thread
        self.worker = Worker()
        self.worker_thread = QThread()
        # Connect the signals to slots in the main thread
self.worker.train_value.connect(self.Determine_train)
self.worker.procedure_value.connect(self.Determine_abnormal)
self.worker.procedure_value.connect(self.Determine_procedure)
self.worker.verif_value.connect(self.verifit_result)
self.worker.timer.connect(self.time_display)
self.worker.symptom_db.connect(self.procedure_satisfaction)
# self.worker.shap.connect(self.explain_result)
self.worker.plot_db.connect(self.plotting)
self.worker.display_ex.connect(self.display_explain)
        self.worker.moveToThread(self.worker_thread) # move the Worker instance to the worker thread
        # self.worker_thread.started.connect(lambda: self.worker.generate_db())
        self.start_btn.clicked.connect(lambda: self.worker.generate_db()) # clicking runs the scenario loop
self.worker_thread.start()
# Threading Part##############################################################################################################
        # Event handling ----------------------------------------------------------------------------------------------------
detailed_table.clicked.connect(self.show_table)
self.another_classification.clicked.connect(self.show_another_result)
        # Button-click event wiring
convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button
convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} #
convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5,
5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10,
10: self.orange11, 11: self.orange12} # Orange Button
convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot,
5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot,
10: self.orange11_plot, 11: self.orange12_plot}
        # Declare the button widgets up front -> they must be created early so they persist without being interrupted.
# Red Button
[convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)]
self.red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)]
self.orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
self.show() # UI show command
def time_display(self, display_variable):
# display_variable[0] : time, display_variable[1].iloc[1]
self.time_label.setText(f'<b>Time :<b/> {display_variable[0]} sec')
self.time_label.setFont(QFont('Times new roman', 15))
self.time_label.setAlignment(Qt.AlignCenter)
self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%')
if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95:
self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
else:
self.power_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_train(self, train_untrain_reconstruction_error):
if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data
self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else: # Untrained Data
self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_abnormal(self, abnormal_diagnosis):
        if abnormal_diagnosis == 0: # normal state
self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else: # abnormal state
self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.normal_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_procedure(self, abnormal_procedure_result):
if abnormal_procedure_result == 0:
self.num_procedure.setText('Normal')
self.num_scnario.setText('Normal')
elif abnormal_procedure_result == 1:
self.num_procedure.setText('Ab21-01')
            self.num_scnario.setText('Pressurizer pressure channel failure "High"')
elif abnormal_procedure_result == 2:
self.num_procedure.setText('Ab21-02')
            self.num_scnario.setText('Pressurizer pressure channel failure "Low"')
elif abnormal_procedure_result == 3:
self.num_procedure.setText('Ab20-04')
            self.num_scnario.setText('Pressurizer level channel failure "Low"')
elif abnormal_procedure_result == 4:
self.num_procedure.setText('Ab15-07')
            self.num_scnario.setText('Steam generator level channel failure "Low"')
elif abnormal_procedure_result == 5:
self.num_procedure.setText('Ab15-08')
            self.num_scnario.setText('Steam generator level channel failure "High"')
elif abnormal_procedure_result == 6:
self.num_procedure.setText('Ab63-04')
            self.num_scnario.setText('Control rod drop')
elif abnormal_procedure_result == 7:
self.num_procedure.setText('Ab63-02')
            self.num_scnario.setText('Continuous control rod insertion')
elif abnormal_procedure_result == 8:
self.num_procedure.setText('Ab21-12')
# self.num_scnario.setText('가압기 PORV 열림')
self.num_scnario.setText('Pressurizer PORV opening')
elif abnormal_procedure_result == 9:
self.num_procedure.setText('Ab19-02')
            self.num_scnario.setText('Pressurizer safety valve failure')
elif abnormal_procedure_result == 10:
self.num_procedure.setText('Ab21-11')
            self.num_scnario.setText('Pressurizer spray valve failure "Open"')
elif abnormal_procedure_result == 11:
self.num_procedure.setText('Ab23-03')
            self.num_scnario.setText('Leakage into the component cooling water system "CVCS->CCW"')
elif abnormal_procedure_result == 12:
self.num_procedure.setText('Ab60-02')
            self.num_scnario.setText('Rupture upstream of the regenerative heat exchanger')
elif abnormal_procedure_result == 13:
self.num_procedure.setText('Ab59-02')
            self.num_scnario.setText('Leakage downstream of the charging flow control valve')
elif abnormal_procedure_result == 14:
self.num_procedure.setText('Ab23-01')
            self.num_scnario.setText('Leakage into the component cooling water system "RCS->CCW"')
elif abnormal_procedure_result == 15:
self.num_procedure.setText('Ab23-06')
            self.num_scnario.setText('Steam generator tube leakage')
def verifit_result(self, verif_value):
        if verif_value[0] <= verif_value[1]: # diagnosis success
self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else: # diagnosis failure
self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.success_label.setStyleSheet('color : black;' 'background-color: light gray;')
def procedure_satisfaction(self, symptom_db):
# symptom_db[0] : classification result [0~15]
        # symptom_db[1] : check_db [2,2222] -> used to compare the current and previous time steps.
        # symptom_db[1].iloc[0] : previous time step # symptom_db[1].iloc[1] : current time step
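        # Example (taken from the checks below): symptom_db[1].iloc[1]['BPORV'] is the current PORV
        # position, and symptom_db[1].iloc[0]['PPRZ'] is the pressurizer pressure one step earlier.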
        if symptom_db[0] == 0: # normal state
self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0')
self.symptom1.setText('')
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('')
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('')
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('')
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('')
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText('')
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 1:
self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6')
            self.symptom1.setText("Pressurizer 'high' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText("Pressurizer spray valve 'open' indication")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom3.setText("Pressurizer proportional heater off")
if symptom_db[1].iloc[1]['QPRZP'] == 0:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText("Pressurizer backup heater off")
if symptom_db[1].iloc[1]['QPRZB'] == 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText("Actual pressurizer 'low' pressure indication")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom6.setText("Pressurizer PORV block valve closed")
if symptom_db[1].iloc[1]['BHV6'] == 0:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 2:
            self.symptom_name.setText('Diagnosis Result : Ab21-02 Pressurizer pressure channel failure "Low" → Symptoms : 5')
            self.symptom1.setText("Pressurizer 'low' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText('Backup heater on indication and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom3.setText("Actual pressurizer 'high' pressure indication")
if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText('Pressurizer PORV closes as the actual pressurizer pressure decreases') # the pressure-decrease criterion still needs work.
if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']):
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 3:
            self.symptom_name.setText('Diagnosis Result : Ab20-04 Pressurizer level channel failure "Low" → Symptoms : 5')
            self.symptom1.setText("Pressurizer 'low' level indication due to channel failure")
            if symptom_db[1].iloc[1]['ZINST63'] < 17: # needs to be rechecked later.
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" alarm')
if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom3.setText('"CHARGING LINE FLOW HI/LO" alarm')
if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']):
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText('Charging flow increase')
if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText('Level indication increase on the healthy level indicator')
if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 4:
            self.symptom_name.setText('Diagnosis Result : Ab15-07 Steam generator level channel failure "Low" → Symptoms : ')
            self.symptom1.setText('Steam generator level "low" alarm')
if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText('Corresponding SG MFCV moves toward open and actual feedwater flow to that SG increases')
elif symptom_db[0] == 8:
# self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5')
self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5')
# self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생')
self.symptom1.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
self.symptom3.setText("pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] :
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom4.setText("PRT 고온 지시 및 경보 발생")
self.symptom4.setText("PRT high temperature indication and alarm")
if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] :
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom5.setText("PRT 고압 지시 및 경보 발생")
self.symptom5.setText("PRT high pressure indication and alarm")
if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Blank")
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 10:
            self.symptom_name.setText("Diagnosis Result : Ab21-11 Pressurizer spray valve failure 'Open' → Symptoms : 4")
            self.symptom1.setText("Pressurizer spray valve 'open' indication and status lamp lit")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom2.setText("Pressurizer backup heater on indication and alarm")
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom3.setText("Pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom4.setText("Rapid pressurizer level increase") # the 'rapid increase' criterion needs refinement -> fix later
if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
def explain_result(self, shap_add_des):
'''
        # shap_add_des['index'] : variable name / shap_add_des[0] : shap value
        # shap_add_des['describe'] : description of the variable / shap_add_des['probability'] : shap value converted to a probability
'''
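        # shap_add_des is expected to behave like a dataframe/dict with aligned columns, e.g.
        # shap_add_des['index'][i], shap_add_des['probability'][i], shap_add_des['describe'][i]
        # and shap_add_des['system'][i] all describe the i-th ranked variable (see the table fill below).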
self.tableWidget.setRowCount(len(shap_add_des))
self.tableWidget.setColumnCount(4)
self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system'])
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(QHeaderView.ResizeToContents)
header.setSectionResizeMode(0, QHeaderView.Stretch)
header.setSectionResizeMode(1, QHeaderView.Stretch)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QHeaderView.Stretch)
[self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))]
[self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))]
[self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))]
[self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))]
delegate = AlignDelegate(self.tableWidget)
self.tableWidget.setItemDelegate(delegate)
def show_table(self):
self.worker.shap.connect(self.explain_result)
        # Because the signal is delivered through the thread on click, there is some buffering (about 2 seconds?). Consider loading this earlier instead; revisit later.
self.tableWidget.show()
def plotting(self, symptom_db):
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# -- scatter --
# time = []
# value1, value2, value3 = [], [], []
# time.append(symptom_db[0])
# value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2))
# value2.append(round(symptom_db[1].iloc[1]['BPORV'],2))
# value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2))
# self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# -- Line plotting --
# self.plotting_1 = self.plot_1.plot(pen='w')
# self.plotting_2 = self.plot_2.plot(pen='w')
# self.plotting_3 = self.plot_3.plot(pen='w')
# self.plotting_4 = self.plot_4.plot(pen='w')
self.plot_1.showGrid(x=True, y=True, alpha=0.3)
self.plot_2.showGrid(x=True, y=True, alpha=0.3)
self.plot_3.showGrid(x=True, y=True, alpha=0.3)
self.plot_4.showGrid(x=True, y=True, alpha=0.3)
self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV'])
self.plot_1.setTitle('PORV open state')
self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN'])
self.plot_2.setTitle('Pressurizer pressure')
self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT'])
self.plot_3.setTitle('PRT temperature')
self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT'])
self.plot_4.setTitle('PRT pressure')
        # red_range = display_db[display_db['probability'] >= 10] # variables with a probability of 10% or more
#
# print(bool(red_range["describe"].iloc[3]))
# try :
# self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]])
# if red_range["describe"].iloc[0] == None:
# self.plot_1.setTitle(self)
# else:
# self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}')
# # self.plot_1.clear()
# except:
# print('plot1 fail')
# try:
# self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]])
# if red_range["describe"].iloc[1] == None:
# self.plot_2.setTitle(self)
# else:
# self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}')
# # self.plot_2.clear()
# except:
# print('plot2 fail')
# try:
# self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]])
# if red_range["describe"].iloc[2] == None:
# self.plot_3.setTitle(self)
# else:
# self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}')
# # self.plot_3.clear()
# except:
# print('plot3 fail')
# try:
# self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]])
# if red_range["describe"].iloc[3] == None:
# self.plot_4.setTitle(self)
# else:
# self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}')
# # self.plot_4.clear()
# except:
# print('plot4 fail')
def display_explain(self, display_db, symptom_db, normal_db):
'''
        # display_db['index'] : variable name / display_db[0] : shap value
        # display_db['describe'] : description of the variable / display_db['probability'] : shap value converted to a probability
        # symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
'''
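        # Bucketing used below: probability >= 10% -> "red" (main basis) buttons,
        # 1% < probability < 10% -> "orange" (sub basis) buttons; e.g. a variable at 12.3%
        # lands on a red button, one at 4.0% on an orange button.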
red_range = display_db[display_db['probability'] >=10]
orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]]
convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4}
convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12}
if 4-len(red_range) == 0:
red_del = []
elif 4-len(red_range) == 1:
red_del = [3]
elif 4-len(red_range) == 2:
red_del = [2,3]
elif 4-len(red_range) == 3:
red_del = [1,2,3]
elif 4-len(red_range) == 4:
red_del = [0,1,2,3]
if 12-len(orange_range) == 0:
orange_del = []
elif 12-len(orange_range) == 1:
orange_del = [11]
elif 12-len(orange_range) == 2:
orange_del = [10,11]
elif 12-len(orange_range) == 3:
orange_del = [9,10,11]
elif 12-len(orange_range) == 4:
orange_del = [8,9,10,11]
elif 12-len(orange_range) == 5:
orange_del = [7,8,9,10,11]
elif 12-len(orange_range) == 6:
orange_del = [6,7,8,9,10,11]
elif 12-len(orange_range) == 7:
orange_del = [5,6,7,8,9,10,11]
elif 12-len(orange_range) == 8:
orange_del = [4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 9:
orange_del = [3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 10:
orange_del = [2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 11:
orange_del = [1,2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 12:
orange_del = [0,1,2,3,4,5,6,7,8,9,10,11]
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
# [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))]
# [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del]
        # Build the plotting data tied to each button
red_plots = {0: self.red_plot_1, 1: self.red_plot_2, 2: self.red_plot_3, 3: self.red_plot_4}
orange_plots = {0: self.orange_plot_1, 1: self.orange_plot_2, 2: self.orange_plot_3, 3: self.orange_plot_4,
                4: self.orange_plot_5, 5: self.orange_plot_6, 6: self.orange_plot_7, 7: self.orange_plot_8,
                8: self.orange_plot_9, 9: self.orange_plot_10, 10: self.orange_plot_11, 11: self.orange_plot_12}
# Red buttons
for i in range(len(red_range)):
    red_plots[i].clear()
    red_plots[i].setTitle(red_range['describe'].iloc[i])
    red_plots[i].addLegend(offset=(-30, 20))
    red_plots[i].plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[i]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
    red_plots[i].plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[i]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange buttons
for i in range(len(orange_range)):
    orange_plots[i].clear()
    orange_plots[i].setTitle(orange_range['describe'].iloc[i])
    orange_plots[i].addLegend(offset=(-30, 20))
    orange_plots[i].plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[i]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
    orange_plots[i].plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[i]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
[convert_red[i].setCheckable(True) for i in range(4)]
[convert_orange[i].setCheckable(True) for i in range(12)]
def red1_plot(self):
if self.red1.isChecked():
if self.red1.text().split()[0] != 'None':
self.red_plot_1.show()
self.red1.setCheckable(False)
def red2_plot(self):
if self.red2.isChecked():
if self.red2.text().split()[0] != 'None':
self.red_plot_2.show()
self.red2.setCheckable(False)
def red3_plot(self):
if self.red3.isChecked():
if self.red3.text().split()[0] != 'None':
self.red_plot_3.show()
self.red3.setCheckable(False)
def red4_plot(self):
if self.red4.isChecked():
if self.red4.text().split()[0] != 'None':
self.red_plot_4.show()
self.red4.setCheckable(False)
def orange1_plot(self):
if self.orange1.isChecked():
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.show()
self.orange1.setCheckable(False)
def orange2_plot(self):
if self.orange2.isChecked():
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.show()
self.orange2.setCheckable(False)
def orange3_plot(self):
if self.orange3.isChecked():
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.show()
self.orange3.setCheckable(False)
def orange4_plot(self):
if self.orange4.isChecked():
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.show()
self.orange4.setCheckable(False)
def orange5_plot(self):
if self.orange5.isChecked():
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.show()
self.orange5.setCheckable(False)
def orange6_plot(self):
if self.orange6.isChecked():
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.show()
self.orange6.setCheckable(False)
def orange7_plot(self):
if self.orange7.isChecked():
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.show()
self.orange7.setCheckable(False)
def orange8_plot(self):
if self.orange8.isChecked():
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.show()
self.orange8.setCheckable(False)
def orange9_plot(self):
if self.orange9.isChecked():
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.show()
self.orange9.setCheckable(False)
def orange10_plot(self):
if self.orange10.isChecked():
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.show()
self.orange10.setCheckable(False)
def orange11_plot(self):
if self.orange11.isChecked():
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.show()
self.orange11.setCheckable(False)
def orange12_plot(self):
if self.orange12.isChecked():
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.show()
self.orange12.setCheckable(False)
def show_another_result(self):
self.other = another_result_explain()
self.worker.another_shap_table.connect(self.other.show_another_result_table)
self.worker.another_shap.connect(self.other.show_shap)
self.other.show()
class another_result_explain(QWidget):
def __init__(self):
super().__init__()
# Initial setup of the sub-interface
self.setWindowTitle('Another Result Explanation')
self.setGeometry(300, 300, 800, 500)
self.selected_para = pd.read_csv('./DataBase/Final_parameter_200825.csv')
# Compose the layout
combo_layout = QVBoxLayout()
self.title_label = QLabel("<b>Interpretation of the results for the scenarios that were not selected</b>")
self.title_label.setAlignment(Qt.AlignCenter)
self.blank = QLabel(self)  # blank label used as a line break / spacer
self.show_table = QPushButton("Show Table")
self.cb = QComboBox(self)
self.cb.addItem('Normal')
self.cb.addItem('Ab21-01: Pressurizer pressure channel failure (High)')
self.cb.addItem('Ab21-02: Pressurizer pressure channel failure (Low)')
self.cb.addItem('Ab20-04: Pressurizer level channel failure (Low)')
self.cb.addItem('Ab15-07: Steam generator level channel failure (High)')
self.cb.addItem('Ab15-08: Steam generator level channel failure (Low)')
self.cb.addItem('Ab63-04: Control rod fall')
self.cb.addItem('Ab63-02: Continuous insertion of control rod')
self.cb.addItem('Ab21-12: Pressurizer PORV opening')
self.cb.addItem('Ab19-02: Pressurizer safety valve failure')
self.cb.addItem('Ab21-11: Pressurizer spray valve failed opening')
self.cb.addItem('Ab23-03: Leakage from CVCS to RCS')
self.cb.addItem('Ab60-02: Rupture of the front end of the regenerative heat exchanger')
self.cb.addItem('Ab59-02: Leakage at the rear end of the charging flow control valve')
self.cb.addItem('Ab23-01: Leakage from CVCS to CCW')
self.cb.addItem('Ab23-06: Steam generator u-tube leakage')
# Explanation alarm implementation
cb_red_alarm = QGroupBox('Main basis for diagnosis')
cb_red_alarm_layout = QGridLayout()
cb_orange_alarm = QGroupBox('Sub basis for diagnosis')
cb_orange_alarm_layout = QGridLayout()
# Create the display buttons
self.cb_red1 = QPushButton(self)
self.cb_red2 = QPushButton(self)
self.cb_red3 = QPushButton(self)
self.cb_red4 = QPushButton(self)
self.cb_orange1 = QPushButton(self)
self.cb_orange2 = QPushButton(self)
self.cb_orange3 = QPushButton(self)
self.cb_orange4 = QPushButton(self)
self.cb_orange5 = QPushButton(self)
self.cb_orange6 = QPushButton(self)
self.cb_orange7 = QPushButton(self)
self.cb_orange8 = QPushButton(self)
self.cb_orange9 = QPushButton(self)
self.cb_orange10 = QPushButton(self)
self.cb_orange11 = QPushButton(self)
self.cb_orange12 = QPushButton(self)
# Insert the widgets into the layouts
cb_red_alarm_layout.addWidget(self.cb_red1, 0, 0)
cb_red_alarm_layout.addWidget(self.cb_red2, 0, 1)
cb_red_alarm_layout.addWidget(self.cb_red3, 1, 0)
cb_red_alarm_layout.addWidget(self.cb_red4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange1, 0, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange2, 0, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange3, 1, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange5, 2, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange6, 2, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange7, 3, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange8, 3, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange9, 4, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange10, 4, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange11, 5, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange12, 5, 1)
cb_red_alarm.setLayout(cb_red_alarm_layout)
cb_orange_alarm.setLayout(cb_orange_alarm_layout)
combo_layout.addWidget(self.title_label)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.cb)
combo_layout.addWidget(self.blank)
# combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
combo_layout.addWidget(cb_red_alarm)
combo_layout.addWidget(cb_orange_alarm)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.show_table)
combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
self.setLayout(combo_layout)
self.combo_tableWidget = QTableWidget(0, 0)
self.combo_tableWidget.setFixedHeight(500)
self.combo_tableWidget.setFixedWidth(800)
# self.combo_tableWidget = QTableWidget(0, 0)
# Event handling ########################################################
self.show_table.clicked.connect(self.show_anoter_table)
self.cb.activated[str].connect(self.show_another_result_table)
self.cb.activated[str].connect(self.show_shap)
##########################################################################
# Connect button clicks to their handlers
convert_cb_red_btn = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4} # Red Button
convert_cb_red_plot = {0: self.cb_red1_plot, 1: self.cb_red2_plot, 2: self.cb_red3_plot, 3: self.cb_red4_plot}
convert_cb_orange_btn = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12} # Orange Button
convert_cb_orange_plot = {0: self.cb_orange1_plot, 1: self.cb_orange2_plot, 2: self.cb_orange3_plot, 3: self.cb_orange4_plot,
4: self.cb_orange5_plot, 5: self.cb_orange6_plot, 6: self.cb_orange7_plot, 7: self.cb_orange8_plot,
8: self.cb_orange9_plot, 9: self.cb_orange10_plot, 10: self.cb_orange11_plot, 11: self.cb_orange12_plot}
################################################################################################################
# Declare the plot widgets for the buttons up front -> they must be created at initialization so they persist without being interrupted.
# Red Button
[convert_cb_red_btn[i].clicked.connect(convert_cb_red_plot[i]) for i in range(4)]
self.cb_red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_cb_orange_btn[i].clicked.connect(convert_cb_orange_plot[i]) for i in range(12)]
self.cb_orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
################################################################################################################
self.show() # Sub UI show command
def show_shap(self, all_shap, symptom_db, compare_data):
# all_shap : holds the shap_value arrays for every scenario.
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
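# Rough sketch of the structures this method assumes (inferred from the calls below, not
# from the original documentation):
#   all_shap[k]  -> 2-D SHAP array for scenario k, columns matching selected_para['0']
#   symptom_db   -> [time_axis, measured_plant_parameters]
#   compare_data -> dict keyed by 'Normal' or a scenario code such as 'Ab21-01'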
# The combo-box items were added in the same order as the entries of all_shap,
# so the selected index doubles as the index into all_shap.
scenario = self.cb.currentText()
step1 = pd.DataFrame(all_shap[self.cb.currentIndex()], columns=self.selected_para['0'].tolist())
compared_db = compare_data[scenario if scenario == 'Normal' else scenario[:7]]
step2 = step1.sort_values(by=0, ascending=True, axis=1)
step3 = step2[step2.iloc[:] < 0].dropna(axis=1).T
self.step4 = step3.reset_index()
col = self.step4['index']
var = [self.selected_para['0'][self.selected_para['0'] == col_].index for col_ in col]
val_col = [self.selected_para['1'][var_].iloc[0] for var_ in var]
proba = [(self.step4[0][val_num] / sum(self.step4[0])) * 100 for val_num in range(len(self.step4[0]))]
val_system = [self.selected_para['2'][var_].iloc[0] for var_ in var]
self.step4['describe'] = val_col
self.step4['probability'] = proba
self.step4['system'] = val_system
red_range = self.step4[self.step4['probability'] >= 10]
orange_range = self.step4[(self.step4['probability'] < 10) & (self.step4['probability'] > 1)]
convert_red = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4}
convert_orange = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12}
# Red / orange slots that have no parameter assigned
red_del = list(range(len(red_range), 4))
orange_del = list(range(len(orange_range), 12))
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i], 2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i], 2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
#####################################################################################################################################
# Build the plotting data bound to each button
# Red1 Button
if self.cb_red1.text().split()[0] != 'None':
self.cb_red_plot_1.clear()
self.cb_red_plot_1.setTitle(red_range['describe'].iloc[0])
self.cb_red_plot_1.addLegend(offset=(-30,20))
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red2 Button
if self.cb_red2.text().split()[0] != 'None':
self.cb_red_plot_2.clear()
self.cb_red_plot_2.setTitle(red_range['describe'].iloc[1])
self.cb_red_plot_2.addLegend(offset=(-30, 20))
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red3 Button
if self.cb_red3.text().split()[0] != 'None':
self.cb_red_plot_3.clear()
self.cb_red_plot_3.setTitle(red_range['describe'].iloc[2])
self.cb_red_plot_3.addLegend(offset=(-30, 20))
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red4 Button
if self.cb_red4.text().split()[0] != 'None':
self.cb_red_plot_4.clear()
self.cb_red_plot_4.setTitle(red_range['describe'].iloc[3])
self.cb_red_plot_4.addLegend(offset=(-30, 20))
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange1 Button
if self.cb_orange1.text().split()[0] != 'None':
self.cb_orange_plot_1.clear()
self.cb_orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.cb_orange_plot_1.addLegend(offset=(-30, 20))
self.cb_orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange2 Button
if self.cb_orange2.text().split()[0] != 'None':
self.cb_orange_plot_2.clear()
self.cb_orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.cb_orange_plot_2.addLegend(offset=(-30, 20))
self.cb_orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange3 Button
if self.cb_orange3.text().split()[0] != 'None':
self.cb_orange_plot_3.clear()
self.cb_orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.cb_orange_plot_3.addLegend(offset=(-30, 20))
self.cb_orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange4 Button
if self.cb_orange4.text().split()[0] != 'None':
self.cb_orange_plot_4.clear()
self.cb_orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.cb_orange_plot_4.addLegend(offset=(-30, 20))
self.cb_orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange5 Button
if self.cb_orange5.text().split()[0] != 'None':
self.cb_orange_plot_5.clear()
self.cb_orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.cb_orange_plot_5.addLegend(offset=(-30, 20))
self.cb_orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange6 Button
if self.cb_orange6.text().split()[0] != 'None':
self.cb_orange_plot_6.clear()
self.cb_orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.cb_orange_plot_6.addLegend(offset=(-30, 20))
self.cb_orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange7 Button
if self.cb_orange7.text().split()[0] != 'None':
self.cb_orange_plot_7.clear()
self.cb_orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.cb_orange_plot_7.addLegend(offset=(-30, 20))
self.cb_orange_plot_7.plot(x=symptom_db[0], y=
|
pd.DataFrame(symptom_db[1])
|
pandas.DataFrame
|
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parser.add_argument('--data', type=str,
help='location of the pickle file')
# don't use this for now
parser.add_argument('--word', action='store_true',
help='Extracting context for words only?')
parser.add_argument('--output', type=str,
help='directory to save dataset in')
args = parser.parse_args()
with open('/mnt/dhr/CreateChallenge_ICC_0821/no_ner_0_50000.txt','r') as f:
contexts=f.read().split("\n")
contexts=contexts[:-1]
def left_side_parser(df): # N N _ _ _
cur_df=df.copy()
try:
cur_df[['modifier','head','w1','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
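# Sketch of the long format produced by the pd.melt calls above (values are hypothetical,
# not from the corpus): each (modifier, head, year, count) row is repeated once per
# context slot, e.g.
#   modifier  head  year  count  variable  context
#   river     bank  1950  27     w1        water
# and only rows whose 'context' appears in the loaded context list are kept.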
def mid1_parser(df): # _ N N _ _
cur_df=df.copy()
try:
cur_df[['w1','modifier','head','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def mid2_parser(df): # _ _ N N _
cur_df=df.copy()
try:
cur_df[['w1','w2','modifier','head','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=
|
pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
|
pandas.melt
|
"""
immune_effects.py
Use-case analysis demonstrating ComptoxAI's ability to describe links between
PFOA and immune-modulating genes.
"""
from comptox_ai.db import GraphDB
import pandas as pd
import ipdb
db = GraphDB(hostname="comptox.ai")
df = pd.read_excel("D:/data/innatedb/innatedb_curated_genes.xls")
hsap = df.loc[df['Species']==9606,:] # Only interested in human genes
hsap_genes = list(
|
pd.unique(hsap['Gene Symbol'])
|
pandas.unique
|
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index =
|
Index(values)
|
pandas.Index
|
# -*- coding: utf-8 -*-
"""
__file__
preprocess.py
__description__
pre-processing the data:
- text cleaning
- merging synonyms
- stemming
- cleaning attribute
- building attribute_description
- extracting brand and size for products
__author__
<NAME>
"""
from __future__ import print_function
from nlp_utils import *
import cPickle
import pandas as pd
import project_params as pp
import sys
from spell_corr import spell_check_dict
import re
def prog():
print(".",end='')
sys.stdout.flush()
def longprog():
print("....",end='')
sys.stdout.flush()
def clean_attributes(df):
def cat_text(x):
res = '%s %s' % (x['name'], x['value'])
return res
df['attribute_description'] = list(df.apply(cat_text, axis=1)); prog()
remove_bullet = lambda x: re.sub(r'(bullet\d+)', r' ', x)
df['attribute_description'] = df['attribute_description'].map(remove_bullet); prog()
def has_size_attribute(x):
if ('height' in x) | ('width' in x) | ('length' in x) | ('depth' in x):
return True
else:
return False
df['has_size'] = df['name'].map(has_size_attribute); prog()
dfSize = df.loc[df.has_size, ['product_uid','value']]
df = df.drop(['has_size'],axis=1)
all_sizes = dfSize.groupby('product_uid').agg(lambda x : ' '.join(x))
indx = all_sizes.index.map(int)
dfSize = pd.DataFrame({'product_uid':list(indx), 'size_attribute':list(all_sizes['value'])})
prog()
dfBrand = df.loc[df['name'] == 'MFG Brand Name',['product_uid','value']].rename(columns={"value": "brand"})
dfBrand['brand']= dfBrand['brand'].map(lambda x: x.lower())
all_descr = df[['product_uid','attribute_description']].groupby('product_uid').agg(lambda x: ' '.join(x))
indx = all_descr.index.map(int)
prog()
df = pd.DataFrame({'product_uid':list(indx), 'attribute_description':list(all_descr['attribute_description'])})
df = pd.merge(df,dfSize,on='product_uid',how='left')
df = df.fillna(u'unknownsize')
df = pd.merge(df,dfBrand,on='product_uid',how='left')
df = df.fillna(u'unknownbrand')
return df
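# Hypothetical usage sketch (column names taken from the code above, file name illustrative):
# dfAttribute = pd.read_csv('attributes.csv')   # needs columns: product_uid, name, value
# dfAttr = clean_attributes(dfAttribute)        # -> product_uid, attribute_description, size_attribute, brand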
def extra_clean(word):
word = word.replace('kholerhighland', 'kohler highline')
word = word.replace('smart', ' smart ')
word = word.replace('residential', ' residential ')
word = word.replace('whirlpool', ' whirlpool ')
word = word.replace('alexandrea',' alexandria ')
word = word.replace('bicycle',' bicycle ')
word = word.replace('non',' non ')
word = word.replace('replacement',' replacement')
word = word.replace('mowerectrical', 'mow electrical')
word = word.replace('dishwaaher', 'dishwasher')
word = word.replace('fairfield',' fairfield ')
word = word.replace('hooverwindtunnel','hoover windtunnel')
word = word.replace('airconditionerwith','airconditioner with ')
word = word.replace('pfistersaxton', 'pfister saxton')
word = word.replace('eglimgton','ellington')
word = word.replace('chrome', ' chrome ')
word = word.replace('foot', ' foot ')
word = word.replace('samsung', ' samsung ')
word = word.replace('galvanised', ' galvanised ')
word = word.replace('exhaust', ' exhaust ')
word = word.replace('reprobramable', 'reprogramable')
word = word.replace('rackcloset', 'rack closet ')
word = word.replace('hamptonbay', ' hampton bay ')
word = word.replace('cadet', ' cadet ')
word = word.replace('weatherstripping', 'weather stripping')
word = word.replace('poyurethane', 'polyurethane')
word = word.replace('refrigeratorators','refrigerator')
word = word.replace('baxksplash','backsplash')
word = word.replace('inches',' inch ')
word = word.replace('conditioner',' conditioner ')
word = word.replace('landscasping',' landscaping ')
word = word.replace('discontinuedbrown',' discontinued brown ')
word = word.replace('drywall',' drywall ')
word = word.replace('carpet', ' carpet ')
word = word.replace('less', ' less ')
word = word.replace('tub', ' tub')
word = word.replace('tubs', ' tub ')
word = word.replace('marble',' marble ')
word = word.replace('replaclacemt',' replacement ')
word = word.replace('non',' non ')
word = word.replace('soundfroofing', 'sound proofing')
return word
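# Illustrative sketch of the kind of fix extra_clean applies (made-up query string):
# extra_clean('hooverwindtunnel dishwaaher')   # -> 'hoover windtunnel dishwasher'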
def str_clean_stem_lower(s):
try:
s = s.lower()
s = extra_clean(s)
s = re.sub(r"(\w)\.([A-Z])", r"\1 \2", s)
s = re.sub(r"([0-9]+)( *)(inches|inch|in|')\.?", r"\1in. ", s)
s = re.sub(r"([0-9]+)( *)(foot|feet|ft|'')\.?", r"\1ft. ", s)
s = re.sub(r"([0-9]+)( *)(pounds|pound|lbs|lb)\.?", r"\1lb. ", s)
s = re.sub(r"([0-9]+)( *)(square|sq) ?\.?(feet|foot|ft)\.?", r"\1sq.ft. ", s)
s = re.sub(r"([0-9]+)( *)(gallons|gallon|gal)\.?", r"\1gal. ", s)
s = re.sub(r"([0-9]+)( *)(ounces|ounce|oz)\.?", r"\1oz. ", s)
s = re.sub(r"([0-9]+)( *)(centimeters|cm)\.?", r"\1cm. ", s)
s = re.sub(r"([0-9]+)( *)(milimeters|mm)\.?", r"\1mm. ", s)
s = re.sub(r"([0-9]+)( *)(degrees|degree)\.?", r"\1deg. ", s)
s = re.sub(r"([0-9]+)( *)(volts|volt)\.?", r"\1volt. ", s)
s = re.sub(r"([0-9]+)( *)(watts|watt)\.?", r"\1watt. ", s)
s = re.sub(r"([0-9]+)( *)(amperes|ampere|amps|amp)\.?", r"\1amp. ", s)
s = s.replace(" x "," xby ")
s = s.replace("*"," xby ")
s = s.replace(" by "," xby")
s = s.replace("x0"," xby 0")
s = s.replace("x1"," xby 1")
s = s.replace("x2"," xby 2")
s = s.replace("x3"," xby 3")
s = s.replace("x4"," xby 4")
s = s.replace("x5"," xby 5")
s = s.replace("x6"," xby 6")
s = s.replace("x7"," xby 7")
s = s.replace("x8"," xby 8")
s = s.replace("x9"," xby 9")
s = s.replace("0x","0 xby ")
s = s.replace("1x","1 xby ")
s = s.replace("2x","2 xby ")
s = s.replace("3x","3 xby ")
s = s.replace("4x","4 xby ")
s = s.replace("5x","5 xby ")
s = s.replace("6x","6 xby ")
s = s.replace("7x","7 xby ")
s = s.replace("8x","8 xby ")
s = s.replace("9x","9 xby ")
s = s.replace("whirpool","whirlpool")
s = s.replace("whirlpoolga", "whirlpool")
s = s.replace("whirlpoolstainless","whirlpool stainless")
s = s.replace(" "," ")
# using default stemmer from nlp_utils:
s = (' ').join([stemmer.stem(z) for z in s.split(' ')])
if s == '':
s = u'null'
return s.lower()
except:
return u'null'
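# Illustrative sketch only (the exact output depends on the stemmer imported from nlp_utils):
# str_clean_stem_lower('10 inches x 5 ft. Whirlpool')
# # -> unit patterns are normalised to '10in.' / '5ft.', ' x ' becomes ' xby ', then each token is stemmed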
if __name__ == '__main__':
######### reading csv files #############
print("Loading data.",end='')
dfTrain = pd.read_csv(pp.train_raw_file,encoding=pp.encoding); prog()
dfTest = pd.read_csv(pp.test_raw_file,encoding=pp.encoding); prog()
dfAttribute = pd.read_csv(pp.attribute_raw_file,encoding=pp.encoding); prog()
dfProdDescription = pd.read_csv(pp.description_raw_file,encoding=pp.encoding); prog()
dfSynTrain = pd.read_csv(pp.synonyms_train_raw_file,encoding=pp.encoding); prog()
dfSynTest =
|
pd.read_csv(pp.synonyms_test_raw_file,encoding=pp.encoding)
|
pandas.read_csv
|
import re
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, Optional
import altair as alt
import click
import pandas as pd
import streamlit as st
from gobbli.inspect.evaluate import ClassificationError, ClassificationEvaluation
from gobbli.interactive.util import (
DEFAULT_PREDICT_BATCH_SIZE,
get_label_indices,
get_predictions,
load_data,
safe_sample,
st_model_metadata,
st_sample_data,
st_select_model_checkpoint,
)
from gobbli.util import truncate_text
def show_metrics(metrics: Dict[str, Any]):
st.header("Metrics")
md = ""
for name, value in metrics.items():
md += f"- **{name}:** {value:.4f}\n"
st.markdown(md)
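# Minimal usage sketch (metric names and values are hypothetical):
# show_metrics({"Accuracy": 0.912, "Weighted F1": 0.887})   # renders one '- **name:** value' bullet per metric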
def show_plot(plot: alt.Chart):
st.header("Model Predicted Probability by True Class")
st.altair_chart(plot)
TRUE_LABEL_COLOR = "#1f78b4"
TRUE_LABEL_TEXT_COLOR = "white"
PRED_PROB_LABEL_RE = re.compile(r"^(.+) \((?:[0-9.]+)\)$")
def _show_example_predictions(
texts: List[str],
labels: Optional[List[str]],
y_pred_proba: pd.DataFrame,
truncate_len: int,
top_k: int,
):
def gather_predictions(row):
ndx = row.name
pred_prob_order = row.sort_values(ascending=False)[:top_k]
data = {"Document": truncate_text(texts[ndx], truncate_len)}
if labels is not None:
y_true = labels[ndx]
column_header = "True Labels" if isinstance(y_true, list) else "True Label"
data[column_header] = y_true
for i, (label, pred_prob) in enumerate(pred_prob_order.items()):
data[f"Predicted Label {i+1}"] = f"{label} ({pred_prob:.3f})"
return pd.Series(data)
df = y_pred_proba.apply(gather_predictions, axis=1)
def style_pred_prob(row, labels):
ndx = row.name
true_label_style = (
f"background-color: {TRUE_LABEL_COLOR};color: {TRUE_LABEL_TEXT_COLOR}"
)
style = [
# Text
"",
# True label
true_label_style,
]
pred_probs = row[2:]
for p in pred_probs:
match = re.match(PRED_PROB_LABEL_RE, p)
if match is None:
raise ValueError(f"Failed to parse predicted probability cell: {p}")
y_true = labels[ndx]
if isinstance(y_true, list):
is_match = match.group(1) in y_true
else:
is_match = match.group(1) == y_true
if is_match:
# The cell corresponds to a correct label
cell_style = true_label_style
else:
cell_style = ""
style.append(cell_style)
return style
if labels is not None:
df = df.style.apply(style_pred_prob, axis=1, labels=labels)
st.table(df)
def show_example_predictions(
texts: List[str],
labels: Optional[List[str]],
y_pred_proba: pd.DataFrame,
example_truncate_len: int,
example_num_docs: int,
example_top_k: int,
):
st.header("Example Predictions")
example_indices = safe_sample(range(len(texts)), example_num_docs)
_show_example_predictions(
[texts[i] for i in example_indices],
None if labels is None else [labels[i] for i in example_indices],
y_pred_proba.iloc[example_indices, :].reset_index(drop=True),
example_truncate_len,
example_top_k,
)
def show_errors(errors: List[ClassificationError], truncate_len: int = 500):
df_data = []
for e in errors:
pred_class = max(e.y_pred_proba, key=e.y_pred_proba.get)
pred_class_prob = e.y_pred_proba[pred_class]
true_column_header = (
"True Labels" if isinstance(e.y_true, list) else "True Label"
)
df_data.append(
# Use OrderedDict to preserve column order
OrderedDict(
{
"Document": truncate_text(e.X, truncate_len),
true_column_header: e.y_true,
"Top Predicted Label": f"{pred_class} ({pred_class_prob:.4f})",
}
)
)
df =
|
pd.DataFrame(df_data)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
This is the base file that can serve as a starting point for the Python Adaptor development.
This file can also be used as a template for Python modules.
Logging goes through the module-level main_logger.
"""
import argparse as ap
import csv
import datetime
import logging
import os
import pandas as pd
from pathlib import Path
import subprocess
import re
from shutil import move
import xarray as xr
import sys
import xml.etree.ElementTree as ET
# For package only #
# uncomment this when building the wheel distribution: python setup.py bdist_wheel
# from epaswmmadaptor import epaswmm
# from epaswmmadaptor import __version__
__author__ = "pbishop,lboutin"
__copyright__ = "pbishop,lboutin"
__license__ = "mit"
# XML namespace dict, needed to find elements
namespace = {"pi": "http://www.wldelft.nl/fews/PI"}
def add_attributes(ds):
"""
Add model specific attributes to make it more CF compliant
"""
ds.time.attrs["standard_name"] = "time"
ds.time.attrs["long_name"] = "time"
ds.time.attrs["axis"] = "T"
ds.station_id.attrs["standard_name"] = "Station Identifier"
ds.station_id.attrs["long_name"] = "EPA_SWMM Station Identifier"
ds.station_id.attrs["axis"] = "XY"
ds.station_id.attrs["cf_role"] = "timeseries_id"
ds = ds.assign_attrs(
Conventions="CF-1.6",
title="Data from simulation outputs",
institution="TRCA",
source="Don River Hydrology Update Project Number 60528844 December 2018",
history=datetime.datetime.utcnow().replace(microsecond=0).isoformat(" ")
+ " EMT: simulation results from EPA SWMM model",
references="https://trca.ca/",
Metadata_Conventions="Unidata Dataset Discovery v1.0",
summary="EPA SWMM simulation output",
date_created=datetime.datetime.utcnow().replace(microsecond=0).isoformat(" ")
+ " EMT",
coordinate_system="WGS 1984",
featureType="timeSeries",
comment="created from Python script EPA-SWMM-Adaptor",
)
return ds
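# Hypothetical usage sketch (variable and coordinate names are illustrative, not from the adaptor):
# ds = xr.Dataset({"flow": (("time", "station_id"), values)},
#                 coords={"time": times, "station_id": ids})
# ds = add_attributes(ds)   # stamps CF-style attributes on time/station_id plus global metadata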
def bytes_to_string(df, col_to_convert):
"""
Decodes columns in a dataframe. When the NetCDF file is read, string columns are encoded.
"""
for x in col_to_convert:
df[x] = df[x].str.decode('utf-8')
return df
def check_properties(key, props, run_info_file):
if key not in props:
main_logger.error("Key (%s) was not specified in the run_info.xml file." % key)
raise KeyError(
f'"{key}" needs to be specified under <properties> in {run_info_file.resolve()}'
)
def create_xarray_dataset(data_dict, swmm_unit_dict):
"""
Creating xarray datasets.
"""
main_logger.debug("Creating DataSet from the results DataFrame.")
list_ds_nodes = []
list_ds_links = []
list_keys_ignored = []
for key in data_dict.keys():
try:
header = data_dict[key]['Header']
units = data_dict[key]['Units']
rename_header = {}
except Exception:
main_logger.error("Failed to get header/units when creating dataset.")
stop_program()
for item in range(0, len(units)):
try:
rename_header[units[item]] = header[item]
temp_df = data_dict[key]['Data'].copy(deep=True)
temp_df = temp_df.rename(rename_header, axis='columns')
temp_df['station_id'] = key
temp_df.set_index(['station_id'], append=True, inplace=True)
ds2 = xr.Dataset.from_dataframe(temp_df)
except Exception:
main_logger.error("Failed to create DataSet for {0}".format(temp_df['station_id']))
stop_program()
for var, unit in data_dict[key]['units_dict'].items():
try:
attributes_info = swmm_unit_dict.get(unit)
for attrs, val in attributes_info.items():
if attrs == 'UDUNITS':
attrs = 'units'
ds2[var].attrs[attrs] = val
except Exception:
main_logger.error(
"Error raised due to EPA SWMM unit --> {0} is not recognized. Please add corresponding information into the UDUNITS_lookup.csv input file.".format(
unit))
stop_program()
raise KeyError(
"Error raised due to EPA SWMM unit --> {0} is not recognized. Please add corresponding information into the UDUNITS_lookup.csv input file.".format(
unit))
try:
if "node" in key.lower():
list_ds_nodes.append(ds2)
elif "link" in key.lower():
list_ds_links.append(ds2)
else:
list_keys_ignored.append(key)
pass
except Exception:
main_logger.error("Failed to append data to dataset for: {0}".format(key))
stop_program()
print("Locations ignored in the resulting output file (i.e. not a node or a link): \n\n" + str(list_keys_ignored))
# Combining Dataset for each station_id with same type
try:
main_logger.debug("Start combining xarray DataSets for nodes ...")
combined_ds_nodes = xr.combine_by_coords(list_ds_nodes)
combined_ds_nodes = add_attributes(combined_ds_nodes)
except Exception:
main_logger.error("Failed to combining xarray DataSets for nodes")
stop_program()
try:
main_logger.debug("Start combining xarray DataSets for links ...")
combined_ds_links = xr.combine_by_coords(list_ds_links)
combined_ds_links = add_attributes(combined_ds_links)
except Exception:
main_logger.error("Failed to combining xarray DataSets for links")
stop_program()
print("\nDone creating xarray DataSet for Nodes and Links.\n")
main_logger.debug("Done creating xarray DataSet for Nodes and Links.")
return combined_ds_nodes, combined_ds_links
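# Illustrative sketch of the data_dict structure that create_xarray_dataset() expects
# (the station name "Node N1", header "Inflow" and unit "CFS" are hypothetical examples,
# not values taken from a real model run):
#   data_dict = {"Node N1": {"Header": ["Inflow"],
#                            "Units": ["CFS"],
#                            "Data": <time-indexed DataFrame whose columns are the unit
#                                     labels, renamed by the function to the header names>,
#                            "units_dict": {"Inflow": "CFS"}}}
# Keys containing "node" or "link" decide whether a station ends up in the nodes or the
# links dataset; any other key is reported as ignored.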
def dir_element(elem, exists=True):
"""
Checks if a string or XML element is a directory path, and returns the corresponding path.
"""
if isinstance(elem, str):
# such that this works if the path is in an attribute
path = Path(elem)
else:
path = Path(elem.text)
if exists and not path.is_dir():
main_logger.error(
"The following is expected to exist but was not found: %s" % (os.path.join(os.getcwd(), path)))
stop_program()
raise FileNotFoundError(path.resolve())
return path
def file_element(elem, exists=True):
"""
Checks if a string or XML element is a path, and returns the corresponding path.
"""
if isinstance(elem, str):
# such that this works if the path is in an attribute
if "bin" in elem:
path = Path(elem)
root = Path(os.getcwd())
path = os.path.join(root.parents[0], path)
path = Path(path)
else:
path = Path(elem)
else:
path = Path(elem.text)
if exists and not path.is_file():
print("The following is expected to exist but was not found: %s" % (os.path.join(os.getcwd(), path)))
main_logger.error(
"The following is expected to exist but was not found: %s" % (os.path.join(os.getcwd(), path)))
stop_program()
raise FileNotFoundError(path.resolve())
return path
def make_df(lines, start, nrows, df_header):
"""
Method to create a pandas DataFrame from a subset of lines from the simulation results *.rpt file.
"""
    # PERFORMANCE ISSUE: parsers.py from pandas causes a loss in performance here.
    # Using the pandas.DataFrame() constructor instead improved performance.
# df = pd.read_csv(file, delimiter=r"\s+", names=df_header, header=None,
# skiprows=start+2,nrows=nrows-1, parse_dates=[0], dayfirst = False)
try:
df = pd.DataFrame([[i for i in line.strip().split()] for line in lines[start + 2:start + nrows - 1]],
columns=df_header)
df['DateO'] = pd.to_datetime(df['Date'])
df['TimeO'] = pd.to_timedelta(df['Time'])
df['time'] = df['DateO'] + df['TimeO']
df = df.drop(columns=['DateO', 'TimeO', 'Date', 'Time'])
df = df.set_index('time')
df = df.apply(pd.to_numeric)
except Exception:
main_logger.error("Failed to create dataframe for line starting with: {0}".format(lines[start]))
stop_program()
return df
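# Illustrative example of what make_df() does with a single report line (the column name
# "Flow" and the values are hypothetical, for orientation only): a line such as
#   "01/01/2020  00:15:00  1.23"
# split on whitespace under df_header=["Date", "Time", "Flow"] becomes a row indexed by the
# combined timestamp 2020-01-01 00:15:00 with the numeric column Flow = 1.23.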
def read_netcdf(netcdf_filename, col_to_convert):
"""
Read a netCDF file and return a pandas DataFrame
"""
ds = xr.open_dataset(netcdf_filename)
try:
df = ds.to_dataframe()
except Exception:
main_logger.error("Failed to create dataframe when reading the NetCDF file.")
stop_program()
try:
df = bytes_to_string(df, col_to_convert)
except Exception:
main_logger.error("Failed to decode following columns when reading NetCDF file: " + ','.join(col_to_convert))
stop_program()
return df
def read_errors_warnings(file_list):
"""
Read errors and warnings from the *.rpt ASCII and Python log file output from the simulation.
"""
main_logger.debug("File list: {0}".format(file_list))
list_warning_error = []
df = None
for f in file_list:
try:
with open(f, "r") as fi:
for ln in fi:
if any(x in ln for x in ["ERROR", "WARNING", "DEBUG", "INFO", "FATAL"]):
list_warning_error.append(ln.strip())
except Exception:
main_logger.error(
"The following is expected to exist but was not found: {0}".format(os.path.join(os.getcwd(), f)))
stop_program()
raise FileNotFoundError(Path(f).resolve())
if len(list_warning_error) > 0:
df =
|
pd.Series(list_warning_error)
|
pandas.Series
|
import pandas as pd
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import RobustScaler
from sklearn.neighbors import KNeighborsRegressor
import config
def preprocess():
train_df = pd.read_csv(config.TRAINING_FILE)
test_df = pd.read_csv(config.TESTING_FILE)
test_df['kfold'] = -1
    ## Converting milliseconds to minutes
train_df['song_duration_ms'] = train_df.song_duration_ms / 1000 / 60
test_df['song_duration_ms'] = test_df.song_duration_ms / 1000 / 60
valcol = [
'song_duration_ms', 'acousticness', 'danceability', 'energy',
'instrumentalness', 'key', 'liveness', 'loudness', 'audio_mode',
'speechiness', 'tempo', 'time_signature', 'audio_valence', 'kfold'
]
cat = ['audio_mode', 'time_signature']
cont = ['song_duration_ms', 'acousticness', 'danceability', 'energy',
'instrumentalness', 'key', 'liveness', 'loudness', 'speechiness', 'tempo', 'audio_valence']
col_train = train_df.columns
col_test = test_df.columns
## Initiating the imputer
imputer = IterativeImputer(random_state=0, max_iter=10, initial_strategy='mean')
train_df = pd.DataFrame(imputer.fit_transform(train_df))
train_df.columns = col_train
test_df = pd.DataFrame(imputer.fit_transform(test_df))
test_df.columns = col_test
## Robust Scaler
## https://www.geeksforgeeks.org/standardscaler-minmaxscaler-and-robustscaler-techniques-ml/
combined_df =
|
pd.concat([train_df.loc[:,cont], test_df.loc[:,cont]], ignore_index=True)
|
pandas.concat
|
import pandas as pd
from BS.utils import (read_src_socket_bs, get_dicts_from_csv_file,
save_list_to_file, read_src_bs, get_socket_word_form,
get_string_list_from_file, get_bs_title_word_form)
def get_root_index_data_set():
"""
    13.1. Find in the document БГ 06.04.21.txt all multi-root words,
    i.e. words (except invisibles) that have a root index;
    13.2. Create the Excel document Многокорневые слова.xlsx and fill it with rows
    containing the multi-root words that were found, observing the following rules:
    a. rows are given IN FULL;
    b. a row is inserted into one column or another depending
    on its root index;
    c. a row that has already been inserted must not be inserted a second (or more!) time!
    In other words, the document Многокорневые слова.xlsx must not contain repeats
    of absolutely identical rows
    (e.g. the row
    автобус 3* .СеИ неод мI1 мнII1 * auto(mobile) (omni)bus
    is inserted once even though it occurs twice in the base, or
    e.g. the row
    ЮНЕСКО 6
    is inserted once even though it occurs 6 times in the base);
    13.3. once the document Многокорневые слова.xlsx has been filled in,
    arrange the inserted rows within the columns in the alphabetical
    order of the words,
    and the columns themselves in the following order:
    2 2! 2* 3 3! 3* 3** 4 4! 4* 4** 5 5! 5* 5** 6 6! 6* 6** 7 7! 7* 7** .
"""
socket_group_word_form_list = read_src_socket_bs(
'src_dict/БГ 06.04.21.txt')
root_index_ds = {
'2': [],
'2!': [],
'2*': [],
'3': [],
'3!': [],
'3*': [],
'3**': [],
'4': [],
'4!': [],
'4*': [],
'4**': [],
'5': [],
'5!': [],
'5*': [],
'5**': [],
'6': [],
'6!': [],
'6*': [],
'6**': [],
'7': [],
'7!': [],
'7*': [],
'7**': [],
}
for socket_group_word_form in socket_group_word_form_list:
for socket_word_form in socket_group_word_form.socket_word_forms:
root_index = socket_word_form.root_index
if root_index and not socket_word_form.invisible:
root_index_ds[root_index].append(str(socket_word_form))
for k in root_index_ds:
root_index_ds[k] = sorted(list(
set(root_index_ds[k])),
key=lambda x: x.replace('*', '').lower().strip()
)
ds = []
for k in root_index_ds:
for word_form in root_index_ds[k]:
ds.append({
'root_index': k,
'word_form': word_form,
})
df =
|
pd.DataFrame(ds)
|
pandas.DataFrame
|
import re
import libchebipy
from rdkit import Chem
from rdkit.Chem import rdChemReactions
from rdkit.Chem.MolStandardize.rdMolStandardize import TautomerEnumerator, Uncharger, StandardizeSmiles
import pybel
import urllib.request
import time
import ssl
import pandas as pd
def get_master_rhea(rhea_map, ids):
rhea_ids = list(rhea_map['ID'])
matched_ids = list(set(rhea_ids).intersection(set(ids))) # All Uniprot identifiers that were found with a local cross-reference
missing_ids = list(set(ids)-set(rhea_ids)) # All Uniprot identifiers not found with local cross-reference
found_entries = rhea_map.loc[rhea_map['ID'].isin(matched_ids)]
found_rheas = list(found_entries['MASTER_ID'])
return(found_rheas, missing_ids)
#TODO: Return only those based on argument passed. Right now, it's everything.
#As of now, everything is selected (both directions).
#TODO: Also clean up as much as possible
def get_rxn_ids(rhea_ids, rhea_directions):
"""
Read in ambiguous Rhea identifiers and return a dict containing rhea identifiers with local ID's along
with those that aren't in rhea_directions.
"""
master_rxns = rhea_directions.loc[rhea_directions['RHEA_ID_MASTER'].isin(rhea_ids)]
master_found = list(master_rxns['RHEA_ID_MASTER'])
lr_master = list(master_rxns['RHEA_ID_LR'])
rl_master = list(master_rxns['RHEA_ID_RL'])
lr_only = list(rhea_directions.loc[rhea_directions['RHEA_ID_LR'].isin(rhea_ids)]['RHEA_ID_LR'])
rl_only = list(rhea_directions.loc[rhea_directions['RHEA_ID_RL'].isin(rhea_ids)]['RHEA_ID_RL'])
bi_lr = list(rhea_directions.loc[rhea_directions['RHEA_ID_BI'].isin(rhea_ids)]['RHEA_ID_LR'])
bi_rl = list(rhea_directions.loc[rhea_directions['RHEA_ID_BI'].isin(rhea_ids)]['RHEA_ID_RL'])
bi_both = bi_lr + bi_rl
all_found_rhea = lr_master + rl_master + lr_only + rl_only + bi_both
not_found_rhea = list(set(rhea_ids) - set(master_found) - set(lr_master) - set(rl_master) - set(bi_both))
rhea_rxn_ids = {'local': all_found_rhea, 'external': not_found_rhea}
return(rhea_rxn_ids)
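# Illustrative sketch of the returned mapping (the Rhea identifiers shown are hypothetical):
# get_rxn_ids() yields something like
#   {'local': [10001, 10002, ...], 'external': [99999, ...]}
# where 'local' collects the directional reaction ids resolved through rhea_directions and
# 'external' holds the ids that could not be resolved locally.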
def chebi_rels(chebi_id, wait=.1):
chebi = libchebipy.ChebiEntity(chebi_id)
chebi_rels = chebi.get_incomings()
chebis = [z._Relation__target_chebi_id for z in chebi_rels]
time.sleep(wait)
return(chebis)
def get_chebi_smiles(chebi_frame, chebis):
entries = chebi_frame.loc[chebi_frame['ChEBI_ID'].isin(chebis)]
entries.reset_index(inplace=True, drop=True)
return(entries)
def external_rhea(rhea_ids, wait=.1):
rxns = list()
for rhea in rhea_ids:
try:
query = "https://www.rhea-db.org/rest/1.0/ws/reaction/rxn/" + rhea
contents = urllib.request.urlopen(query, context=ssl.SSLContext()).read()
rxns.append(rdChemReactions.ReactionFromRxnBlock(contents))
except Exception:
print("Could not retrieve guessed external rhea reaction: {}".format(rhea))
time.sleep(wait)
return(rxns)
def convert_rxn(rxn_path):
try:
return rdChemReactions.ReactionFromRxnFile(rxn_path)
except Exception:
return "ERROR: {}".format(rxn_path)
def rxn_to_smiles(rxn):
try:
products = rxn.GetProducts()
all_smiles = {mol.GetProp('_Name'): Chem.MolToSmiles(mol) for mol in products}
return(all_smiles)
except Exception:
return("ERROR: {}".format(rxn))
def get_chebi_names(chebi_path, smiles_df):
chebi_names = pd.read_csv(chebi_path, sep='\t', header=None)
chebi_names.columns = ['ChEBI_ID', 'Name']
found_names = chebi_names.loc[chebi_names['ChEBI_ID'].isin(smiles_df['ChEBI_ID'])]
full_df =
|
pd.merge(found_names, smiles_df, on='ChEBI_ID', how='outer')
|
pandas.merge
|
import pandas as pd
import os
def parse_comments(file, comment='#',sep='\t',expect_one_value=True):
"parse comments at begin of file #Avg: 123"
Parsed = {}
with open(file) as f:
line = f.readline()
while line.startswith(comment):
line_values = line[1:].strip().split(sep)
name = line_values[0]
if name[-1] == ':':
name = name[:-1]
values = line_values[1:]
if len(values) == 1:
Parsed[name]=values[0]
elif not expect_one_value:
Parsed[name]=values
line= f.readline()
if len(Parsed)==0:
raise Exception("Couldn't parse values from file {} with comment char {} and sep '{}' ".format(file,comment,sep))
return Parsed
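# Illustrative example (hypothetical file content): with the default comment char '#' and
# tab separator, a header line such as "#Avg:<TAB>12.3" is parsed into {"Avg": "12.3"};
# the trailing ':' on the name is stripped and the single value is stored as a string.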
def read_coverage_binned(covarage_binned_file):
return pd.read_csv(covarage_binned_file,sep='\t',
skiprows=2,
index_col=[0,2],
usecols=[0,1,2],
squeeze=True)
def combine_coverages(coverage_files,sample_names,coverage_measure='Median_fold'):
"""
Combines the coverage files from different samples
Args:
coverage_files: bunch of coverage_files produced with pileup.sh from the bbmap package
sample_names: sample names associated with the coverage_files
Output:
combined_cov: pandas dataframe of samples x contigs for coverage
combined_N_reads: pandas dataframe of samples x contigs for number of mapped reads
"""
combined_cov={}
combined_N_reads={}
assert len(coverage_files)==len(sample_names)
for i in range(len(coverage_files)):
sample= sample_names[i]
data= pd.read_csv(coverage_files[i],index_col=0,sep='\t')
data.loc[data[coverage_measure]<0,coverage_measure]=0
combined_cov[sample]= data[coverage_measure]
combined_N_reads[sample] = data.Plus_reads+data.Minus_reads
combined_cov=
|
pd.DataFrame(combined_cov)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# Author : <NAME>
# Initial Date: Apr 2, 2020
# About: strymmap class to visualize and analyze GPS data from CSV file recorded using Grey Panda device and libpanda software.
# Read associated README for full description
# License: MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
## General Data processing and visualization Import
import time
import numpy as np
import math
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (16,8)
from scipy.interpolate import interp1d
from .phasespace import phasespace
from .strymread import strymread
from logging import Logger
from .utils import configure_logworker
LOGGER = configure_logworker()
from matplotlib import cm
import pandas as pd  # Note that this is not the comma.ai Panda, but the pandas data analysis library
import os
import sys
from subprocess import Popen, PIPE
import gmaps
from dotenv import load_dotenv
load_dotenv()
from .config import config
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
import ntpath
import bokeh.io
import bokeh.plotting
import bokeh.models
import bokeh.transform
from bokeh.palettes import Magma256 as palette
from bokeh.models import ColorBar
from bokeh.io import output_notebook
from bokeh.io.export import get_screenshot_as_png
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from .tools import ellipse_fit
output_notebook()
import plotly.express as px
import plotly.io as pio
import plotly.offline as pyo
# Set notebook mode to work in offline
pyo.init_notebook_mode()
class strymmap:
'''
`strymmap` reads the GPS data from the given CSV file.
This class provides several utilities functions to work with GPS Data
Parameters
----------------
    csvfile: `str`
The CSV file to be read
Attributes
---------------
csvfile:`string`
The filepath of CSV Data file
dataframe: `pandas.Dataframe`
Pandas dataframe that stores content of csvfile as dataframe
aq_time: `string`
        Acquisition time of the GPS signal with valid latitude and longitude, in the form of a human-readable date string as per the local timezone
latitude: `pandas.DataFrame`
Latitude Timeseries
longitude: `pandas.DataFrame`
Longitude Timeseries
altitude: `pandas.DataFrame`
Altitude Timeseries
success: `bool`
        If file reading was successful, success is set to True
Returns
---------------
`strymmap`
Returns an object of type `strymmap`
Example
----------------
Generating GOOGLE MAP API KEY
    You will need to ensure that you have the right Google API KEY before you can use `strymmap`.
You can generate API KEY at https://console.developers.google.com/projectselector2/apis/dashboard.
    Put the API KEY as an environment variable in the file ~/.env by executing the following from the command line
`echo "export GOOGLE_MAP_API_KEY=<KEY>" >> ~/.env`
Use your own key instead of `abcdefghijklmnopqrstuvwxyz`.
A good tutorial on how to perform API setup is given at https://web.archive.org/web/20200404070618/https://pybit.es/persistent-environment-variables.html
Generating MAP BOX API KEY
    Generating a MAP BOX API key is easier than generating a Google Map API key.
Just create an account on mapbox.com and select create token.
You can also check tutorials on https://www.youtube.com/watch?v=6iQEhaE1bCY
    Put the API Key as an environment variable in the file ~/.env by executing the following from the command line
`echo "export MAP_BOX_API=abcdefghijklmnopqrstuvwxyz" >> ~/.env`.
Use your own key instead of `abcdefghijklmnopqrstuvwxyz`.
>>> import strym
>>> from strym import strymmap
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> csvdata = '2020-03-20.csv'
>>> r0 = strymmap(csvfile=csvdata)
'''
def __init__(self, csvfile, **kwargs):
self.success = False
# if file size is less than 60 bytes, return without processing
if os.path.getsize(csvfile) < 60:
print("Nothing significant to read in {}. No further analysis is warranted.".format(csvfile))
return
if shell_type not in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
raise ValueError("strymmap can only be used within Jupyter Notebook.")
# CSV File
self.csvfile = csvfile
LOGGER.info("Reading GPS file {}".format(csvfile))
# All CAN messages will be saved as pandas dataframe
try:
status_category =
|
pd.CategoricalDtype(categories=['A', 'V'], ordered=False)
|
pandas.CategoricalDtype
|
#
# Convert API responses to Pandas DataFrames
#
import pandas as pd
def accounts(data):
"""accounts as dataframe"""
return pd.concat(
|
pd.json_normalize(v["securitiesAccount"])
|
pandas.json_normalize
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Purpose: Perform automated testing on pdvalidate.
:Platform: Linux/Windows | Python 3.5
:Developer: <NAME>
:Email: <EMAIL>
"""
# pylint: disable=protected-access
# pylint: disable=wrong-import-position
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import datetime
import numpy as np
import pytest
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pdvalidate.validation import ei, \
validate as pdv, \
ValidationWarning
class TestReturnTypes():
strings = pd.Series(['1', '1', 'ab\n', 'a b', 'Ab', 'AB', np.nan])
masks = [pd.Series([False, False, False, True, True, False, False]),
pd.Series([True, True, False, True, True, False, True])]
def test_return_mask_series(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'mask_series'),
pd.Series([True, True, False, True, True, False, True]))
def test_return_mask_frame(self):
assert_frame_equal(pdv._get_return_object(self.masks, self.strings, 'mask_frame'),
pd.concat(self.masks, axis='columns'))
def test_return_values(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'values'),
pd.Series([np.nan, np.nan, 'ab\n', np.nan, np.nan, 'AB', np.nan]))
def test_wrong_return_type(self):
with pytest.raises(ValueError):
pdv._get_return_object(self.masks, self.strings, 'wrong return type')
class TestMaskNonconvertible():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
inconvertible_numeric = pd.Series([False, False, False, True, True, False])
inconvertible_exact_dates = pd.Series([True, True, False, True, True, False])
inconvertible_inexact_dates = pd.Series([True, True, False, True, False, False])
def test_numeric(self):
assert_series_equal(pdv.mask_nonconvertible(self.mixed, 'numeric'),
self.inconvertible_numeric)
def test_datetime_exact_date(self):
assert_series_equal(pdv.mask_nonconvertible(self.mixed,
'datetime',
datetime_format='%Y',
exact_date=True),
self.inconvertible_exact_dates)
assert_series_equal(pdv.mask_nonconvertible(self.mixed,
'datetime',
datetime_format='%Y', exact_date=False),
self.inconvertible_inexact_dates)
class TestToDatetime():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
def test_exact(self):
expected_result1 = [pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT,
pd.Timestamp('2014-01-01 00:00:00')]
assert (pdv.to_datetime(self.mixed,
datetime_format='%Y',
exact=True).tolist() == expected_result1)
expected_result2 = [pd.NaT, pd.NaT, pd.NaT, pd.NaT,
pd.Timestamp('2014-01-07 00:00:00'),
pd.Timestamp('2014-01-01 00:00:00')]
assert (pdv.to_datetime(self.mixed,
datetime_format='%Y/%m/%d',
exact=False).tolist() == expected_result2)
class TestToNumeric():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
def test_conversion(self):
assert (pdv.to_numeric(self.mixed).sum() == 2017.3)
pytest.warns(ValidationWarning, pdv.to_numeric, self.mixed)
class TestToString():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7)])
numeric_as_strings = pd.Series(['1', '2.3', np.nan, 'abc', pd.datetime(2014, 1, 7)])
datetimes_as_strings = pd.Series([1, 2.3, np.nan, 'abc', '2014-01-07'])
all_values_as_strings = pd.Series(['1', '2.3', np.nan, 'abc', '2014-01-07'])
def test_numeric_to_string(self):
assert_series_equal(pdv._numeric_to_string(self.mixed),
self.numeric_as_strings)
def test_datetime_to_string(self):
assert_series_equal(pdv._datetime_to_string(self.mixed,
datetime_format='%Y-%m-%d'),
self.datetimes_as_strings)
def test_to_string(self):
assert_series_equal(pdv.to_string(self.mixed,
float_format='%g',
datetime_format='%Y-%m-%d'),
self.all_values_as_strings)
class TestValidateDate():
dates = pd.Series([datetime.datetime(2014, 1, 7),
datetime.datetime(2014, 1, 7),
datetime.datetime(2014, 2, 28),
pd.NaT])
rtype = 'mask_series'
def test_validation(self):
results, msg = pdv.validate_date(self.dates, return_type='values')
assert_series_equal(self.dates, results)
_, msg = pdv.validate_date(self.dates,
nullable=False,
return_type=self.rtype)
assert ei.natv in msg
_, msg = pdv.validate_date(self.dates,
unique=True,
return_type=self.rtype)
assert ei.nonu in msg
_, msg = pdv.validate_date(self.dates,
min_date=datetime.date(2014, 1, 8),
return_type=self.rtype)
assert ei.elyd in msg
_, msg = pdv.validate_date(self.dates,
max_date=datetime.date(2014, 1, 8),
return_type=self.rtype)
assert ei.lted in msg
class TestValidateTimestamp():
timestamps = pd.Series([pd.Timestamp(2014, 1, 7, 12, 0, 5),
pd.Timestamp(2014, 1, 7, 12, 0, 5),
pd.Timestamp(2014, 2, 28, 0, 0, 0),
pd.NaT])
rtype = 'mask_series'
def test_validation(self):
results, msg = pdv.validate_timestamp(self.timestamps, return_type='values')
assert_series_equal(self.timestamps, results)
_, msg = pdv.validate_timestamp(self.timestamps, nullable=False, return_type=self.rtype)
assert ei.natv in msg
_, msg = pdv.validate_timestamp(self.timestamps, unique=True, return_type=self.rtype)
assert ei.nonu in msg
_, msg = pdv.validate_timestamp(self.timestamps,
min_timestamp=pd.Timestamp(2014, 1, 8),
return_type=self.rtype)
assert ei.elyt in msg
_, msg = pdv.validate_timestamp(self.timestamps,
max_timestamp=pd.Timestamp(2014, 1, 8),
return_type=self.rtype)
assert ei.ltet in msg
class TestValidateNumber():
numeric_with_string = pd.Series([-1, -1, 2.3, '1'])
numeric = pd.Series([-1, -1, 2.3, np.nan])
rtype = 'mask_series'
def test_validation(self):
results, msg = pdv.validate_numeric(self.numeric_with_string,
return_type='values')
assert_series_equal(results, self.numeric)
_, msg = pdv.validate_numeric(self.numeric, nullable=False, return_type=self.rtype)
assert ei.nanv in msg
_, msg = pdv.validate_numeric(self.numeric, unique=True, return_type=self.rtype)
assert ei.nonu in msg
_, msg = pdv.validate_numeric(self.numeric, integer=True, return_type=self.rtype)
assert ei.nint in msg
_, msg = pdv.validate_numeric(self.numeric, min_value=0, return_type=self.rtype)
assert ei.lowv in msg
_, msg = pdv.validate_numeric(self.numeric, max_value=0, return_type=self.rtype)
assert ei.hghv in msg
class TestValidateString():
mixed =
|
pd.Series(['ab\n', 'ab\r\n', 'a b', 'Ab', 'Ab', 'AB', ' aBc', 'aBc ', 1, np.nan])
|
pandas.Series
|
## Tune the clip threshold and the confidence using EPR
from COSLIR import *
import pandas as pd
res = np.load('data/human/hSTR_656.npy')
threshold_list = [0, 1e-3, 3e-3, 6e-3, 1e-2]
conf_list = [0.5, 0.6, 0.7]
bootstrap_num, dim, _ = res.shape
print('bootstrap, dim', bootstrap_num, dim)
# load data
X = np.genfromtxt('data/hSTRING/ExpressionData5.csv', delimiter=',')
X = X[1:,1:]
Y = np.genfromtxt('data/hSTRING/ExpressionData6.csv', delimiter=',')
Y = Y[1:,1:]
X = X.T
Y = Y.T
print('sample size: X, ', X.shape, 'Y, ', Y.shape)
Expre = pd.read_csv('data/hSTRING/ExpressionData1.csv')
# Expre = np.load('data/mouse/mSTR_1-2_Lambda6.npy')
name = Expre['X'].str.upper()
name = pd.DataFrame(name)
# rescale the final estimator
def rescale(A, X, Y):
mu1 = np.mean(X, axis=0)
mu2 = np.mean(Y, axis=0)
diff_mu = np.abs(mu2 - mu1)
diff_mu = diff_mu[:, np.newaxis]
mu1 = mu1[:, np.newaxis]
Coef = np.matmul(diff_mu, mu1.T)
return np.abs(A) * Coef
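# In element-wise terms this is a restatement of the code above (using the row = target,
# column = source convention of MatToPath below), not an additional processing step:
#   rescale(A, X, Y)[i, j] = |A[i, j]| * |mean(Y)[i] - mean(X)[i]| * mean(X)[j]
# i.e. each coefficient is weighted by the mean shift of its target gene i and the
# baseline mean of its source gene j.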
# Change the coefficient matrix A into the path table format
def MatToPath(A):
pos = np.nonzero(A)
source_ind = pos[1]
target_ind = pos[0]
# num = source_ind.shape[0]
source = pd.DataFrame(name.iloc[source_ind])
target = pd.DataFrame(name.iloc[target_ind])
value =
|
pd.DataFrame(A[target_ind, source_ind])
|
pandas.DataFrame
|
import pathlib
import sqlite3
import pandas as pd
import numpy as np
import xarray as xr
from metpy import calc
from metpy.units import units
from atmPy.aerosols.size_distribution import sizedistribution as sd
from atmPy.aerosols import size_distribution
from atmPy.data_archives import arm
import scipy as sp
import sys
def open_iMet(path, verbose=False):
ds = xr.open_dataset(path)
# txt = ',\n'.join('"{}"'.format(k) for k in ds.variables.keys())
# print(txt)
imet_columns2use = {
# "datetime",
"altitude (from iMet PTU) [km]": 'altitude_ptu',
"iMet pressure [mb]": 'atm_pressure',
"iMet air temperature (corrected) [deg C]": 'temp',
# "iMet air temperature (raw) [deg C]",
"iMet humidity [RH %]": 'rh',
# "iMet frostpoint [deg C]": 'frost_point',
# "iMet internal temperature [deg C]",
# "iMet battery voltage [V]",
"iMet theta [K]": 'potential_temperature',
# "iMet temperature (of pressure sensor) [deg C]",
# "iMet temperature (of humidity sensor) [deg C]",
# "iMet ascent rate [m*s^-1]",
# "iMet water vapor mixing ratio [ppmv]",
# "iMet total column water [mm]",
# "GPS latitude",
# "GPS longitude",
"GPS altitude [km]": 'altitude_gps',
# "GPS num satellites",
# "GPS pressure [mb]",
# "GPS wind speed [m*s^-1]",
# "GPS wind direction [deg]",
# "GPS ascent rate [m*s^-1]",
# "GPS(X) east velocity [m*s^-1]",
# "GPS(X) north velocity [m*s^-1]",
# "GPS(X) up velocity [m*s^-1]",
# "GPS time [h:m:s GMT]": 'time_gps',
# "GPS heading from launch [deg]",
# "GPS elevation angle from launch [deg]",
# "GPS distance from launch [km]",
# "predicted landing latitude",
# "predicted landing longitude",
# "predicted time to landing [min]",
# "POPS Particle Rate [count]",
# "POPS Flow Rate [cc*s^-1]",
# "POPS Temperature [deg C]",
# "POPS Bin 0",
# "POPS Bin 1",
# "POPS Bin 2",
# "POPS Bin 3",
# "POPS Bin 4",
# "POPS Bin 5",
# "POPS Bin 6",
# "POPS Bin 7",
# "POPS Bin 8",
# "POPS Bin 9",
# "POPS Bin 10",
# "POPS Bin 11"
}
# imet_columns2use
if verbose:
print('======')
for var in ds.variables:
print(var)
df = pd.DataFrame()
for key in imet_columns2use.keys():
df[imet_columns2use[key]] = ds[key].to_pandas()
return df
def set_altitude_column(imet, alt_source):
if 'baro' in alt_source:
imet['altitude'] = imet['altitude_ptu']
elif 'gps' in alt_source:
imet['altitude'] = imet['altitude_gps']
elif alt_source == 'bad':
return False
else:
raise ValueError('alt_source unknown: {}'.format(alt_source))
imet.drop(['altitude_ptu', 'altitude_gps'], axis=1, inplace=True)
imet.altitude *= 1000
return True
def load_met_files(start_time, end_time, folders):
fnames = [i for i in folders['path2met_folder'].glob('*.cdf')]
met_start = [pd.to_datetime(i.name.split('.')[2]) for i in fnames]
met_file_df = pd.DataFrame({'path': fnames}, index = met_start)
met_file_df.sort_index(inplace=True)
met_file_df_match = met_file_df.truncate(start_time - pd.Timedelta(1, 'D'), end_time)
tl = []
for idx, path in met_file_df_match.iterrows():
ds = xr.open_dataset(path[0])
press = ds.atmos_pressure.to_dataframe()
press_resamp = press.resample('1s').interpolate()
tl.append(press_resamp)
press_df = pd.concat(tl, sort=True)
press_df = press_df.truncate(start_time, end_time)
return press_df * 10
def add_eqiv_potential_temp(tbs):
temp = tbs['temp'].values * units.celsius
rh = tbs['rh'].values * units.percent
press = tbs['atm_pressure'].values * units.millibar
# dewpt_pint = calc.dewpoint_rh(temp, rh)
dewpt_pint = calc.dewpoint_from_relative_humidity(temp, rh)
dewpt = np.array(dewpt_pint)
tbs['dew_point'] = dewpt
tbs['equiv_potential_temperature'] = np.array(calc.equivalent_potential_temperature(press, temp, dewpt_pint))
return
def add_cloud_base_distance_and_transit(dst, ceilometer_folder = '/mnt/telg/data/arm_data/OLI/ceilometer/'):
# find ceilometer files around the tbs time
    ## get available ceilometer files
path = pathlib.Path(ceilometer_folder)
df = pd.DataFrame(list(path.glob('*.nc')), columns = ['path'])
df.index = df.path.apply(lambda pt: pd.to_datetime(' '.join(pt.name.split('.')[2:4])))
df.index.name = 'datetime'
df['name'] = df.path.apply(lambda x: x.name)
df.sort_index(inplace=True)
tdiff = abs(df.index - dst.datetime.values[0])
tdmin, tdargmin = tdiff.min(), tdiff.argmin()
assert(tdmin < pd.Timedelta(1, 'd')) # if this is False there is basically no ceilometer data found close by
df_sel = df.iloc[tdargmin-1:tdargmin+2,:]
ceil = arm.read_ceilometer_nc([pt for pt in df_sel.path])
# distance of cloud base to flight path
    # measurement frequencies differ a lot between the ceilometer and nasascience data, therefore I will interpolate the ceilometer data to the nasascience measurement frequency
ct = ceil.copy()
cb = ct.cloudbase.data.resample('1s').bfill().reindex(dst.datetime.values)
dst_alt = dst.altitude.to_pandas()
dist2cb = (cb * (-1)).add(dst_alt, axis = 0).First_cloud_base
# take each minimum fit it and determine the local cloud base transit
def find_section_with_cb_trans(dist2cb, trans, window = 150):
td = pd.to_timedelta(f'{window}s')
dist2cb_sel = dist2cb.loc[trans - td: trans + td]
dist2cb_sel_df =
|
pd.DataFrame(dist2cb_sel)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import re
from nltk.stem import *
import nltk
from nltk.corpus import stopwords
from nltk.corpus import wordnet
data = pd.read_csv("most common words in python.txt",sep=",",nrows=1)
reg_pat = '^[a-z][a-z\'-]+[a-z]$|^[a-z][a-z\'-]+[a-z]\.$|^[a-z][a-z\'-]+[a-z],$'
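# A few hypothetical tokens to illustrate what reg_pat accepts after lower-casing:
# "cat", "don't" and "well-known." match (lowercase words of three or more characters,
# optionally ending in '.' or ','), while one- or two-letter tokens such as "a" or "be"
# and tokens containing digits do not.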
stemmer = PorterStemmer()
stop = set(stopwords.words('english'))
def pre_process(s):
# convert all to lower case.
s = s.lower()
# Text is only being considered from "Subject" and "Body" of email.
    # to filter any lines that are due to forwards or replies in the message.
lines = filter(lambda line: not line.strip().startswith('>'),s.split('\n'))
# to filter any lines that start from Date:,X-From,X-To,X-Folder,X-Origin,X-FileName.
lines = filter(lambda line: not line.startswith('date:'),lines)
lines = filter(lambda line: not line.startswith('x-from:'),lines)
lines = filter(lambda line: not line.startswith('x-to:'),lines)
lines = filter(lambda line: not line.startswith('x-folder:'),lines)
lines = filter(lambda line: not line.startswith('x-origin:'),lines)
lines = filter(lambda line: not line.startswith('x-filename:'),lines)
# Tokenizing the Text message & considering only the tokens with length >= 3.
arr = '\n'.join(lines).split()
terms = []
for term in arr:
if re.match(reg_pat, term) != None:
terms.append(term.replace(",","").replace(".",""))
# Pruning the stop words.
terms = list(filter(lambda term: term not in stop, terms))
# Perform Stemming on terms.
# terms = list(pd.Series(terms).map(stemmer.stem))
return terms
lsa_data = data
#print(lsa_data) # display enron data
temp = []
for i in range(len(data)):
count = 0
for j in range(len(lsa_data)-1):
if lsa_data[0][i] in lsa_data[j+1]: # delete repeatable data from lsa_data list
count += 1
if count == len(lsa_data)-1:
temp.append(lsa_data[0][i]) # if end of list, insert into temp list
for i in range(len(temp)):
for ttt in lsa_data:
ttt.remove(temp[i])
print ("========reral")
print (lsa_data)
word_dict={}
for i in lsa_data:
for word in i:
word_dict[word] = word_dict.get(word,0)+1
word_dict
words=[]
countli=[]
for key in word_dict:
words.append(key)
countli.append(word_dict[key])
def SET(m,in_list):
# Set the value of parameter m = the no. of iterations you require
Card = pd.Series(np.NAN)
DS=
|
pd.Series(np.NAN)
|
pandas.Series
|
USAGE = """
python pba50_metrics_urbansim.py
Needs access to these box folders and M Drive
Box/Modeling and Surveys/Urban Modeling/Bay Area UrbanSim 1.5/PBA50/Draft Blueprint runs/
Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/
Processes model outputs and creates a single csv with scenario metrics in this folder:
Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/
This csv file will have 6 columns:
1) modelrun ID
2) metric ID
3) metric name
4) year (note: for metrics that depict change from 2015 to 2050, this value will be 2050)
5) blueprint type
6) metric value
"""
import datetime, os, sys
import numpy, pandas as pd
from collections import OrderedDict, defaultdict
def calculate_normalize_factor_Q1Q2(parcel_sum_df):
return ((parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()) / parcel_sum_df['tothh_2050'].sum()) \
/ ((parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()) / parcel_sum_df['tothh_2015'].sum())
def calculate_normalize_factor_Q1(parcel_sum_df):
return (parcel_sum_df['hhq1_2050'].sum() / parcel_sum_df['tothh_2050'].sum()) \
/ (parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum())
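# Illustrative arithmetic for the two normalization factors above (hypothetical shares):
# if Q1+Q2 households are 40% of all households in 2050 and 38% in 2015, the Q1Q2 factor
# is 0.40 / 0.38 ≈ 1.05; the Q1-only factor is computed the same way using the hhq1
# columns alone.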
def calculate_Affordable2_deed_restricted_housing(runid, parcel_sum_df, metrics_dict):
metric_id = "A2"
# totals for 2050 and 2015
metrics_dict[runid,metric_id,'deed_restricted',y2] = parcel_sum_df['deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted',y1] = parcel_sum_df['deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units',y2] = parcel_sum_df['residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units',y1] = parcel_sum_df['residential_units_2015'].sum()
metrics_dict[runid,metric_id,'deed_restricted_HRA',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_HRA',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_HRA',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_HRA',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'residential_units_2015'].sum()
# diff between 2050 and 2015
metrics_dict[runid,metric_id,'deed_restricted',y_diff] = metrics_dict[runid,metric_id,'deed_restricted',y2] - metrics_dict[runid,metric_id,'deed_restricted',y1]
metrics_dict[runid,metric_id,'residential_units',y_diff] = metrics_dict[runid,metric_id,'residential_units',y2] - metrics_dict[runid,metric_id,'residential_units',y1]
metrics_dict[runid,metric_id,'deed_restricted_HRA',y_diff] = metrics_dict[runid,metric_id,'deed_restricted_HRA',y2] - metrics_dict[runid,metric_id,'deed_restricted_HRA',y1]
metrics_dict[runid,metric_id,'residential_units_HRA',y_diff] = metrics_dict[runid,metric_id,'residential_units_HRA',y2] - metrics_dict[runid,metric_id,'residential_units_HRA',y1]
metrics_dict[runid,metric_id,'deed_restricted_nonHRA',y_diff] = metrics_dict[runid,metric_id,'deed_restricted',y_diff] - metrics_dict[runid,metric_id,'deed_restricted_HRA',y_diff]
metrics_dict[runid,metric_id,'residential_units_nonHRA',y_diff] = metrics_dict[runid,metric_id,'residential_units',y_diff] - metrics_dict[runid,metric_id,'residential_units_HRA',y_diff]
# metric: deed restricted % of total units: overall, HRA and non-HRA
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units',y_diff] = metrics_dict[runid,metric_id,'deed_restricted',y_diff] / metrics_dict[runid,metric_id,'residential_units',y_diff]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_HRA',y_diff] = metrics_dict[runid,metric_id,'deed_restricted_HRA',y_diff]/metrics_dict[runid,metric_id,'residential_units_HRA',y_diff]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_nonHRA',y_diff] = metrics_dict[runid,metric_id,'deed_restricted_nonHRA',y_diff]/metrics_dict[runid,metric_id,'residential_units_nonHRA',y_diff]
print('********************A2 Affordable********************')
print('DIS pct of new units %s' % metrics_dict[runid,metric_id,'deed_restricted_pct_new_units',y_diff] )
print('DIS pct of new units in HRAs %s' % metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_HRA',y_diff] )
print('DIS pct of new units outside of HRAs %s' % metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_nonHRA',y_diff])
# Forcing preservation metrics
#metrics_dict[runid,metric_id,'preservation_affordable_housing',y_diff] = 1
def calculate_Diverse1_LIHHinHRAs(runid, parcel_sum_df, tract_sum_df, normalize_factor_Q1Q2, normalize_factor_Q1, metrics_dict):
metric_id = "D1"
# Share of region's LIHH households that are in HRAs
metrics_dict[runid,metric_id,'LIHH_total',y2] = parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_total',y1] = parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_total',y_diff] = metrics_dict[runid,metric_id,'LIHH_total',y2] - metrics_dict[runid,metric_id,'LIHH_total',y1]
metrics_dict[runid,metric_id,'LIHH_inHRA',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'hhq1_2050'].sum() + parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_inHRA',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'hhq1_2015'].sum() + parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'hhq2_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_inHRA',y_diff] = metrics_dict[runid,metric_id,'LIHH_inHRA',y2] - metrics_dict[runid,metric_id,'LIHH_inHRA',y1]
metrics_dict[runid,metric_id,'LIHH_shareinHRA',y2] = metrics_dict[runid,metric_id,'LIHH_inHRA',y2] / metrics_dict[runid,metric_id,'LIHH_total',y2]
metrics_dict[runid,metric_id,'LIHH_shareinHRA',y1] = metrics_dict[runid,metric_id,'LIHH_inHRA',y1] / metrics_dict[runid,metric_id,'LIHH_total',y1]
# normalizing for overall growth in LIHH
metrics_dict[runid,metric_id,'LIHH_shareinHRA_normalized',y1] = metrics_dict[runid,metric_id,'LIHH_shareinHRA',y1] * normalize_factor_Q1Q2
metrics_dict[runid,metric_id,'LIHH_shareinHRA_normalized',y2] = metrics_dict[runid,metric_id,'LIHH_shareinHRA',y2]
# Total HHs in CoC Tracts, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inCoC',y1] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inCoC',y2] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_inCoC',y_diff] = metrics_dict[runid,metric_id,'TotHH_inCoC',y2] - metrics_dict[runid,metric_id,'TotHH_inCoC',y1]
########### Tracking movement of Q1 households: Q1 share of Households
# Share of Households that are Q1, within each geography type in this order:
# Overall Region; HRAs; DIS Tracts; CoCs; PDAs; TRAs
# Region
metrics_dict[runid,metric_id,'Q1HH_shareofRegion',y1] = parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofRegion_normalized',y1] = parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum() * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofRegion',y2] = parcel_sum_df['hhq1_2050'].sum() / parcel_sum_df['tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofRegion_normalized',y2] = metrics_dict[runid,metric_id,'Q1HH_shareofRegion',y2]
#HRA
metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'hhq1_2015'].sum() / \
parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofHRA_normalized',y1] = metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y1] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'hhq1_2050'].sum() / \
parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('HRA', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofHRA_normalized',y2] = metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y2]
#DIS
metrics_dict[runid,metric_id,'Q1HH_shareofDIS',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('DIS', na=False),'hhq1_2015'].sum() / \
parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('DIS', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofDIS_normalized',y1] = metrics_dict[runid,metric_id,'Q1HH_shareofDIS',y1] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofDIS',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('DIS', na=False), 'hhq1_2050'].sum() / \
parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('DIS', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofDIS_normalized',y2] = metrics_dict[runid,metric_id,'Q1HH_shareofDIS',y2]
#CoC
metrics_dict[runid,metric_id,'Q1HH_shareofCoC',y1] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'hhq1_2015'].sum() / \
tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofCoC_normalized',y1] = metrics_dict[runid,metric_id,'Q1HH_shareofCoC',y1] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofCoC',y2] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'hhq1_2050'].sum() / \
tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofCoC_normalized',y2] = metrics_dict[runid,metric_id,'Q1HH_shareofCoC',y2]
#GG
metrics_dict[runid,metric_id,'Q1HH_shareofGG',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('GG', na=False), 'hhq1_2015'].sum() / \
parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('GG', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofGG_normalized',y1] = metrics_dict[runid,metric_id,'Q1HH_shareofGG',y1] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofGG',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('GG', na=False), 'hhq1_2050'].sum() / \
parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('GG', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofGG_normalized',y2] = metrics_dict[runid,metric_id,'Q1HH_shareofGG',y2]
#TRAGG
parcel_GG = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('GG', na=False)]
parcel_TRAGG = parcel_GG.loc[parcel_GG['fbpchcat'].str.contains('tra', na=False)]
metrics_dict[runid,metric_id,'Q1HH_shareofTRAGG',y1] = parcel_TRAGG['hhq1_2015'].sum() / parcel_TRAGG['tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofTRAGG_normalized',y1] = metrics_dict[runid,metric_id,'Q1HH_shareofTRAGG',y1] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofTRAGG',y2] = parcel_TRAGG['hhq1_2050'].sum() / parcel_TRAGG['tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofTRAGG_normalized',y2] = metrics_dict[runid,metric_id,'Q1HH_shareofTRAGG',y2]
print('********************D1 Diverse********************')
print('Growth of LIHH share of population (normalize factor))',normalize_factor_Q1Q2 )
print('LIHH Share in HRA 2050 %s' % metrics_dict[runid,metric_id,'LIHH_shareinHRA',y2] )
print('LIHH Share in HRA 2015 %s' % metrics_dict[runid,metric_id,'LIHH_shareinHRA_normalized',y1] )
def calculate_Diverse2_LIHH_Displacement(runid, parcel_sum_df, tract_sum_df, normalize_factor_Q1, metrics_dict):
metric_id = "D2"
# For reference: total number of LIHH in tracts
metrics_dict[runid,metric_id,'LIHH_inDIS',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('DIS', na=False), 'hhq1_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_inDIS',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('DIS', na=False), 'hhq1_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_inDIS_normalized',y1] = metrics_dict[runid,metric_id,'LIHH_inDIS',y1] * normalize_factor_Q1
metrics_dict[runid,metric_id,'LIHH_inDIS_normalized',y2] = metrics_dict[runid,metric_id,'LIHH_inDIS',y2]
print('********************D2 Diverse********************')
print('Total Number of LIHH in DIS tracts in 2050',metrics_dict[runid,metric_id,'LIHH_inDIS',y2] )
print('Number of LIHH in DIS tracts in 2015',metrics_dict[runid,metric_id,'LIHH_inDIS',y1] )
print('Number of LIHH in DIS tracts in normalized',metrics_dict[runid,metric_id,'LIHH_inDIS_normalized',y1] )
def calculate_Healthy1_HHs_SLRprotected(runid, parcel_sum_df, metrics_dict):
metric_id = "H1"
# Renaming Parcels as "Protected", "Unprotected", and "Unaffected"
#Basic
def label_SLR(row):
if (row['SLR'] == 12): return 'Unprotected'
elif (row['SLR'] == 24): return 'Unprotected'
elif (row['SLR'] == 36): return 'Unprotected'
elif (row['SLR'] == 100): return 'Protected'
else: return 'Unaffected'
parcel_sum_df['SLR_protection'] = parcel_sum_df.apply (lambda row: label_SLR(row), axis=1)
# Calculating protected households
# All households
tothh_2050_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'tothh_2050'].sum()
tothh_2050_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'tothh_2050'].sum()
tothh_2015_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'tothh_2015'].sum()
tothh_2015_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'tothh_2015'].sum()
# Q1 Households
hhq1_2050_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'hhq1_2050'].sum()
hhq1_2050_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'hhq1_2050'].sum()
hhq1_2015_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'hhq1_2015'].sum()
hhq1_2015_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'hhq1_2015'].sum()
# CoC Households
CoChh_2050_affected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("rotected") == True) & \
parcel_sum_df['coc_flag_pba2050']==1), 'tothh_2050'].sum()
CoChh_2050_protected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("Protected") == True) & \
parcel_sum_df['coc_flag_pba2050']==1), 'tothh_2050'].sum()
CoChh_2015_affected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("rotected") == True) & \
parcel_sum_df['coc_flag_pba2050']==1), 'tothh_2015'].sum()
CoChh_2015_protected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("Protected") == True) & \
parcel_sum_df['coc_flag_pba2050']==1), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_tothh',y2] = tothh_2050_protected / tothh_2050_affected
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_hhq1',y2] = hhq1_2050_protected / hhq1_2050_affected
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_CoChh',y2] = CoChh_2050_protected / CoChh_2050_affected
print('********************H1 Healthy********************')
print('Pct of HHs affected by 3ft SLR that are protected in 2050 in %s' % metrics_dict[runid,metric_id,'SLR_protected_pct_affected_tothh',y2])
print('Pct of Q1 HHs affected by 3ft SLR that are protected in 2050 in %s' % metrics_dict[runid,metric_id,'SLR_protected_pct_affected_hhq1',y2])
print('Pct of CoC HHs affected by 3ft SLR that are protected in 2050 in %s' % metrics_dict[runid,metric_id,'SLR_protected_pct_affected_CoChh',y2])
def calculate_Healthy1_HHs_EQprotected(runid, parcel_sum_df, metrics_dict):
metric_id = "H1"
'''
# Reading building codes file, which has info at building level, on which parcels are inundated and protected
buildings_code = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/Healthy/buildings_with_eq_code.csv')
buildings_eq = pd.merge(left=buildings_code[['building_id', 'parcel_id', 'residential_units', 'year_built', 'earthquake_code']], right=parcel_sum_df[['parcel_id','zone_id','tract_id','coc_flag_pba2050','fbpchcat','hhq1_2015','hhq1_2050','tothh_2015','tothh_2050']], left_on="parcel_id", right_on="parcel_id", how="left")
buildings_eq = pd.merge(left=buildings_eq, right=coc_flag[['tract_id_coc','county_fips']], left_on="tract_id", right_on="tract_id_coc", how="left")
buildings_cat = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/Healthy/building_eq_categories.csv')
buildings_eq = pd.merge(left=buildings_eq, right=buildings_cat, left_on="earthquake_code", right_on="building_eq_code", how="inner")
buildings_eq.drop(['building_eq_code', 'tract_id_coc'], axis=1, inplace=True)
buildings_eq['cost_retrofit_total'] = buildings_eq['residential_units'] * buildings_eq['cost_retrofit']
# Calculated protected households in PLus
# Number of Units retrofitted
metrics_dict['H2_eq_num_units_retrofit'] = buildings_eq['residential_units'].sum()
metrics_dict['H2_eq_num_CoC_units_retrofit'] = buildings_eq.loc[(buildings_eq['coc_flag_pba2050']== 1), 'residential_units'].sum()
metrics_dict['H2_eq_total_cost_retrofit'] = buildings_eq['cost_retrofit_total'].sum()
metrics_dict['H2_eq_CoC_cost_retrofit'] = buildings_eq.loc[(buildings_eq['coc_flag_pba2050']== 1), 'cost_retrofit_total'].sum()
print('Total number of units retrofited',metrics_dict['H2_eq_num_units_retrofit'])
print('CoC number of units retrofited',metrics_dict['H2_eq_num_CoC_units_retrofit'])
print('Total cost of retrofit',metrics_dict['H2_eq_total_cost_retrofit'])
print('CoC cost of retrofit',metrics_dict['H2_eq_CoC_cost_retrofit'])
'''
def calculate_Healthy1_HHs_WFprotected(runid, parcel_sum_df, metrics_dict):
metric_id = "H1"
'''
#
'''
def calculate_Healthy2_HHs_WFprotected(runid, parcel_sum_df, metrics_dict):
metric_id = "H2"
'''
#
'''
def calculate_Healthy2_GreenfieldDev(runid, greenfield_sum_df, metrics_dict):
metric_id = "H2-3"
print('********************H2-3 Annual Greenfield Development********************')
metrics_dict[runid,metric_id,'Annual_greenfield_develop_acres',y2] = (greenfield_sum_df.iloc[3]['urban_footprint_0_2050'] - greenfield_sum_df.iloc[3]['urban_footprint_0_2015'])/ \
(int(y2) - int(y1)) #3 is the rownumber for "acres"
metrics_dict[runid,metric_id,'Annual_greenfield_develop_acres',y1] = 6642/2 #2015 is observed data
print('Annual Greenfield Development Acres in 2050 %s' % metrics_dict[runid,metric_id,'Annual_greenfield_develop_acres',y2])
print('Annual Greenfield Development Acres in 2015 %s' % metrics_dict[runid,metric_id,'Annual_greenfield_develop_acres',y1])
def calculate_Vibrant2_Jobs(runid, parcel_sum_df, metrics_dict):
metric_id = 'V2'
print('********************V2 Vibrant********************')
# Total Jobs Growth
metrics_dict[runid,metric_id,'Total_jobs',y2] = parcel_sum_df['totemp_2050'].sum()
metrics_dict[runid,metric_id,'Total_jobs',y1] = parcel_sum_df['totemp_2015'].sum()
metrics_dict[runid,metric_id,'Total_jobs',y_diff] = metrics_dict[runid,metric_id,'Total_jobs',y2]/ metrics_dict[runid,metric_id,'Total_jobs',y1]-1
print('Number of Jobs in 2050 %s' % metrics_dict[runid,metric_id,'Total_jobs',y2])
print('Number of Jobs in 2015 %s' % metrics_dict[runid,metric_id,'Total_jobs',y1])
print('Job Growth from 2015 to 2050 %s' % metrics_dict[runid,metric_id,'Total_jobs',y_diff])
# MWTEMPN jobs
metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2] = parcel_sum_df['MWTEMPN_2050'].sum()
metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1] = parcel_sum_df['MWTEMPN_2015'].sum()
metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y_diff] = metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2]/metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1]-1
print('Number of Total MWTEMPN Jobs 2050 %s' % metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2])
print('Number of Total MWTEMPN Jobs 2015 %s' % metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1])
print('Job Growth Total MWTEMPN from 2015 to 2050 %s' % metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y_diff])
# Jobs Growth in PPAs
metrics_dict[runid,metric_id,'PPA_jobs',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('ppa', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'PPA_jobs',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('ppa', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'PPA_jobs',y_diff] = metrics_dict[runid,metric_id,'PPA_jobs',y2]/metrics_dict[runid,metric_id,'PPA_jobs',y1]-1
print('Number of Jobs in PPAs 2050 %s' % metrics_dict[runid,metric_id,'PPA_jobs',y2])
print('Number of Jobs in PPAs 2015 %s' % metrics_dict[runid,metric_id,'PPA_jobs',y1])
print('Job Growth in PPAs from 2015 to 2050 %s' % metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y_diff])
# Jobs Growth MWTEMPN in PPAs (Manufacturing & Wholesale, Transportation & Utilities)
metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('ppa', na=False), 'MWTEMPN_2050'].sum()
metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1] = parcel_sum_df.loc[parcel_sum_df['fbpchcat'].str.contains('ppa', na=False), 'MWTEMPN_2015'].sum()
metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y_diff] = metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2]/metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1]-1
print('Number of MWTEMPN Jobs in PPAs 2050 %s' % metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2])
print('Number of MWTEMPN Jobs in PPAs 2015 %s' % metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1])
print('Job Growth MWTEMPN in PPAs from 2015 to 2050 %s' % metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y_diff])
def parcel_building_output_sum(urbansim_runid):
#################### creating parcel level df from buildings output
    ### analyze changes in residential units and total deed_restricted units
building_output_2050 =
|
pd.read_csv(urbansim_runid+'_building_data_2050.csv', engine='python')
|
pandas.read_csv
|
import pandas as pd
try:
from boolean1_neg import boolean1
except ImportError:
from contra_qa.text_generation.boolean1_neg import boolean1
try:
from boolean2_S_and import boolean2
except ImportError:
from contra_qa.text_generation.boolean2_S_and import boolean2
try:
from boolean3_NP_and import boolean3
except ImportError:
from contra_qa.text_generation.boolean3_NP_and import boolean3
try:
from boolean4_VP_and import boolean4
except ImportError:
from contra_qa.text_generation.boolean4_VP_and import boolean4
try:
from boolean5_AP_and import boolean5
except ImportError:
from contra_qa.text_generation.boolean5_AP_and import boolean5
try:
from boolean6_implicit_and import boolean6
except ImportError:
from contra_qa.text_generation.boolean6_implicit_and import boolean6
try:
from boolean7_S_or import boolean7
except ImportError:
from contra_qa.text_generation.boolean7_S_or import boolean7
try:
from boolean8_NP_or import boolean8
except ImportError:
from contra_qa.text_generation.boolean8_NP_or import boolean8
try:
from boolean9_VP_or import boolean9
except ImportError:
from contra_qa.text_generation.boolean9_VP_or import boolean9
try:
from boolean10_AP_or import boolean10
except ImportError:
from contra_qa.text_generation.boolean10_AP_or import boolean10
def create_all():
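    # regenerate the ten boolean sub-datasets, then combine the AND variants (2-6) and the OR variants (7-10) into merged train/test splits below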
boolean1()
boolean2()
boolean3()
boolean4()
boolean5()
boolean6()
boolean7()
boolean8()
boolean9()
boolean10()
# creating the AND dataset
df2_tr = pd.read_csv("data/boolean2_train.csv")
df3_tr = pd.read_csv("data/boolean3_train.csv")
df4_tr = pd.read_csv("data/boolean4_train.csv")
df5_tr = pd.read_csv("data/boolean5_train.csv")
df6_tr = pd.read_csv("data/boolean6_train.csv")
df2_te = pd.read_csv("data/boolean2_test.csv")
df3_te = pd.read_csv("data/boolean3_test.csv")
df4_te = pd.read_csv("data/boolean4_test.csv")
df5_te = pd.read_csv("data/boolean5_test.csv")
df6_te = pd.read_csv("data/boolean6_test.csv")
train_and = [df2_tr, df3_tr, df4_tr, df5_tr, df6_tr]
test_and = [df2_te, df3_te, df4_te, df5_te, df6_te]
df_train_and = pd.concat(train_and)
df_test_and = pd.concat(test_and)
df_train_and = df_train_and.sample(frac=1).reset_index(drop=True)
df_test_and = df_test_and.sample(frac=1).reset_index(drop=True)
df_train_and = df_train_and.iloc[:10000]
df_test_and = df_test_and.iloc[:1000]
df_train_and.to_csv("data/boolean_AND_train.csv", index=False)
df_test_and.to_csv("data/boolean_AND_test.csv", index=False)
# creating the OR dataset
df7_tr = pd.read_csv("data/boolean7_train.csv")
df8_tr =
|
pd.read_csv("data/boolean8_train.csv")
|
pandas.read_csv
|
import os, csv
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
from scipy import signal
class ProcessSignalData(object):
def __init__(self):
# path to video data from signal_output.py
self.dir = './processed_new/videos'
self.full_path = ''
self.dataframe = pd.DataFrame()
self.real_data = pd.DataFrame()
self.fake_data = pd.DataFrame()
self.dataset = pd.DataFrame()
self.real_data_mean = {}
self.fake_data_mean = {}
self.real_data_var = {}
self.fake_data_var = {}
self.real_data_std = {}
self.fake_data_std = {}
self.real_data_psd = {}
self.fake_data_psd = {}
self.real_data_csd = {}
self.fake_data_csd = {}
self.real_data_f1 = {}
self.fake_data_f1 = {}
self.real_data_test = {}
self.fake_data_test = {}
self.real_data_RCCE = {}
self.real_data_LCCE = {}
self.real_data_LCRC = {}
self.fake_data_RCCE = {}
self.fake_data_LCCE = {}
self.fake_data_LCRC = {}
self.real_count = 0
self.fake_count = 0
self.vid_count = 0
self.data_path_lcce = './lcce250.csv'
self.data_path_lcrc = './lcrc250.csv'
self.data_path_rcce = './rcce250.csv'
self.data_path_m = './mean_data16.csv'
self.data_path_v = './new_chrom/var_data16.csv'
self.data_path_s = './new_chrom/std_data16.csv'
self.data_path_p = './new_chrom/psd_data16.csv'
self.data_path_c = './new_chrom/csd_data_128.csv'
        self.data_path_f1 = './f1_data_128.csv'
self.log_path = './process_log.csv'
self.test_data_lcce_path = './new_chrom/test_lcce.csv'
self.test_data_lcrc_path = './new_chrom/test_lcrc.csv'
self.test_data_rcce_path = './new_chrom/test_rcce.csv'
self.train_data_lcce_path = './new_chrom/train_lcce.csv'
self.train_data_lcrc_path = './new_chrom/train_lcrc.csv'
self.train_data_rcce_path = './new_chrom/train_rcce.csv'
self.test_data_v_path = './new_chrom/train_data_v32c.csv'
self.train_data_v_path = './new_chrom/test_data_v32c.csv'
self.test_data_m_path = './new_chrom/train_data_m32c.csv'
self.train_data_m_path = './new_chrom/test_data_m32c.csv'
self.test_data_s_path = './new_chrom/train_data_s32c.csv'
self.train_data_s_path = './new_chrom/test_data_s32c.csv'
self.test_data_p_path = './new_chrom/train_data_p128c.csv'
self.train_data_p_path = './new_chrom/test_data_p128c.csv'
self.test_data_c_path = './train_data_c128c.csv'
self.train_data_c_path = './test_data_c128c.csv'
self.test_data_f1_path = './train_data_f1-128c.csv'
self.train_data_f1_path = './test_data_f1-128c.csv'
self.test_data_test_path = './train_data_test.csv'
self.train_data_test_path = './test_data_test.csv'
self.main()
def new_chrom(self, red, green, blue):
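        # chrominance-based rPPG projection (CHROM): X = 3R - 2G, Y = 1.5R + G - 1.5B, pulse = X - (std(X)/std(Y)) * Y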
# calculation of new X and Y
Xcomp = 3 * red - 2 * green
Ycomp = (1.5 * red) + green - (1.5 * blue)
# standard deviations
sX = np.std(Xcomp)
sY = np.std(Ycomp)
alpha = sX / sY
# -- rPPG signal
bvp = Xcomp - alpha * Ycomp
return bvp
def main(self):
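        # walk the per-video CSV outputs, build per-region signal differences, derive summary features (mean/var/std/PSD/CSD/f1), and collect them into real vs. fake feature tables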
# length of video in frames to process
sample_length = 250
# interval for mean, var, std
group_size = 32
#window for psd
psd_size = 128
for paths, subdir, files in os.walk(self.dir):
for file in files:
if file.endswith('.csv'):
self.full_path = os.path.join(paths, file)
if 'rejected' in self.full_path.lower() or '.txt' in self.full_path.lower() or 'imposter' in self.full_path.lower():
pass
else:
print(self.full_path)
self.dataset = pd.read_csv(self.full_path)
right_R = self.dataset['RC-R'].iloc[:sample_length]
left_R = self.dataset['LC-R'].iloc[:sample_length]
chin_R = self.dataset['C-R'].iloc[:sample_length]
forehead_R = self.dataset['F-R'].iloc[:sample_length]
outerR_R = self.dataset['OR-R'].iloc[:sample_length]
outerL_R = self.dataset['OL-R'].iloc[:sample_length]
center_R = self.dataset['CE-R'].iloc[:sample_length]
right_G = self.dataset['RC-G'].iloc[:sample_length]
left_G = self.dataset['LC-G'].iloc[:sample_length]
chin_G = self.dataset['C-G'].iloc[:sample_length]
forehead_G = self.dataset['F-G'].iloc[:sample_length]
outerR_G = self.dataset['OR-G'].iloc[:sample_length]
outerL_G = self.dataset['OL-G'].iloc[:sample_length]
center_G = self.dataset['CE-G'].iloc[:sample_length]
right_B = self.dataset['RC-B'].iloc[:sample_length]
left_B = self.dataset['LC-B'].iloc[:sample_length]
chin_B = self.dataset['C-B'].iloc[:sample_length]
forehead_B = self.dataset['F-B'].iloc[:sample_length]
outerR_B = self.dataset['OR-B'].iloc[:sample_length]
outerL_B = self.dataset['OL-B'].iloc[:sample_length]
center_B = self.dataset['CE-B'].iloc[:sample_length]
right_C = self.dataset['RC-chrom'].iloc[:sample_length]
left_C = self.dataset['LC-Chrom'].iloc[:sample_length]
chin_C = self.dataset['C-chrom'].iloc[:sample_length]
forehead_C = self.dataset['F-chrom'].iloc[:sample_length]
outerR_C = self.dataset['OR-chrom'].iloc[:sample_length]
outerL_C = self.dataset['OL-chrom'].iloc[:sample_length]
center_C = self.dataset['CE-chrom'].iloc[:sample_length]
chrom_R = right_C
chrom_L = left_C
chrom_CE = center_C
chrom_OL = outerL_C
chrom_OR = outerR_C
#chrom_R = self.new_chrom(right_R, right_G, right_B)
#chrom_L = self.new_chrom(left_R, left_G, left_B)
chrom_C = self.new_chrom(chin_R, chin_G, chin_B)
chrom_F = self.new_chrom(forehead_R, forehead_G, forehead_B)
#chrom_OR = self.new_chrom(outerR_R, outerR_G, outerR_B)
#chrom_OL = self.new_chrom(outerL_R, outerL_G, outerL_B)
#chrom_CE = self.new_chrom(center_R, center_G, center_B)
difg_LCRC = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['LC-G'].iloc[:sample_length]).abs()
difc_LCRC = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['LC-Chrom'].iloc[:sample_length]).abs()
difg_o1 = (self.dataset['C-G'].iloc[:sample_length] - self.dataset['F-G'].iloc[:sample_length]).abs()
difc_o1 = (self.dataset['C-chrom'].iloc[:sample_length] - self.dataset['F-chrom'].iloc[:sample_length]).abs()
difg_o2 = (self.dataset['OR-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
difc_o2 = (self.dataset['OR-chrom'].iloc[:sample_length] - self.dataset['OL-chrom'].iloc[:sample_length]).abs()
                        difc_LCCe = (self.dataset['LC-Chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[:sample_length]).abs()
                        difc_RCCe = (self.dataset['RC-chrom'].iloc[:sample_length] - self.dataset['CE-chrom'].iloc[:sample_length]).abs()
difc_LCRC = (chrom_R.iloc[:sample_length] - chrom_L.iloc[:sample_length]).abs()
difc_LCCe = (chrom_L.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
difc_RCCe = (chrom_R.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
difc_LCOL = (chrom_L.iloc[:sample_length] - chrom_OL.iloc[:sample_length]).abs()
difc_RCOR = (chrom_R.iloc[:sample_length] - chrom_OR.iloc[:sample_length]).abs()
difg_LCOL = (self.dataset['LC-G'].iloc[:sample_length] - self.dataset['OL-G'].iloc[:sample_length]).abs()
difg_RCOR = (self.dataset['RC-G'].iloc[:sample_length] - self.dataset['OR-G'].iloc[:sample_length]).abs()
# green channel features
# right cheek - left cheek
difg_LCRC_lst = [difg_LCRC.iloc[i:i + group_size] for i in
range(0, len(difg_LCRC) - group_size + 1, group_size)]
# forehead - chin
difg_o1_lst = [difg_o1.iloc[i:i + group_size] for i in
range(0, len(difg_o1) - group_size + 1, group_size)]
# outer right - outer left
difg_o2_lst = [difg_o2.iloc[i:i + group_size] for i in
range(0, len(difg_o2) - group_size + 1, group_size)]
# chrominance features
# right cheek - left cheek
difc_LCRC_lst = [difc_LCRC.iloc[i:i + group_size] for i in
range(0, len(difc_LCRC) - group_size + 1, group_size)]
# forehead - chin
difc_o1_lst = [difc_o1.iloc[i:i + group_size] for i in
range(0, len(difc_o1) - group_size + 1, group_size)]
# outer right - outer left
difc_o2_lst = [difc_o2.iloc[i:i + group_size] for i in
range(0, len(difc_o2) - group_size + 1, group_size)]
# mean
difg_LCRC_mean = np.array([difg_LCRC_lst[i].mean() for i in range(len(difg_LCRC_lst))])
difc_LCRC_mean = np.array([difc_LCRC_lst[i].mean() for i in range(len(difc_LCRC_lst))])
print("MEAN")
print(difc_LCRC_mean)
difg_o1_mean = np.array([difg_o1_lst[i].mean() for i in range(len(difg_o1_lst))])
difc_o1_mean = np.array([difc_o1_lst[i].mean() for i in range(len(difc_o1_lst))])
difg_o2_mean = np.array([difg_o2_lst[i].mean() for i in range(len(difg_o2_lst))])
difc_o2_mean = np.array([difc_o2_lst[i].mean() for i in range(len(difc_o2_lst))])
# variance
difg_LCRC_var = np.array([difg_LCRC_lst[i].var() for i in range(len(difg_LCRC_lst))])
difc_LCRC_var = np.array([difc_LCRC_lst[i].var() for i in range(len(difc_LCRC_lst))])
print("VAR")
print(difc_LCRC_var)
difg_o1_var = np.array([difg_o1_lst[i].var() for i in range(len(difg_o1_lst))])
difc_o1_var = np.array([difc_o1_lst[i].var() for i in range(len(difc_o1_lst))])
difg_o2_var = np.array([difg_o2_lst[i].var() for i in range(len(difg_o2_lst))])
difc_o2_var = np.array([difc_o2_lst[i].var() for i in range(len(difc_o2_lst))])
# standard deviation
difg_LCRC_std = np.array([difg_LCRC_lst[i].std() for i in range(len(difg_LCRC_lst))])
difc_LCRC_std = np.array([difc_LCRC_lst[i].std() for i in range(len(difc_LCRC_lst))])
print("STD")
print(difc_LCRC_std)
difg_o1_std = np.array([difg_o1_lst[i].std() for i in range(len(difg_o1_lst))])
difc_o1_std = np.array([difc_o1_lst[i].std() for i in range(len(difc_o1_lst))])
difg_o2_std = np.array([difg_o2_lst[i].std() for i in range(len(difg_o2_lst))])
difc_o2_std = np.array([difc_o2_lst[i].std() for i in range(len(difc_o2_lst))])
# power spectral density
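                        # signal.welch returns (frequency bins, PSD estimate); f is overwritten for each signal and only the PSD vectors are kept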
f, difg_LCRC_psd = signal.welch(difg_LCRC, nperseg=psd_size)
f, difc_LCCe_psd = signal.welch(difc_LCCe, nperseg=psd_size)
f, difc_RCCe_psd = signal.welch(difc_RCCe, nperseg=psd_size)
f, difc_LCRC_psd = signal.welch(difc_LCRC, nperseg=psd_size)
print("PSD")
print(difc_LCRC_psd)
f, difg_o1_psd = signal.welch(difg_o1, nperseg=psd_size)
f, difc_o1_psd = signal.welch(difc_o1, nperseg=psd_size)
f, difg_o2_psd = signal.welch(difg_o2, nperseg=psd_size)
f, difc_o2_psd = signal.welch(difc_o2, nperseg=psd_size)
# cross power spectral density
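                        # signal.csd returns complex cross-spectral densities; NaNs are zero-filled first, then real and imaginary parts are split out below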
left_C.fillna(0, inplace=True)
center_C.fillna(0, inplace=True)
right_C.fillna(0, inplace=True)
outerL_C.fillna(0, inplace=True)
outerR_C.fillna(0, inplace=True)
f, difc_LCCe_v_csd = signal.csd(left_C, center_C, nperseg=128)
f, difc_LCRC_v_csd = signal.csd(left_C, right_C, nperseg=128)
f, difc_RCCe_v_csd = signal.csd(right_C, center_C, nperseg=128)
f, difc_LCOL_v_csd = signal.csd(left_C, outerL_C, nperseg=128)
                        f, difc_RCOR_v_csd = signal.csd(right_C, outerR_C, nperseg=128)
difc_LCCe_csd_0 = []
difc_LCRC_csd_0 = []
difc_RCCe_csd_0 = []
difc_LCOL_csd_0 = []
difc_RCOR_csd_0 = []
difc_LCCe_csd_1 = []
difc_LCRC_csd_1 = []
difc_RCCe_csd_1 = []
difc_LCOL_csd_1 = []
difc_RCOR_csd_1 = []
for i in range(len(difc_LCCe_v_csd)):
difc_LCCe_csd_0.append(difc_LCCe_v_csd[i].real)
difc_LCCe_csd_1.append(difc_LCCe_v_csd[i].imag)
for i in range(len(difc_LCRC_v_csd)):
difc_LCRC_csd_0.append(difc_LCRC_v_csd[i].real)
difc_LCRC_csd_1.append(difc_LCRC_v_csd[i].imag)
for i in range(len(difc_RCCe_v_csd)):
difc_RCCe_csd_0.append(difc_RCCe_v_csd[i].real)
difc_RCCe_csd_1.append(difc_RCCe_v_csd[i].imag)
for i in range(len(difc_LCOL_v_csd)):
difc_LCOL_csd_0.append(difc_LCOL_v_csd[i].real)
difc_LCOL_csd_1.append(difc_LCOL_v_csd[i].imag)
for i in range(len(difc_RCOR_v_csd)):
difc_RCOR_csd_0.append(difc_RCOR_v_csd[i].real)
difc_RCOR_csd_1.append(difc_RCOR_v_csd[i].imag)
csd2_LCCe = []
csd2_LCRC = []
csd2_RCCe = []
for i in range(len(difc_RCCe_csd_0)):
csd2_LCCe.append((difc_LCCe_csd_0[i], difc_LCCe_csd_1[i]))
csd2_LCRC.append((difc_LCRC_csd_0[i], difc_LCRC_csd_1[i]))
csd2_RCCe.append((difc_RCCe_csd_0[i], difc_RCCe_csd_1[i]))
# f1 feature
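                        # per region pair: real/imag parts of the largest-magnitude CSD bin plus the mean real and imaginary parts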
t = np.abs(difc_LCCe_v_csd)
j = np.argmax(t)
max_cLCCe = (difc_LCCe_csd_0[j], difc_LCCe_csd_1[j])
mean_cLCCe = [np.mean(np.asarray(difc_LCCe_csd_0)), np.mean(np.asarray(difc_LCCe_csd_1))]
f1LCCe = np.array([max_cLCCe[0], max_cLCCe[1], mean_cLCCe[0], mean_cLCCe[1]])
t = np.abs(difc_LCRC_v_csd)
j = np.argmax(t)
max_cLCRC = (difc_LCRC_csd_0[j], difc_LCRC_csd_1[j])
mean_cLCRC = [np.mean(np.asarray(difc_LCRC_csd_0)), np.mean(np.asarray(difc_LCRC_csd_1))]
f1LCRC = np.array([max_cLCRC[0], max_cLCRC[1], mean_cLCRC[0], mean_cLCRC[1]])
t = np.abs(difc_RCCe_v_csd)
j = np.argmax(t)
max_cRCCe = (difc_RCCe_csd_0[j], difc_RCCe_csd_1[j])
mean_cRCCe = [np.mean(np.asarray(difc_RCCe_csd_0)), np.mean(np.asarray(difc_RCCe_csd_1))]
f1RCCe = np.array([max_cRCCe[0], max_cRCCe[1], mean_cRCCe[0], mean_cRCCe[1]])
t = np.abs(difc_LCOL_v_csd)
j = np.argmax(t)
max_cLCOL = (difc_LCOL_csd_0[j], difc_LCOL_csd_1[j])
mean_cLCOL = [np.mean(np.asarray(difc_LCOL_csd_0)), np.mean(np.asarray(difc_LCOL_csd_1))]
f1LCOL = np.array([max_cLCOL[0], max_cLCOL[1], mean_cLCOL[0], mean_cLCOL[1]])
t = np.abs(difc_RCOR_v_csd)
j = np.argmax(t)
max_cRCOR = (difc_RCOR_csd_0[j], difc_RCOR_csd_1[j])
mean_cRCOR = [np.mean(np.asarray(difc_RCOR_csd_0)), np.mean(np.asarray(difc_RCOR_csd_1))]
f1RCOR = np.array([max_cRCOR[0], max_cRCOR[1], mean_cRCOR[0], mean_cRCOR[1]])
derived_data_mean = np.concatenate([difg_LCRC_mean, difc_LCRC_mean, difg_o1_mean, difc_o1_mean,
difg_o2_mean, difc_o2_mean])
derived_data_var = np.concatenate([difg_LCRC_var, difc_LCRC_var, difg_o1_var, difc_o1_var,
difg_o2_var, difc_o2_var])
derived_data_std = np.concatenate([difg_LCRC_std, difc_LCRC_std, difg_o1_std, difc_o1_std,
difg_o2_std, difc_o2_std])
derived_data_psd = np.concatenate([difc_LCCe_psd, difc_LCRC_psd, difc_RCCe_psd])
derived_data_csd = np.concatenate([difc_LCCe_csd_0, difc_LCCe_csd_1, difc_LCRC_csd_0, difc_LCRC_csd_1, difc_RCCe_csd_0, difc_RCCe_csd_1])
derived_data_rcsd = np.concatenate([difc_LCCe_csd_0, difc_LCRC_csd_0, difc_RCCe_csd_0])
derived_data_f1 = np.concatenate([f1LCCe, f1LCRC, f1RCCe])
derived_data_test = np.concatenate([f1LCCe, f1LCRC, f1RCCe, f1LCOL, f1RCOR, difc_LCRC_std, difc_LCRC_var, difc_LCRC_psd, difc_LCRC_mean])
chrom_data = self.dataset['RC-chrom'].iloc[50] - self.dataset['C-chrom'].iloc[50]
if 'fake' in self.full_path.lower():
self.fake_data_LCCE[self.fake_count] = difc_LCCe
self.fake_data_LCRC[self.fake_count] = difc_LCRC
self.fake_data_RCCE[self.fake_count] = difc_RCCe
self.fake_data_mean[self.fake_count] = derived_data_mean
self.fake_data_var[self.fake_count] = derived_data_var
self.fake_data_std[self.fake_count] = derived_data_std
self.fake_data_psd[self.fake_count] = derived_data_psd
self.fake_data_csd[self.fake_count] = derived_data_csd
self.fake_data_f1[self.fake_count] = derived_data_f1
self.fake_data_test[self.fake_count] = derived_data_test
self.fake_count += 1
else:
self.real_data_LCCE[self.real_count] = difc_LCCe
self.real_data_LCRC[self.real_count] = difc_LCRC
self.real_data_RCCE[self.real_count] = difc_RCCe
self.real_data_mean[self.real_count] = derived_data_mean
self.real_data_var[self.real_count] = derived_data_var
self.real_data_std[self.real_count] = derived_data_std
self.real_data_psd[self.real_count] = derived_data_psd
self.real_data_csd[self.real_count] = derived_data_csd
self.real_data_f1[self.real_count] = derived_data_f1
self.real_data_test[self.real_count] = derived_data_test
self.real_count += 1
self.vid_count += 1
self.real_df_LCCE = pd.DataFrame(self.real_data_LCCE)
self.real_df_LCRC = pd.DataFrame(self.real_data_LCRC)
self.real_df_RCCE = pd.DataFrame(self.real_data_RCCE)
self.fake_df_LCCE = pd.DataFrame(self.fake_data_LCCE)
self.fake_df_LCRC = pd.DataFrame(self.fake_data_LCRC)
self.fake_df_RCCE = pd.DataFrame(self.fake_data_RCCE)
self.real_df_m = pd.DataFrame(self.real_data_mean)
self.fake_df_m = pd.DataFrame(self.fake_data_mean)
self.real_df_v = pd.DataFrame(self.real_data_var)
self.fake_df_v = pd.DataFrame(self.fake_data_var)
self.real_df_s = pd.DataFrame(self.real_data_std)
self.fake_df_s = pd.DataFrame(self.fake_data_std)
self.real_df_p = pd.DataFrame(self.real_data_psd)
self.fake_df_p = pd.DataFrame(self.fake_data_psd)
self.real_df_csp =
|
pd.DataFrame(self.real_data_csd)
|
pandas.DataFrame
|
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.preprocessing.text import Tokenizer
import pandas as pd
import numpy as np
import dill
from constants import Const
from polyglot.text import Text
def is_none(x):
if type(x).__name__ == 'None':
return True
return False
def get_raw_test_reviews(review="test"):
if review == "test":
def read_data_from_file(path):
data = {
'all' : [],
'sentences' : [],
'list_of_poss' : [],
'list_of_is_aspects' : [],
'list_of_iobs' : [],
'raw' : []
}
with open(path, "r") as f:
tokens, words, poss, is_aspects, iob_aspects = [], [], [], [], []
for line in f:
line = line.rstrip()
if line:
token = tuple(line.split())
words.append(token[0])
poss.append(token[1])
is_aspects.append(token[2])
iob_aspects.append(token[6])
tokens.append(token)
else:
data['all'].append(tokens)
data['sentences'].append(words)
data['list_of_poss'].append(poss)
data['list_of_is_aspects'].append(is_aspects)
data['list_of_iobs'].append(iob_aspects)
data['raw'].append(" ".join(words))
tokens, words, poss, is_aspects, iob_aspects = [], [], [], [], []
return data
test_data = read_data_from_file(Const.OTE_ROOT + 'data/test_data_fixed.txt')
# df_test = pd.read_csv(Const.CE_ROOT + "data/test_data.csv", delimiter=";", header=0, encoding = "ISO-8859-1")
# X_test = df_test['review']
# return X_test
return test_data['raw']
elif review == "tizi":
X = []
with open(Const.REVIEWS_ROOT + 'tizi_reviews.txt', 'r') as fi:
X = fi.read().split('\n')
return X
elif review == "cafe_halaman":
X = []
with open(Const.REVIEWS_ROOT + 'cafe_halaman_reviews.txt', 'r') as fi:
X = fi.read().split('\n')
return X
else:
print("REVIEWS: test, tizi, cafe_halaman")
def get_tokenizer():
"""
Load Tokenizer
"""
# Make Tokenizer (load or from dataset)
from tensorflow.keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
with open(Const.TOKENIZER_PATH, 'rb') as fi:
tokenizer = dill.load(fi)
return tokenizer
def prepare_ce_X(X, tokenizer):
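    # convert review strings to integer id sequences and post-pad/truncate them to 150 tokens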
X_new = tokenizer.texts_to_sequences(X)
max_review_length = 150
PADDING_TYPE = 'post'
X_new = sequence.pad_sequences(X_new, maxlen=max_review_length, padding=PADDING_TYPE)
return X_new
def prepare_ce_y(df):
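    # binary multi-label targets for the four aspect categories: yes -> 1, no or missing -> 0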
y = df[['food', 'service', 'price', 'place']]
y = y.replace(to_replace='yes', value=1)
y = y.replace(to_replace='no', value=0)
y = y.replace(to_replace=np.nan, value=0)
return y
def get_ce_dataset(return_df=False):
tokenizer = get_tokenizer()
"""
Construct X and y
"""
df = pd.read_csv(Const.CE_ROOT + "data/train_data.csv", delimiter=";", header=0, encoding = "ISO-8859-1")
df_test = pd.read_csv(Const.CE_ROOT + "data/test_data.csv", delimiter=";", header=0, encoding = "ISO-8859-1")
df = df.sample(frac=1, random_state=7)
X = df['review']
X_test = df_test['review']
X = prepare_ce_X(X, tokenizer)
X_test = prepare_ce_X(X_test, tokenizer)
y = prepare_ce_y(df)
y_test = prepare_ce_y(df_test)
if return_df:
return X, y, X_test, y_test, df, df_test
else:
return X, y, X_test, y_test
def get_spc_dataset(category, get_relevant_categories_only=True, return_df = False):
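    # sentiment-polarity data for one aspect category; optionally keeps only reviews where that category is labelled (not '-')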
tokenizer = get_tokenizer()
"""
Construct X and y
"""
df = pd.read_csv(Const.SPC_ROOT + "data/train_data_3.csv", delimiter=";", header=0, encoding = "ISO-8859-1")
df_test = pd.read_csv(Const.SPC_ROOT + "data/test_data_3.csv", delimiter=";", header=0, encoding = "ISO-8859-1")
df = df.sample(frac=1, random_state=7)
if get_relevant_categories_only:
X = df[df[category] != '-' ]['review']
X_test = df_test[df_test[category] != '-' ]['review']
else:
X = df['review']
X_test = df_test['review']
X = prepare_ce_X(X, tokenizer)
X_test = prepare_ce_X(X_test, tokenizer)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = df[category]
y_test = df_test[category]
if get_relevant_categories_only:
y = y[y != '-']
y_test = y_test[y_test != '-']
else:
y = y.replace(to_replace='-', value=2)
y_test = y_test.replace(to_replace='-', value=2)
y = y.replace(to_replace='positive', value=1)
y = y.replace(to_replace='negative', value=0)
y_test = y_test.replace(to_replace='positive', value=1)
y_test = y_test.replace(to_replace='negative', value=0)
if return_df:
return X, y, X_test, y_test, df[df[category] != '-' ], df_test[df_test[category] != '-' ]
else:
return X, y, X_test, y_test
def get_ote_dataset(return_df = False):
def read_data_from_file(path):
data = {
'all' : [],
'sentences' : [],
'list_of_poss' : [],
'list_of_is_aspects' : [],
'list_of_iobs' : [],
'raw' : []
}
with open(path, "r") as f:
tokens, words, poss, is_aspects, iob_aspects = [], [], [], [], []
for line in f:
line = line.rstrip()
if line:
token = tuple(line.split())
words.append(token[0])
poss.append(token[1])
is_aspects.append(token[2])
iob_aspects.append(token[6])
tokens.append(token)
else:
data['all'].append(tokens)
data['sentences'].append(words)
data['list_of_poss'].append(poss)
data['list_of_is_aspects'].append(is_aspects)
data['list_of_iobs'].append(iob_aspects)
data['raw'].append(" ".join(words))
tokens, words, poss, is_aspects, iob_aspects = [], [], [], [], []
return data
train_data = read_data_from_file(Const.OTE_ROOT + 'data/train_data_fixed.txt')
test_data = read_data_from_file(Const.OTE_ROOT + 'data/test_data_fixed.txt')
df = pd.DataFrame(train_data)
df_test = pd.DataFrame(test_data)
"""
Calculate Metrics
"""
# from scipy import stats
# sentence_lengths = []
# for sentence in train_data['sentences']:
# sentence_lengths.append(len(sentence))
# print("max :", np.max(sentence_lengths))
# print("min :", np.min(sentence_lengths))
# print("mean:", np.mean(sentence_lengths))
# print("mode:", stats.mode(sentence_lengths))
tokenizer = get_tokenizer()
from polyglot.text import Text
from tensorflow.keras.utils import to_categorical
tags = [
'ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'DET', 'INTJ', 'NOUN', 'NUM',
'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X'
]
pos_tokenizer = Tokenizer()
pos_tokenizer.fit_on_texts(tags)
def read_pos_from_raw(data):
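        # POS-tag each raw sentence with polyglot (language forced to Indonesian) and encode the universal tags as integer ids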
pos = []
for sent in data['raw']:
plg = Text(sent)
plg.language = 'id'
_, plg = zip(*plg.pos_tags)
pos.append(" ".join(list(plg)))
pos = pos_tokenizer.texts_to_sequences(pos)
return pos
pos = read_pos_from_raw(train_data)
pos_test = read_pos_from_raw(test_data)
"""
Create X and Y
"""
X = train_data['raw']
X_test = test_data['raw']
X = tokenizer.texts_to_sequences(X)
X_test = tokenizer.texts_to_sequences(X_test)
# truncate and pad input sequences
max_review_length = 81
PADDING = 'post'
def is_valid(arr):
return not np.any(np.isnan(arr))
X = sequence.pad_sequences(X, maxlen=max_review_length, padding=PADDING, value=Const.PADDING)
assert is_valid(X)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length, padding=PADDING, value=Const.PADDING)
assert is_valid(X_test)
dum = ['O ASPECT-B ASPECT-I']
iob_tokenizer = Tokenizer(filters='')
iob_tokenizer.fit_on_texts(dum)
from tensorflow.keras.utils import to_categorical
y_raw = [" ".join(x) for x in df['list_of_iobs']]
y_raw = iob_tokenizer.texts_to_sequences(y_raw)
y = sequence.pad_sequences(y_raw, maxlen=max_review_length, padding=PADDING, value=1.)
assert is_valid(y)
y_test_raw = [" ".join(x) for x in df_test['list_of_iobs']]
y_test_raw = iob_tokenizer.texts_to_sequences(y_test_raw)
y_test = sequence.pad_sequences(y_test_raw, maxlen=max_review_length, padding=PADDING, value=1.)
assert is_valid(y_test)
pos = sequence.pad_sequences(pos, maxlen=max_review_length, padding='post', value=Const.PADDING)
assert is_valid(pos)
pos_test = sequence.pad_sequences(pos_test, maxlen=max_review_length, padding='post', value=Const.PADDING)
assert is_valid(pos_test)
y = to_categorical(y)
y_test = to_categorical(y_test)
pos = to_categorical(pos)
pos_test = to_categorical(pos_test)
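    # drop the all-zero index-0 column added by to_categorical (tokenizer ids start at 1), keeping the three IOB classes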
y = y[:,:,1:]
y_test = y_test[:,:,1:]
if return_df:
return X, y, pos, X_test, y_test, pos_test, df, df_test
else:
return X, y, pos, X_test, y_test, pos_test
def filter_sentence(sentence):
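    # replace hyphens with a placeholder token before POS tagging (polyglot appears to mishandle them)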
# new_sentence = sentence
new_sentence = sentence.replace('-', 'DASH')
# new_sentence = sentence.replace('~', 'WAVE')
return new_sentence
def prepare_crf_X(X_raw):
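    # POS-tag each filtered sentence with polyglot (Indonesian); the resulting (word, tag) pairs are the CRF inputs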
X_pos_tagged = []
for sentence in X_raw:
filtered_sentence = filter_sentence(sentence)
polyglot_text = Text(filtered_sentence)
polyglot_text.language = 'id'
tagged_sentence = polyglot_text.pos_tags
X_pos_tagged.append(tagged_sentence)
return X_pos_tagged
def get_crf_ote_dataset(return_df = False):
def read_data_from_file(path):
data = {
'all' : [],
'sentences' : [],
'list_of_poss' : [],
'list_of_is_aspects' : [],
'list_of_iobs' : [],
'raw' : []
}
with open(path, "r") as f:
tokens, words, poss, is_aspects, iob_aspects = [], [], [], [], []
for line in f:
line = line.rstrip()
if line:
token = tuple(line.split())
words.append(token[0])
poss.append(token[1])
is_aspects.append(token[2])
iob_aspects.append(token[6])
tokens.append(token)
else:
data['all'].append(tokens)
data['sentences'].append(words)
data['list_of_poss'].append(poss)
data['list_of_is_aspects'].append(is_aspects)
data['list_of_iobs'].append(iob_aspects)
data['raw'].append(" ".join(words))
tokens, words, poss, is_aspects, iob_aspects = [], [], [], [], []
return data
train_data = read_data_from_file(Const.OTE_ROOT + 'data/train_data_fixed.txt')
test_data = read_data_from_file(Const.OTE_ROOT + 'data/test_data_fixed.txt')
df =
|
pd.DataFrame(train_data)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
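        # six-row MultiIndex fixture over levels ['foo', 'bar', 'baz', 'qux'] x ['one', 'two'], named ['first', 'second'], shared by the tests below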
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
|
tm.assert_index_equal(result, expected)
|
pandas.util.testing.assert_index_equal
|
import h5py
import os
from torch.utils.data import TensorDataset, DataLoader, Dataset, Sampler
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, random_split
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
import argparse
import time
class Net(nn.Module):
def __init__(self, feature_num):
super(Net, self).__init__()
self.layer_1 = nn.Linear(feature_num, 500)
self.layer_2 = nn.Linear(500, 20)
def forward(self, x):
x = F.relu(self.layer_1(x))
x = self.layer_2(x)
return x
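# --- Hedged smoke-test sketch (not part of the original script): checks that Net
# maps a (batch, feature_num) input to a (batch, 20) output, assuming a
# hypothetical feature count of 128.
def _smoke_test_net():
    net = Net(feature_num=128)
    out = net(torch.randn(4, 128))
    assert out.shape == (4, 20)
    return out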
def process_data_to_same_gene(gene, root_dir, output_dir, mode):
dataset_list = os.listdir(root_dir)
for l in range(len(dataset_list)):
dataset = dataset_list[l]
if '.txt' in dataset:
all_data = pd.read_csv(root_dir + dataset, sep='\t')
elif '.csv' in dataset:
all_data = pd.read_csv(root_dir + dataset)
elif '.h5' in dataset and '_processed' not in dataset:
all_data = pd.read_hdf(root_dir + dataset)
else:
continue
add_gene = []
all_data_columns = []
for all_data_gene in all_data.columns:
all_data_columns.append(all_data_gene.lower())
all_data.columns = all_data_columns
for gene_ in gene:
if gene_ not in all_data_columns:
add_gene.append(gene_)
df = pd.DataFrame(
np.zeros((all_data.shape[0], len(add_gene))), index=all_data.index, columns=add_gene)
df =
|
pd.concat([all_data, df], axis=1)
|
pandas.concat
|
"""
Coding: UTF-8
Author: Randal
Time: 2021/2/20
E-mail: <EMAIL>
Description: This is a simple toolkit for data extraction of text.
The most important function in the script is about word frequency statistics.
Using re, I generalized the process in words counting, regardless of any preset
word segmentation. Besides, many interesting functions, like getting top sentences are built here.
All rights reserved.
"""
import xlwings as xw
import pandas as pd
import numpy as np
import os
import re
from alive_progress import alive_bar
from alive_progress import show_bars, show_spinners
import jieba
import datetime
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import math
class jieba_vectorizer(CountVectorizer):
def __init__(self, tf, userdict, stopwords, orient=False):
"""
        :param tf: input sample frame, {axis: 1, 0: id, 1: title, 2: body ('正文'), 3: source, 4: freq}
        :param stopwords: path to the stop-word list
        :param userdict: path to the keyword list (user dictionary)
        :param orient: {True: the returned DTM only keeps words from the keyword list, False: the DTM keeps every word}
        :return: a word-vector sample that can be used directly
"""
self.userdict = userdict
self.orient = orient
self.stopwords = stopwords
        jieba.load_userdict(self.userdict)  # load the user keyword dictionary
        tf = tf.copy()  # avoid mutating the original frame outside this function
        print('Tokenizing, please wait...')
        rule = re.compile(u'[^\u4e00-\u9fa5]')  # clean every sample, keeping Chinese characters only
for i in range(0, tf.shape[0]):
try:
tf.iloc[i, 2] = rule.sub('', tf.iloc[i, 2])
except TypeError:
                print('Sample cleaning error: doc_id = ' + str(i))
continue
if self.stopwords is not None:
            stopwords = txt_to_list(self.stopwords)  # load the stop-word list
else:
stopwords = []
        # start tokenizing
words = []
items = range(0, len(tf))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i, row in tf.iterrows():
item = row['正文']
result = jieba.cut(item)
                # filter out stop words at the same time
word = ''
for element in result:
if element not in stopwords:
if element != '\t':
word += element
word += " "
words.append(word)
bar()
        # CountVectorizer() handles the word-frequency counting automatically; fit_transform builds the text vectors and the vocabulary
        # to switch to TfidfVectorizer, just modify the three lines below
vect = CountVectorizer()
X = vect.fit_transform(words)
self.vectorizer = vect
matrix = X
X = X.toarray()
        # a 2-D ndarray can be displayed in PyCharm, but it behaves nothing like a DataFrame
        # an ndarray has no index and no columns
features = vect.get_feature_names()
XX = pd.DataFrame(X, index=tf['id'], columns=features)
self.DTM0 = matrix
self.DTM = XX
self.features = features
        # # the lines below are an earlier detour, kept only for reference
        # words_bag = vect.vocabulary_
        # # invert the dict (note: only valid when keys and values map one-to-one; for one value to many keys see setdefault)
        # bag_words = dict((v, k) for k, v in words_bag.items())
        #
        # # the order of dict elements is not the same as the order of their values
        # lst = []
        # for i in range(0, len(XX.columns)):
        #     lst.append(bag_words[i])
        # XX.columns = lst
if orient:
dict_filter = txt_to_list(self.userdict)
for word in features:
if word not in dict_filter:
XX.drop([word], axis=1, inplace=True)
self.DTM_key = XX
def get_feature_names(self):
return self.features
def strip_non_keywords(self, df):
ff = df.copy()
dict_filter = txt_to_list(self.userdict)
for word in self.features:
if word not in dict_filter:
ff.drop([word], axis=1, inplace=True)
return ff
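# --- Hedged usage sketch (not part of the original toolkit): how a DTM could be
# built with jieba_vectorizer. The path 'userdict.txt' is a hypothetical
# placeholder that must point at a real jieba user dictionary; the tiny frame
# follows the layout documented in __init__ ({0: id, 1: title, 2: '正文' (body),
# 3: source, 4: freq}), and the bodies are Chinese because __init__ strips
# everything except Chinese characters.
def _demo_build_dtm():
    demo_tf = pd.DataFrame({
        'id': [1, 2],
        'title': ['sample 1', 'sample 2'],
        '正文': ['今天天气很好', '明天天气也很好'],
        'source': ['a', 'b'],
        'freq': [0, 0],
    })
    vec = jieba_vectorizer(demo_tf, userdict='userdict.txt', stopwords=None)
    return vec.DTM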
def make_doc_freq(word, doc):
"""
    :param word: the keyword whose frequency is being counted
    :param doc: the text to scan
    :return: a dict recording how often the keyword occurs in the text, together with the surrounding context of each occurrence
"""
    # match with a regular expression; the keyword itself is used as the pattern
    # re.S lets '.' match across line breaks
    # finditer is the iterator version of findall; iterating over it yields the position of each match in turn
it = re.finditer(word, doc, re.S)
    # match.group() returns the matched substring, match.span() returns its index range
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
    # collect the context results into a dict as well
context = dict()
for i in range(0, len(lst)):
        # extend each span by at most 10 characters on each side to obtain the context
try:
            # to mark out a suitable context window, the index needs an upper and a lower bound
            # so compare span+10 against the document length, and span-10 against 0
            # the upper bound takes the smaller of its pair, the lower bound takes the larger
            MAX = min(lst[i][1] + 10, len(doc))
            MIN = max(0, lst[i][0] - 10)
            # take the context
            context[str(i)] = doc[MIN: MAX]
except IndexError:
print('IndexError: ' + word)
freq['Context'] = context
return freq
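# --- Hedged usage sketch (not part of the original toolkit): make_doc_freq on a
# short synthetic document. "data" occurs twice in the text below, so the result
# reports Frequency == 2 and holds two context snippets.
def _demo_make_doc_freq():
    freq = make_doc_freq("data", "data cleaning usually improves data quality")
    assert freq["Frequency"] == 2
    assert len(freq["Context"]) == 2
    return freq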
def make_info_freq(name, pattern, doc):
"""
    :param name: the label of the pattern type whose frequency is being counted
    :param pattern: the regular expression (with its trimming rule) used for the count
    :param doc: the text to scan
    :return: a dict recording how often the pattern occurs in the text, together with the surrounding context of each occurrence
    Note: in the returned dict, each context entry is a tuple (keyword, context)
"""
    # match with a regular expression; pattern[0] is used as the pattern
    # re.S lets '.' match across line breaks
    # finditer is the iterator version of findall; iterating over it yields the position of each match in turn
it = re.finditer(pattern[0], doc, re.S)
    # match.group() returns the matched substring, match.span() returns its index range
cls = pattern[1]
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
freq['Name'] = name
    # collect the context results into a dict as well
context = dict()
for i in range(0, len(lst)):
        # extend each span by at most 10 characters on each side to obtain the context
try:
            # to mark out a suitable context window, the index needs an upper and a lower bound
            # so compare span+10 against the document length, and span-10 against 0
            # the upper bound takes the smaller of its pair, the lower bound takes the larger
            MAX = min(lst[i][1] + 10, len(doc))
            MIN = max(0, lst[i][0] - 10)
            # take the matched keyword and trim it at both ends
            word = match_cut(doc[lst[i][0]: lst[i][1]], cls)
            # pack the keyword and its context together and store them under this context entry
            context[str(i)] = (word, doc[MIN: MAX])
except IndexError:
print('IndexError: ' + name)
freq['Context'] = context
return freq
def make_docs_freq(word, docs):
"""
    :param word: the keyword whose frequency is being counted
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0), a body column (iloc: 2) and a reserved frequency column (iloc: 4)
    :return: a dict containing the per-document frequency dicts ("single keyword - single text") plus the aggregated counts
"""
freq = dict()
    # the total frequency is accumulated with "+=" rather than assigned once, so it must start at 0
freq['Total Frequency'] = 0
    docs = docs.copy()  # avoid mutating the original frame outside this function
for i in range(0, len(docs)):
        # build a dict for each document, holding the keyword's frequency in that document and its contexts
        # the id must be in column 0 and the body in column 2
freq['Doc' + str(docs.iloc[i, 0])] = make_doc_freq(word, docs.iloc[i, 2])
        # while building the per-document dicts, keep a running total of the overall count
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
    # next, build a DFC (doc-freq-context) panel that aggregates the frequencies and contexts of every document
    # first build an (id, freq) dict mapping
    xs = docs['id']
    ys = docs['freq']
    # zip(iterators) is a very handy tool, well worth using
    id_freq = {x: y for x, y in zip(xs, ys)}
    # create an empty shell DataFrame, then paste the records into it one by one
data = pd.DataFrame(columns=['id', 'freq', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
context = doc['Context']
for i in range(0, num):
strip = {'id': item, 'freq': id_freq[item], 'word': word, 'num': i, 'context': context[str(i)]}
            # the orient parameter defaults to columns
            # if the dict values are scalars, an index must be passed; that is the rule
strip = pd.DataFrame(strip, index=[None])
            # DataFrame.append does not modify in place, so the result has to be reassigned
data = data.append(strip)
data.set_index(['id', 'freq', 'word'], drop=True, inplace=True)
freq['DFC'] = data
return freq
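# --- Hedged usage sketch (not part of the original toolkit): make_docs_freq on a
# two-row frame laid out as documented above (id in column 0, body in column 2,
# freq in column 4). "data" occurs three times in total. Note that the DFC panel
# is built with DataFrame.append, so this assumes the older pandas versions the
# toolkit itself targets.
def _demo_make_docs_freq():
    demo_docs = pd.DataFrame({
        'id': [1, 2],
        'title': ['t1', 't2'],
        'body': ['data in, data out', 'more data here'],
        'source': ['a', 'b'],
        'freq': [0, 0],
    })
    freq = make_docs_freq("data", demo_docs)
    assert freq['Total Frequency'] == 3
    return freq['DFC']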
def make_infos_freq(name, pattern, docs):
"""
    :param name: the label of the pattern type whose frequency is being counted
    :param pattern: the (regular expression, trimming rule) pair used for the count
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0) and a body column (iloc: 2)
    :return: a dict containing the per-document frequency dicts ("single keyword - single text") plus the aggregated counts
"""
freq = dict()
    # the total frequency is accumulated with "+=" rather than assigned once, so it must start at 0
freq['Total Frequency'] = 0
    docs = docs.copy()  # avoid mutating the original frame outside this function
items = range(0, len(docs))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i in items:
            # build a dict for each document, holding the pattern's frequency in that document and its contexts
            # the id must be in column 0 and the body in column 2
            # pattern is passed through whole, because make_info_freq needs both of its elements
freq['Doc' + str(docs.iloc[i, 0])] = make_info_freq(name, pattern, docs.iloc[i, 2])
            # while building the per-document dicts, keep a running total of the overall count
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
bar()
    # next, build a DFC (doc-freq-context) panel that aggregates the frequencies and contexts of every document
    # first build an (id, freq) dict mapping
    xs = docs['id']
    ys = docs['freq']
    # zip(iterators) is a very handy tool, well worth using
    id_freq = {x: y for x, y in zip(xs, ys)}
    # create an empty shell DataFrame, then paste the records into it one by one
data = pd.DataFrame(columns=['id', 'freq', 'form', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
        # unpack the two elements of (keyword, context)
context = doc['Context']
for i in range(0, num):
            # the keyword inside context has already been trimmed by match_cut, no need to process it again
strip = {'id': item, 'form': name, 'freq': id_freq[item], 'word': context[str(i)][0],
'num': i, 'context': context[str(i)][1]}
            # the orient parameter defaults to columns
            # if the dict values are scalars, an index must be passed; that is the rule
strip = pd.DataFrame(strip, index=[None])
            # DataFrame.append does not modify in place, so the result has to be reassigned
data = data.append(strip)
data.set_index(['id', 'freq', 'form', 'word'], drop=True, inplace=True)
freq['DFC'] = data
print(name + ' Completed')
return freq
def words_docs_freq(words, docs):
"""
    :param words: the list of keywords whose frequencies are being counted
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column, a body column and a frequency column
    :return: a dict containing the "single keyword - multiple texts" frequency dicts, plus the final DFC (doc-frequency-context) and DTM (doc-term matrix)
"""
freqs = dict()
    # at the same time, create an empty shell DataFrame to aggregate the DFC
    data = pd.DataFrame()
    # create another empty shell to aggregate the DTM (doc-term matrix)
dtm =
|
pd.DataFrame(None, columns=words, index=docs['id'])
|
pandas.DataFrame
|
import pandas as pd
from datetime import timedelta
import warnings
import plotly.graph_objs as go
warnings.filterwarnings("ignore")
def weekly_percentage(path_csv):
"""Return the weekly average of a specific region
Params:
path_csv (str): CSV path
Returns:
        (dict): containing the data information of the image
"""
history_daily = pd.read_csv(f'data/{path_csv}.csv')
BAIRROS_FOR_STUDY = ['Haight-Ashbury', 'San Francisco', 'The Castro',
'Others', 'Union Square', 'Chinatown',
'Alamo Square', 'Mission District',
'SoMa', 'Fisherman’s wharf']
history_daily = history_daily.loc[history_daily['bairro']
.isin(BAIRROS_FOR_STUDY)]
# Data Preprocessing
history_daily = history_daily.loc[history_daily['bairro']
.isin(BAIRROS_FOR_STUDY)]
history_daily['dia'] =
|
pd.to_datetime(history_daily['dia'])
|
pandas.to_datetime
|
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["TD"].dtype == "timedelta64[ns]"
def test_combine_first_period(self):
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
)
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
df2 = DataFrame({"P": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = [
pd.Period("2011-01", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.NaT,
pd.Period("2012-01-02", freq="D"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == "object"
def test_combine_first_int(self):
# GH14687 - integer series that do no align exactly
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
result_12 = df1.combine_first(df2)
expected_12 = DataFrame({"a": [0, 1, 3, 5]})
tm.assert_frame_equal(result_12, expected_12)
result_21 = df2.combine_first(df1)
expected_21 = DataFrame({"a": [1, 4, 3, 5]})
tm.assert_frame_equal(result_21, expected_21)
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
# see gh-20699
df1 = DataFrame({"isNum": [val]})
df2 =
|
DataFrame({"isBool": [True]})
|
pandas.DataFrame
|
# TODO move away from this test generator style since it requires us to manage the generator file,
# which is no longer in this project workspace, as well as the output test file.
## ##
# #
# THIS TEST WAS AUTOGENERATED BY groupby_test_generator.py #
# #
##
# TODO refactor this into table-driven tests using pytest.mark.parametrize, since each test body follows the same structure
# and a single test body with multiple test table entries will be more readable and flexible (a hedged sketch follows the imports below).
from .groupby_unit_test_parameters import *
import pandas as pd
import riptable as rt
import unittest
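# --- Hedged sketch (not generator output) of the table-driven refactor suggested
# in the TODO above: a single pytest-parametrized test body covering a few
# (n_val_cols, n_key_cols, symbol_ratio, aggs) combinations. It reuses the same
# groupby_everything / KEY_COLUMN_NAMES / VAL_COLUMN_NAMES helpers as the
# generated tests below; the parameter table shown here is illustrative only.
import pytest

@pytest.mark.parametrize(
    "n_val_cols, n_key_cols, symbol_ratio, aggs",
    [
        (1, 1, 0.1, ['median']),
        (5, 2, 0.30, ['median', 'min']),
        (7, 3, 0.1, ['var', 'mean', 'max']),
    ],
)
def test_multikey_groupby_agg_matches_pandas(n_val_cols, n_key_cols, symbol_ratio, aggs):
    test_class = groupby_everything(n_val_cols, n_key_cols, symbol_ratio, aggs)
    pd_out = (
        pd.DataFrame(test_class.data)
        .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
        .agg(test_class.aggregation_functions)
    )
    sf_out = (
        rt.Dataset(test_class.data)
        .groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
        .agg(test_class.aggregation_functions)
    )
    for func in aggs:
        for i in range(0, test_class.val_columns):
            column = VAL_COLUMN_NAMES[i]
            for a, b in zip(pd_out[column][func], sf_out[func.title()][column]):
                # skip NaN pairs, mirroring safe_assert in the class below
                if a == a and b == b:
                    assert a == pytest.approx(b, abs=1e-7)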
class autogenerated_gb_tests(unittest.TestCase):
def safe_assert(self, ary1, ary2):
for a, b in zip(ary1, ary2):
if a == a and b == b:
self.assertAlmostEqual(a, b, places=7)
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(1, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(4, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(7, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(2, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(5, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(1, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(4, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(7, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(2, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(5, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(1, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(4, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(7, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(2, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(5, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(1, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(4, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(7, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(2, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(5, 2, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(1, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(4, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(7, 3, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(2, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(5, 1, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(1, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(4, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(7, 2, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(2, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['max']
test_class = groupby_everything(5, 3, 0.30, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['min', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_min_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['min', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['min', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 1, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 2, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 3, 0.1, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 1, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(1, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(4, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(7, 2, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(2, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_median_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'median', 'sum']
test_class = groupby_everything(5, 3, 0.30, ['var', 'median', 'sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 1, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 2, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 3, 0.1, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 1, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(1, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(4, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(7, 2, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(2, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max_median_mean_var__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['max', 'median', 'mean', 'var']
test_class = groupby_everything(5, 3, 0.30, ['max', 'median', 'mean', 'var'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(1, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(4, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(7, 1, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(2, 2, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(5, 2, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(1, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(4, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(7, 3, 0.1, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(2, 1, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['sum']
test_class = groupby_everything(5, 1, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(1, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(4, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['sum']
test_class = groupby_everything(7, 2, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(2, 3, 0.30, ['sum'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_sum__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['sum']
test_class = groupby_everything(5, 3, 0.30, ['sum'])
pd_out = (
|
pd.DataFrame(test_class.data)
|
pandas.DataFrame
|
import json
import pandas as pd
import os
file_list = os.listdir("./json_folder")
kr_list, en_list = [], []
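# Each JSON file is expected to hold a list of {"kr": ..., "en": ...} sentence pairs.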
for file in file_list:
file_name = os.getcwd() + '/json_folder/' + file
    with open(file_name, 'r', encoding='utf-8') as f:  # assume UTF-8 JSON (Korean text)
json_data = json.load(f)
for data in json_data:
kr_list.append(data["kr"])
en_list.append(data["en"])
train_kr, train_en = kr_list[:13500], en_list[:13500]
test_kr, test_en = kr_list[13500:], en_list[13500:]
train_df = pd.DataFrame({"kr": train_kr, "en": train_en})
test_df =
|
pd.DataFrame({"kr": test_kr, "en": test_en})
|
pandas.DataFrame
|
import argparse
import os
from copy import deepcopy
from os.path import join, split
from typing import Dict, List, Any, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import ptitprince as pt
import seaborn as sns
from common import load_pickle, load_json_file, ScoresAttributes, extract_pattern_type
from configuration import Config
from evaluation import _generate_true_feature_importance
A = ScoresAttributes.get()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
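# Maps internal explanation-method identifiers to the display names used in plot labels and legends.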
NAME_MAPPING = {
'llr': 'LLR',
'pfi': 'PFI',
'mr': 'PFIO',
'mr_empirical': 'EMR',
'anchors': 'Anchors',
'lime': 'LIME',
'shap_linear': 'SHAP',
'pattern': 'Pattern',
'firm': 'FIRM',
'tree_fi': 'Impurity',
'model_weights': '$|w_{LLR}|$',
'model_weights_NN': '$|w_{NN}|$', # '$|w_{NN, 0} - w_{NN, 1}|$',
'gradient': '$Grad_{NN}$',
'deep_taylor': 'DTD',
'lrp.z': '$LRP_{z}$',
'sample': 'Sample',
'lrp.alpha_beta': '$LRP_{\\alpha\\beta}$',
'pattern.net': 'PatternNet',
'pattern.attribution': 'PatternAttr.',
'input_t_gradient': 't_gradient',
'impurity': 'Impurity',
'correlation': 'Corr.',
'binary_mask': 'Ground\nTruth',
'pattern_distractor': 'Pattern/Distractor'
}
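# Methods excluded from the rain-cloud/violin plots (skipped in create_rain_cloud_data).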
METHOD_BLACK_LIST = ['input_t_gradient', 'impurity']
NAME_MAPPING_SCORES = {
'pr_auc': 'Precision-Recall AUC',
# 'max_precision': 'Max Precision',
# 'max_precision': 'Precision \n for Specificity $\\approx$ 0.9',
'max_precision': 'PREC90',
'avg_precision': 'Average Precision',
'auc': 'AUROC'
}
FONT_SIZES = {
'ticks': 10,
'label': 12,
'legend': 10
}
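# Writes a figure to file_path (creating the output directory if needed) and closes it.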
def save_figure(file_path: str, fig: plt.Figure, dpi: int) -> None:
output_dir = split(file_path)[0]
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
fig.savefig(fname=file_path, dpi=dpi, bbox_inches='tight')
plt.close(fig=fig)
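# Plots the mean train/validation accuracy per SNR setting for a single model and saves the figure.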
def create_accuracy_plot(model_accuracies: Dict, config: Config,
file_name_suffix: str) -> None:
data_dict = {'SNR': list(), 'Accuracy': list(), 'data_type': list()}
for weights, values in model_accuracies.items():
snr = f'{weights.split("_")[0]}'
data_dict['SNR'] += [snr]
data_dict['Accuracy'] += [np.mean(values['train'])]
data_dict['data_type'] += ['train']
data_dict['SNR'] += [snr]
data_dict['Accuracy'] += [np.mean(values['val'])]
data_dict['data_type'] += ['val']
sns.set_theme('paper')
with sns.axes_style("whitegrid"):
g = sns.lineplot(data=pd.DataFrame(data_dict), x='SNR', y='Accuracy', hue='data_type')
g.set_ylim(0, 1)
plt.legend(loc='lower right')
plt.tight_layout()
file_name = '_'.join(['accuracy_avg_plot', file_name_suffix, '.png'])
output_path = join(config.output_dir_plots, file_name)
fig = g.get_figure()
save_figure(file_path=output_path, fig=fig, dpi=config.dpi)
def overall_accuracy_plot(scores: Dict, config: Config) -> None:
data_dict = {'SNR': list(), 'Accuracy': list(),
'Dataset': list(), 'Model': list()}
for weights, values in scores[A.model_accuracies].items():
snr = f'{weights.split("_")[0]}'
for model_name, accuracies in values.items():
data_dict['SNR'] += [snr]
data_dict['Accuracy'] += [np.mean(accuracies['train'])]
data_dict['Dataset'] += ['train']
data_dict['SNR'] += [snr]
data_dict['Accuracy'] += [np.mean(accuracies['val'])]
data_dict['Dataset'] += ['val']
data_dict['Model'] += [model_name]
data_dict['Model'] += [model_name]
sns.set_theme('paper')
with sns.axes_style("whitegrid"):
g = sns.lineplot(data=pd.DataFrame(data_dict), x='SNR', linewidth=4,
y='Accuracy', hue='Dataset', style='Model', markers=False)
g.set_ylim(0, 1)
g.set_aspect(aspect=2.5)
g.set(xlabel='$\lambda_1$')
g.set_yticklabels(labels=[f'{float(l):.2f}' for l in g.get_yticks()], size=FONT_SIZES['label'])
g.set_xticklabels(labels=np.unique(data_dict['SNR']), size=FONT_SIZES['label'])
for item in ([g.xaxis.label, g.yaxis.label]):
item.set_fontsize(FONT_SIZES['label'])
plt.legend(loc='lower right', prop={'size': FONT_SIZES['legend']})
plt.tight_layout()
fig = g.get_figure()
file_name = '_'.join(['overall_accuracy_avg_plot', '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=fig, dpi=config.dpi)
def create_rain_cloud_data(data: Dict, metric_name: str) -> pd.DataFrame:
data_dict = {'$\lambda_1$': list(), 'Method': list(),
metric_name: list(), 'Methods': list()}
for snr, snr_data in data.items():
for method, method_data in snr_data.items():
for roc_auc_data in method_data:
for score in roc_auc_data[metric_name]:
if method in METHOD_BLACK_LIST:
continue
data_dict[metric_name] += [score]
data_dict['$\lambda_1$'] += [snr.split('_')[0]]
data_dict['Method'] += [NAME_MAPPING.get(method, method)]
# data_dict['Methods'] += ['1']
data_dict['Methods'] += [NAME_MAPPING.get(method, method)]
return pd.DataFrame(data_dict)
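# For clarity, a minimal illustration of the nested `data` structure that
# create_rain_cloud_data expects, reconstructed from the loops above. The keys
# and numbers are made-up placeholders for illustration, not real experiment
# output.
def _example_scores_dict() -> Dict:
    return {
        '0.5_weights': {                      # one entry per lambda_1 / SNR setting
            'llr': [{'auc': [0.91, 0.88]}],   # one dict per repetition: metric -> list of scores
            'lime': [{'auc': [0.74, 0.70]}],
        },
    }
# e.g. create_rain_cloud_data(data=_example_scores_dict(), metric_name='auc')
# returns one row per score with columns '$\lambda_1$', 'Method', 'auc', 'Methods'.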
def create_rain_cloud_plots(data: Dict, config: Config,
score_attribute: str, file_name_suffix: str) -> None:
df = create_rain_cloud_data(data=data, metric_name=score_attribute)
sigma = .5
sns.set_theme('paper')
sns.set(font_scale=1)
with sns.axes_style("whitegrid"):
g = sns.FacetGrid(df, col='$\lambda_1$', height=6, ylim=(0, 1.05), )
g.map_dataframe(pt.RainCloud, x='Method', y=score_attribute, data=df,
orient='v', bw=sigma, width_viol=.5, linewidth=1)
for ax in g.axes.flat:
labels = ax.get_xticklabels()
ax.set_xticklabels(labels, rotation=20)
g.tight_layout()
file_name = '_'.join(['rain_cloud_plot', file_name_suffix, score_attribute, '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=g.fig, dpi=config.dpi)
def create_violin_plots(data: Dict, config: Config,
score_attribute: str, file_name_suffix: str) -> None:
df = create_rain_cloud_data(data=data, metric_name=score_attribute)
sigma = .5
sns.set_theme('paper')
sns.set(font_scale=1)
with sns.axes_style("whitegrid"):
g = sns.FacetGrid(df, col='$\lambda_1$', height=6, ylim=(0, 1.05), )
g.map_dataframe(sns.violinplot, x='Method', y=score_attribute, data=df,
orient='v', hue='Method', bw=sigma,
palette='muted', linewidth=1)
for ax in g.axes.flat:
labels = ax.get_xticklabels()
ax.set_xticklabels(labels, rotation=20)
g.fig.subplots_adjust(bottom=0.15)
g.tight_layout()
file_name = '_'.join(['violin_plot', file_name_suffix, score_attribute, '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=g.fig, dpi=config.dpi)
def is_keras_model(data: np.ndarray):
shape = data.shape
p = np.prod(shape)
return not (p == shape[0] or p == shape[1])
def is_sample_based(data: np.ndarray):
shape = data.shape
p = np.prod(shape)
return not (p == shape[0] or p == shape[1])
def get_randomized_heat_map_data(scores: Dict, data: Dict, rnd_idx: int) -> Dict:
data_dict = dict()
for weight, data_list in data.items():
model_weights = scores[A.model_weights][weight]
data_dict[weight] = {'data': data_list[rnd_idx],
'model_weights_nn': model_weights[A.neural_net][rnd_idx],
'model_weights_lr': model_weights[A.logistic_regression][rnd_idx],
'rnd_experiment_idx': rnd_idx}
return data_dict
def add_column_for_class_of_explanation_method(data: pd.DataFrame) -> pd.DataFrame:
num_samples = data.shape[0]
if any(data['Method'].map(lambda x: 'LIME' == x)):
data['class'] = ['Agnostic Sample Based'] * num_samples
elif any(data['Method'].map(lambda x: 'Deep' in x)):
data['class'] = ['Saliency'] * num_samples
else:
data['class'] = ['Agnostic Global'] * num_samples
return data
def overview_rain_cloud_plot(paths: List[str], config: Config,
score_data_key: str, metric_name: str):
df = pd.DataFrame()
for score_path in paths:
scores = load_pickle(file_path=score_path)
aux_df = create_rain_cloud_data(data=scores[score_data_key], metric_name=metric_name)
aux_df = add_column_for_class_of_explanation_method(data=aux_df)
df = pd.concat([df, aux_df])
sigma = .5
sns.set_theme('paper')
sns.set(font_scale=1)
with sns.axes_style("whitegrid"):
g = sns.FacetGrid(df, row='class', col='$\lambda_1$', height=6, ylim=(0, 1.05))
g.map_dataframe(pt.RainCloud, x='Method', y=metric_name, data=df,
orient='v', bw=sigma, width_viol=.0)
for ax in g.axes.flat:
labels = ax.get_xticklabels()
ax.set_xticklabels(labels, rotation=20)
g.fig.subplots_adjust(bottom=0.15)
file_name = '_'.join(['rain_cloud_plot', 'overview', metric_name, '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=g.fig, dpi=config.dpi)
def rain_clouds(scores: Dict, config: Config,
score_data_keys: List[Tuple], mode: str = 'sample_based'):
dfs = list()
metric_name_plot = '.'.join([item[1] for item in score_data_keys])
for score_data_key, metric_name in score_data_keys:
aux_df = create_rain_cloud_data(data=scores[score_data_key], metric_name=metric_name)
aux_df['class'] = A.sample_based
aux_df[metric_name_plot] = aux_df[metric_name]
aux_df['Metric'] = [metric_name] * aux_df.shape[0]
dfs += [deepcopy(aux_df)]
df = pd.concat(dfs, axis=0, ignore_index=True)
sns.set_theme('paper')
with sns.axes_style('white'):
f = sns.catplot(x='Method', y=metric_name_plot, hue='Methods',
col='$\lambda_1$', row='Metric', legend_out=True, legend=True,
data=df, kind='box', width=0.7, seed=config.seed,
height=4, aspect=0.7, palette='Set2')
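# Note: the catplot above is drawn only to harvest legend handles with the
# method colors; it is closed right away and the actual figure is rebuilt
# below with FacetGrid + pt.RainCloud, which does not yield a usable legend
# on its own. The saved handles are passed to plt.legend at the end.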
legend_handles = f.legend.legendHandles
plt.close(fig=f.fig)
g = sns.FacetGrid(df, col='$\lambda_1$', row='Metric', height=4,
ylim=(0, 1.05), aspect=0.7,
legend_out=True, palette='Set2')
g.map_dataframe(pt.RainCloud, x='Method', y=metric_name_plot, data=df,
orient='v', bw=0.45, width_viol=0.7, width_box=0.1)
ax = g.axes
configure_axes(ax=ax)
g.fig.subplots_adjust(bottom=0.15)
g.fig.subplots_adjust(wspace=0.05, hspace=0.05)
# plt.legend(handles=legend_handles, bbox_to_anchor=(1.05, 1.5),
# loc='upper left', borderaxespad=0., facecolor='white', framealpha=1)
plt.legend(handles=legend_handles, bbox_to_anchor=(-1.3, -0.05),
ncol=int(np.unique(df['Method'].values).shape[0] / 2 + 0.5), fancybox=True,
loc='upper center', borderaxespad=0., facecolor='white', framealpha=1)
file_name = '_'.join(['rain_cloud_plot', mode, metric_name_plot, '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=g.fig, dpi=config.dpi)
def configure_axes(ax: Any) -> None:
for row_idx in range(ax.shape[0]):
t = ax[row_idx, 0].get_title(loc='center')
name_of_metric = t.split('|')[0].split('=')[-1].strip()
ax[row_idx, 0].set_ylabel(ylabel=NAME_MAPPING_SCORES[name_of_metric],
fontdict={'fontsize': 18})
for col_idx in range(ax.shape[1]):
if 0 == row_idx:
t = ax[row_idx, col_idx].get_title(loc='center')
new_title = t.split('|')[-1].strip()
ax[row_idx, col_idx].set_title(
label=new_title, fontdict={'fontsize': 18})
else:
ax[row_idx, col_idx].set_title(label='')
ax[row_idx, col_idx].set_xlabel(xlabel='',
fontdict={'fontsize': 18})
labels = ax[row_idx, col_idx].get_xticklabels()
# ax.set_xticklabels(labels, rotation=45, fontdict={'fontsize': 9})
ax[row_idx, col_idx].set_xticklabels('')
ax[row_idx, col_idx].patch.set_edgecolor('black')
ax[row_idx, col_idx].grid(True)
sns.despine(ax=ax[row_idx, col_idx],
top=False, bottom=False, left=False, right=False)
def box_plot(scores: Dict, config: Config, snrs_of_interest: list,
score_data_keys: List[Tuple], mode: str = 'sample_based'):
dfs = list()
metric_name_plot = '.'.join([item[1] for item in score_data_keys])
for score_data_key, metric_name in score_data_keys:
aux_df = create_rain_cloud_data(data=scores[score_data_key], metric_name=metric_name)
aux_df['class'] = A.sample_based
aux_df[metric_name_plot] = aux_df[metric_name]
aux_df['Metric'] = [metric_name] * aux_df.shape[0]
dfs += [deepcopy(aux_df)]
df = pd.concat(dfs, axis=0, ignore_index=True)
snr_filter = df['$\lambda_1$'].map(lambda x: x in snrs_of_interest).values
# anchors_filter = df['Method'].map(lambda x: 'Anchors' == x)
# anchors = df.loc[anchors_filter, :]
# metric_filter = anchors['Metric'].map(lambda x: 'max_precision' == x)
# anchors_max_precision = anchors.loc[metric_filter, :]
df = df.loc[snr_filter, :]
with sns.axes_style('white'):
f = sns.catplot(x='Method', y=metric_name_plot, hue='Methods',
col='$\lambda_1$', row='Metric', legend_out=True, seed=config.seed,
data=df, kind='box', width=0.7,
height=4, aspect=1, palette='colorblind')
legend_handles = f.legend.legendHandles
plt.close(fig=f.fig)
g = sns.catplot(x='Method', y=metric_name_plot, seed=config.seed,
col='$\lambda_1$', legend_out=True, row='Metric',
data=df, kind='box', width=0.7,
height=4, aspect=1, palette='colorblind')
ax = g.axes
configure_axes(ax=ax)
g.fig.subplots_adjust(bottom=0.15)
g.fig.subplots_adjust(wspace=0.05, hspace=0.05)
# plt.legend(handles=legend_handles, bbox_to_anchor=(1.05, 1.5),
# loc='upper left', borderaxespad=0., facecolor='white', framealpha=1)
plt.legend(handles=legend_handles, bbox_to_anchor=(-0.55, -0.05), prop={'size': 14},
ncol=int(np.unique(df['Method'].values).shape[0] / 2 + 0.5), fancybox=True,
loc='upper center', borderaxespad=0., facecolor='white', framealpha=1)
# loc = 'upper center', bbox_to_anchor = (0.5, -0.05),
# fancybox = True, shadow = True, ncol = 5
file_name = '_'.join(['box_plot', mode, metric_name_plot, '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=g.fig, dpi=config.dpi)
def is_saliency(method_names: List) -> bool:
return 'deep_taylor' in method_names
def is_sample_based_agnostic(method_names: List) -> bool:
return 'lime' in method_names
def global_heat_maps(scores: Dict, config: Config, pattern_type: int,
rnd_experiment_idx: int, snrs_of_interest: list) -> None:
def _heat_map(result: np.ndarray, sub_ax: Any):
return sns.heatmap(result, vmin=0, ax=sub_ax, square=True,
cbar=False, cbar_kws={"shrink": shrinking})
methods = [item for item in scores[A.global_based][A.method_names] if
item not in METHOD_BLACK_LIST]
explanations = dict()
model_weights = scores[A.model_weights]
data_weights = deepcopy(scores[A.data_weights])
# Keep only the weights whose lambda_1 prefix is of interest; building a new
# list avoids removing items from the list while iterating over it
data_weights = [d for d in data_weights if d.split('_')[0] in snrs_of_interest]
for j, w in enumerate(data_weights):
explanations_per_method = dict()
for method in methods:
explanations_per_method[method] = scores[A.global_based][A.explanations][w][method]
explanations[w] = explanations_per_method
print(f'Number of methods: {len(methods)}')
methods.insert(0, 'model_weights')
# methods.insert(0, 'model_weights_NN')
methods.insert(0, 'binary_mask')
num_methods = len(methods)
num_weights = len(data_weights)
num_cols = np.maximum(num_methods, 2)
num_rows = np.maximum(num_weights, 2)
sns.set_theme('paper')
shrinking = 1.0
hspace_values = {5: -0.885, 3: -0.900}
fig, ax = plt.subplots(ncols=num_cols, nrows=num_rows, sharex=True, sharey=True,
gridspec_kw={'wspace': 0.05, 'hspace': hspace_values[len(data_weights)]})
for i, weight in enumerate(data_weights):
print(weight)
model_weight = model_weights[weight]
for k, method in enumerate(methods):
if 0 == i:
ax[i, k].set_title(NAME_MAPPING[method], rotation=90,
fontdict={'fontsize': 4})
if 1 == k:
w = model_weight[A.logistic_regression][rnd_experiment_idx]
heat_map = np.abs(w)
g = _heat_map(result=heat_map.reshape(8, 8), sub_ax=ax[i, k])
# elif 1 == k:
# w = model_weight[A.neural_net][rnd_experiment_idx]
# heat_map = np.abs(w[:, 0] - w[:, 1])
# g = _heat_map(result=heat_map.reshape(8, 8), sub_ax=ax[i, k])
elif 0 == k:
b = _generate_true_feature_importance(pattern_type=pattern_type)
g = _heat_map(result=np.abs(b).reshape((8, 8)), sub_ax=ax[i, k])
ylabel = f'{weight.split("_")[0]}'
g.set_ylabel(f'$\lambda_1=${ylabel}', fontdict={'fontsize': 4})
else:
explanation = explanations[weight][method][rnd_experiment_idx]
if is_sample_based(data=explanation):
heat_map = np.mean(np.abs(explanation), axis=0)
else:
heat_map = np.abs(explanation)
g = _heat_map(result=heat_map.reshape((8, 8)), sub_ax=ax[i, k])
g.set(yticks=[])
g.set(xticks=[])
if 0 != k:
ax[i, k].yaxis.set_visible(False)
if (num_weights - 1) != i:
ax[i, k].xaxis.set_visible(False)
ax[i, k].set_aspect('equal', adjustable='box')
file_name = '_'.join(['heat_map_global_mean', '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=fig, dpi=config.dpi)
x = 1
def sample_based_heat_maps(scores: Dict, config: Config, data: Dict, pattern_type: int,
rnd_sample_idx: int, snrs_of_interest: list) -> None:
def _heat_map(result: np.ndarray, sub_ax: Any):
return sns.heatmap(result, vmin=0, ax=sub_ax, square=True,
cbar=False, cbar_kws={"shrink": shrinking})
local_method_black_list = METHOD_BLACK_LIST + ['gradient']
methods = [item for item in scores[A.sample_based][A.method_names] if
item not in local_method_black_list]
explanations = dict()
model_weights = scores[A.model_weights]
data_weights = deepcopy(scores[A.data_weights])
# Keep only the weights whose lambda_1 prefix is of interest; building a new
# list avoids removing items from the list while iterating over it
data_weights = [d for d in data_weights if d.split('_')[0] in snrs_of_interest]
for j, w in enumerate(data_weights):
explanations_per_method = dict()
for method in methods:
explanations_per_method[method] = scores[A.sample_based][A.explanations][w][method]
explanations[w] = explanations_per_method
print(f'Number of methods: {len(methods)}')
# methods.insert(0, 'model_weights')
# methods.insert(0, 'model_weights_NN')
methods.insert(0, 'binary_mask')
methods.insert(0, 'sample')
num_methods = len(methods)
num_weights = len(data_weights)
num_cols = np.maximum(num_methods, 2)
num_rows = np.maximum(num_weights, 2)
sns.set_theme('paper')
shrinking = 1.0
hspace_values = {5: -0.825, 3: -0.815}
fig, ax = plt.subplots(ncols=num_cols, nrows=num_rows, sharex=True, sharey=True,
gridspec_kw={'wspace': 0.05, 'hspace': hspace_values[len(data_weights)]})
sample = None
for i, weight in enumerate(data_weights):
print(weight)
model_weight = model_weights[weight]
dataset = data[weight]['data']
rnd_experiment_idx = data[weight]['rnd_experiment_idx']
for k, method in enumerate(methods):
if 0 == i:
ax[i, k].set_title(NAME_MAPPING[method], rotation=90,
fontdict={'fontsize': 8})
# if 3 == k:
# w = model_weight[A.logistic_regression][rnd_experiment_idx]
# heat_map = np.abs(w)
# g = _heat_map(result=heat_map.reshape(8, 8), sub_ax=ax[i, k])
# elif 2 == k:
# w = model_weight[A.neural_net][rnd_experiment_idx]
# heat_map = np.abs(w[:, 0] - w[:, 1])
# g = _heat_map(result=heat_map.reshape(8, 8), sub_ax=ax[i, k])
if 1 == k:
b = _generate_true_feature_importance(pattern_type=pattern_type)
g = _heat_map(result=np.abs(b).reshape((8, 8)), sub_ax=ax[i, k])
elif 0 == k:
x = dataset['val']['x'][rnd_sample_idx]
g = sns.heatmap(x.reshape((8, 8)), ax=ax[i, k], center=0.0,
square=True, cbar=False, cbar_kws={"shrink": shrinking})
ylabel = f'{weight.split("_")[0]}'
g.set_ylabel(f'$\lambda_1=${ylabel}', fontdict={'fontsize': 7})
else:
explanation = explanations[weight][method][rnd_experiment_idx]
heat_map = np.abs(explanation[rnd_sample_idx, :])
g = _heat_map(result=heat_map.reshape((8, 8)), sub_ax=ax[i, k])
g.set(yticks=[])
g.set(xticks=[])
if 0 != k:
ax[i, k].yaxis.set_visible(False)
if (num_weights - 1) != i:
ax[i, k].xaxis.set_visible(False)
ax[i, k].set_aspect('equal', adjustable='box')
file_name = '_'.join(['heat_map_sample_based', '.png'])
output_path = join(config.output_dir_plots, file_name)
save_figure(file_path=output_path, fig=fig, dpi=config.dpi)
def overview_correlation_plot(scores: Dict, config: Config) -> None:
nn_data = {'Model Weight': list(), 'SNR': list(), 'Model': list()}
lr_data = {'Model Weight': list(), 'SNR': list(), 'Model': list()}
for data_weight, model_weights in scores[A.model_weights].items():
for l in range(len(model_weights[A.neural_net])):
snr = data_weight.split('_')[0]
nn_data['Model Weight'] += [
(model_weights[A.neural_net][l][:, 1] - model_weights[A.neural_net][l][:,
0]).flatten()]
nn_data['Model'] += ['Single Layer NN']
nn_data['SNR'] += [snr]
lr_data['Model Weight'] += [model_weights[A.logistic_regression][l].flatten()]
lr_data['Model'] += ['Logistic Regression']
lr_data['SNR'] += [snr]
model_weights = pd.merge(left=pd.DataFrame(nn_data), right=
|
pd.DataFrame(lr_data)
|
pandas.DataFrame
|
# This code extract the features from the raw joined dataset (data.csv)
# and save it in the LibSVM format.
# Usage: python construct_features.py
import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
df = pd.read_csv("data.csv", low_memory=False)
# NPU
NPU = df.NPU.copy()
NPU[NPU == ' '] = np.nan
NPU = pd.get_dummies(NPU, prefix="NPU")
# SiteZip
SiteZip = df.SiteZip.copy()
SiteZip = SiteZip.str.replace(',','')
SiteZip = SiteZip.str.replace('\.00','')
SiteZip = SiteZip.replace('0',np.nan)
SiteZip = pd.get_dummies(SiteZip, prefix="SiteZip")
# Submarket1
Submarket1 = df.Submarket1.copy()
Submarket1 = pd.get_dummies(Submarket1, prefix="Submarket1")
# TAX_DISTR
TAX_DISTR = df.TAX_DISTR.copy()
TAX_DISTR[TAX_DISTR == ' '] = np.nan
TAX_DISTR = pd.get_dummies(TAX_DISTR, prefix="TAX_DISTR")
# NBHD
NBHD = df.NBHD.copy()
NBHD[NBHD == ' '] = np.nan
NBHD = pd.get_dummies(NBHD, prefix="NBHD")
# ZONING_NUM
ZONING_NUM = df.ZONING_NUM.copy()
ZONING_NUM[ZONING_NUM == ' '] = np.nan
ZONING_NUM = pd.get_dummies(ZONING_NUM, prefix="ZONING_NUM")
# building_c
building_c = df.building_c.copy()
building_c[building_c == ' '] = np.nan
building_c = pd.get_dummies(building_c, prefix="building_c")
# PROP_CLASS
PROP_CLASS = df.PROP_CLASS.copy()
PROP_CLASS[PROP_CLASS == ' '] = np.nan
PROP_CLASS = pd.get_dummies(PROP_CLASS, prefix="PROP_CLASS")
# Existing_p
Existing_p = df.Existing_p.copy()
Existing_p[Existing_p == ' '] = np.nan
Existing_p = pd.get_dummies(Existing_p, prefix="Existing_p")
# PropertyTy
PropertyTy = df.PropertyTy.copy()
PropertyTy = pd.get_dummies(PropertyTy, prefix="PropertyTy")
# secondaryT
secondaryT = df.secondaryT.copy()
secondaryT[secondaryT == ' '] = np.nan
secondaryT = pd.get_dummies(secondaryT, prefix="secondaryT")
# LUC
LUC = df.LUC.copy()
LUC[LUC == ' '] = np.nan
LUC = pd.get_dummies(LUC, prefix="LUC")
# Taxes_Per_
Taxes_Per_ = df.Taxes_Per_.copy()
Taxes_Per_zero = (Taxes_Per_ == "0").apply(int)
Taxes_Per_zero.name = 'Taxes_Per_zero'
Taxes_Per_ = Taxes_Per_.str.replace(',','').astype(float)
Taxes_Per_ = np.log1p(Taxes_Per_)
Taxes_Per_ = Taxes_Per_ / Taxes_Per_.max()
Taxes_Per_ = pd.concat([Taxes_Per_, Taxes_Per_zero], axis=1)
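# The pattern above (zero-indicator flag + log1p + divide-by-max) repeats for
# several numeric columns below. A small helper capturing it is sketched here
# for clarity only; it is an illustrative addition and the script keeps its
# explicit per-column blocks instead of calling it.
def scale_with_zero_flag(series, zero_value="0"):
    zero_flag = (series == zero_value).apply(int)
    zero_flag.name = series.name + "_zero"
    values = series.str.replace(',', '').astype(float) if series.dtype == object else series.astype(float)
    values = np.log1p(values)
    values = values / values.max()
    return pd.concat([values, zero_flag], axis=1)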
# Taxes_Tota
Taxes_Tota = df.Taxes_Tota.copy()
Taxes_Tota_zero = (Taxes_Tota == "0").apply(int)
Taxes_Tota_zero.name = 'Taxes_Tota_zero'
Taxes_Tota = Taxes_Tota.str.replace(',','').astype(float)
Taxes_Tota = np.log1p(Taxes_Tota)
Taxes_Tota = Taxes_Tota / Taxes_Tota.max()
Taxes_Tota = pd.concat([Taxes_Tota, Taxes_Tota_zero], axis=1)
# TOT_APPR
TOT_APPR = df.TOT_APPR.copy()
TOT_APPR_zero = (TOT_APPR == "0").apply(int)
TOT_APPR_zero.name = 'TOT_APPR_zero'
TOT_APPR = TOT_APPR.str.replace(',','').astype(float)
TOT_APPR = np.log1p(TOT_APPR)
TOT_APPR = TOT_APPR / TOT_APPR.max()
TOT_APPR = pd.concat([TOT_APPR, TOT_APPR_zero], axis=1)
# VAL_ACRES
VAL_ACRES = df.VAL_ACRES.copy()
VAL_ACRES_zero = (VAL_ACRES == 0).apply(int)
VAL_ACRES_zero.name = 'VAL_ACRES_zero'
VAL_ACRES = np.log1p(VAL_ACRES)
VAL_ACRES = VAL_ACRES / VAL_ACRES.max()
VAL_ACRES = pd.concat([VAL_ACRES, VAL_ACRES_zero], axis=1)
# For_Sale_P
For_Sale_P = df.For_Sale_P.copy()
For_Sale_P_notNA = (For_Sale_P != " ").apply(int)
For_Sale_P_notNA.name = 'For_Sale_P_notNA'
For_Sale_P[For_Sale_P == ' '] = 0
For_Sale_P = For_Sale_P.astype(float)
For_Sale_P = np.log1p(For_Sale_P)
For_Sale_P = For_Sale_P / For_Sale_P.max()
For_Sale_P = pd.concat([For_Sale_P, For_Sale_P_notNA], axis=1)
# Last_Sale1
Last_Sale1 = df.Last_Sale1.copy()
Last_Sale1_zero = (Last_Sale1 == "0").apply(int)
Last_Sale1_zero.name = "Last_Sale1_zero"
Last_Sale1 = Last_Sale1.str.replace(',','').astype(float)
Last_Sale1 = np.log1p(Last_Sale1)
Last_Sale1 = (Last_Sale1 - Last_Sale1.min()) / (Last_Sale1.max() - Last_Sale1.min())
Last_Sale1 = pd.concat([Last_Sale1, Last_Sale1_zero], axis=1)
# yearbuilt
yearbuilt = df.yearbuilt.copy()
yearbuilt_zero = (yearbuilt == "0").apply(int)
yearbuilt_zero.name = "yearbuilt_zero"
yearbuilt[yearbuilt == "0"] = np.nan
yearbuilt = yearbuilt.str.replace(',','').astype(float)
yearbuilt = (yearbuilt - yearbuilt.min()) / (yearbuilt.max() - yearbuilt.min())
yearbuilt = yearbuilt.fillna(0)
yearbuilt = pd.concat([yearbuilt, yearbuilt_zero], axis=1)
# year_reno
year_reno = df.year_reno.copy()
reno = (year_reno != "0").apply(int)
reno.name = "reno"
year_reno[year_reno == "0"] = np.nan
year_reno = year_reno.str.replace(',','').astype(float)
year_reno = (year_reno - year_reno.min()) / (year_reno.max() - year_reno.min())
year_reno = year_reno.fillna(0)
year_reno =
|
pd.concat([year_reno, reno], axis=1)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
# In[77]:
import pandas as pd
from datetime import datetime
import requests
import re
from urllib.request import urlopen
from lxml import etree
import io
import numpy as np
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[78]:
url = "https://www.bcu.gub.uy/Estadisticas-e-Indicadores/Indice_Cambio_Real/TCRE.xls"
r = requests.get(url, allow_redirects=True, verify=False)
df = pd.read_excel(r.content,skiprows = 1, sheet_name = 'Hoja1', header=[6])
df = df.replace(np.nan, "")
df.columns = df.columns + " " + df.loc[0]
df.columns = df.columns.str.replace(".1", " - ")
df.columns = df.columns.str.replace(".2", " - ")
df.columns = df.columns.str.replace("Efectivo Global", "Efectivo - Global")
df = df.replace("", np.nan)
# In[79]:
#Elimino NaNs
df = df.dropna(how='all', subset=df.columns[2:])
df = df.loc[:, ~(df == '(*)').any()]
df = df.dropna(how='all')
df = df.iloc[1:]
# In[80]:
df.columns = df.columns.str.replace("Unnamed: - ", "Date")
df['Date']=
|
pd.to_datetime(df['Date'])
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
# TODO
# Add a feature that will compare a df lib import (from self.import_lib) find
# STATICMETHODS
# refactor to use this method
# Move these to a tools lib??
# Expand on for new libs
# compile all sheets does not work
class CardDB(object):
"""
Deck list or card library class
Methods:
:method add_card: Add card to lib
:method remove_card: Remove card to lib
:method replace_card: Replace card_out with card_in
:method save: Save CardDB as card project (excel)
:method new_lib: Make new lib
:method import_lib: Import lib
:method replace_lib: replace a lib with a new one
:method del_lib: delete lib
"""
def __init__(self, filename,
parser='read_excel',
primary_sheet=0,
change_log='Change_Log',
sideboard=None,
maybeboard=None,
expand_composite=False,
expand_on='Count',
supporting_sheet_index='Date',
populate_all_sheets=True,
**kwargs):
"""
Initialize CardDB from a saved lib
:param filename: str() - filename to load lib into CardDb
:param parser: pd method to read filename
:param primary_sheet: str()/int() - location of decklist. Used only
for 'read_excel' parser
:param change_log: str()/int() - name of change_log sheet. Used only
for 'read_excel' parser
:param sideboard: str()/int() - name of sideboard sheet. Used only for
'read_excel' parser
:param maybeboard: str()/int() - name of maybeboard sheet. Used only
for 'read_excel' parser
:param expand_composite: bool - Used with 'expand_on' str. If
multiples are used, this bool will expand each multiple.
(e.g. 5 Islands expands to five independent entries of Island)
:param expand_on: str() - Used with 'expand_composite' bool. Column
name to expand index
:param supporting_sheet_index: str() - Index to be aligned for all
supporting indicies
:param populate_all_sheets: bool - Used to populate optional sheets,
such as
change_log, sideboard, and maybeboard
:param kwargs: dict() - keyword arguments to use in parser method
"""
self.primary_sheet = None
self.ExcelFileIO = None
self.pandas_sheets = None
# Private
def _set_sheet_attr(sheet_name_, loc_):
if loc_ is None:
setattr(self, sheet_name_, None)
# Check for required sheets
if sheet_name_ == 'change_log':
print('Warning! Change Log not set!\nCreating one...')
self._format_change_log()
elif (isinstance(loc_, (int, float))
and parser == 'read_excel'):
self.pandas_sheets[sheet_name_] = file_sheet_names.pop(loc_)
elif parser == 'read_excel' and isinstance(loc_, str):
file_sheet_names.pop(file_sheet_names.index(loc_))
def _parse_sheet_name(sheet_name_, loc_):
if loc_ is not None:
if parser == 'read_excel':
kwargs['sheet_name'] = loc_
sheet = parser_handle(self.filename, **kwargs)
# Handle primary differently
if sheet_name_ == 'primary_sheet':
if expand_composite:
sheet = self._expand_df(sheet, expand_on=expand_on)
else:
sheet = self._sort_lib_on_key(sheet,
key=supporting_sheet_index,
drop=True)
setattr(self, sheet_name_, sheet)
def _populate_empty_sheets():
empty_sheets = [sheet_name_ for sheet_name_, loc_ in
self.pandas_sheets.items() if loc_ is None]
for sheet in empty_sheets:
self.new_lib(sheet_name=sheet, force=True)
# kwargs
self.filename = filename
parser_handle = getattr(pd, parser)
if 'sheet_name' in kwargs.keys():
primary_sheet = kwargs.pop('sheet_name')
if parser == 'read_excel':
file_sheet_names = pd.ExcelFile(filename).sheet_names
self.ExcelFileIO = pd.ExcelFile(filename)
# Housekeeping - populate_sheets into housekeeping var
self.pandas_sheets = {'primary_sheet': primary_sheet,
'change_log': change_log,
'sideboard': sideboard,
'maybeboard': maybeboard}
for sheet_name, location in self.pandas_sheets.items():
_set_sheet_attr(sheet_name, location)
# Parse housekeeping sheets into vars
for sheet_name, location in self.pandas_sheets.items():
_parse_sheet_name(sheet_name, location)
# check populate_all_sheets bool
if populate_all_sheets:
_populate_empty_sheets()
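# A hypothetical usage sketch (the file name and sheet names below are
# assumptions for illustration, not part of this module):
#   deck = CardDB('my_deck.xlsx', primary_sheet='Decklist', sideboard='Sideboard')
#   deck.save('my_deck_backup.xlsx')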
def save(self,
filename=None,
method='to_excel',
sheet_name='primary_sheet',
index=False):
"""
Save Card_DB Object
:param filename: str() - save location. If None, saves to self.filename
:param method: str() - Method used to save pd.df obj, must be a
pd.df method
:param sheet_name: pd.df object - Only used to save formats that are
not to_excel - Save specific object to filename.
:param index: Used in save method to insert index to spreadsheet,
if True
:return: None
"""
if filename is None:
filename = self.filename
if method == 'to_excel':
writer =
|
pd.ExcelWriter(filename)
|
pandas.ExcelWriter
|
"""Functions to interactively cut the data into buckets and plot the results"""
__version__ = '0.1.0' # Ensure this is kept in-sync with VERSION in the SETUP.PY
############
# Contents #
############
# - Setup
# - Assign buckets
# - Group and aggregate
# - Set coordinates
# - Pipeline functions
# - Plotting
# - Running interactively
#########
# Setup #
#########
# Import built-in modules
import functools
import inspect
import warnings
# Import external modules
import numpy as np
import pandas as pd
import bokeh
import bokeh.palettes
##################
# Assign buckets #
##################
def divide_n(df, bucket_var, n_bins=10):
"""
Assign each row of `df` to a bucket by dividing the range of the
`bucket_var` column into `n_bins` number of equal width intervals.
df: DataFrame
bucket_var: Name of the column of df to use for dividing.
n_bins: positive integer number of buckets.
Returns: df with the additional `bucket` column
The `bucket` column is Categorical data type consisting of Intervals
that partition the interval from just below min(bucket_var) to
max(bucket_var).
"""
df_w_buckets = df.assign(
bucket=lambda df: pd.cut(df[bucket_var], bins=n_bins)
)
return(df_w_buckets)
def custom_width(df, bucket_var, width, boundary=0, first_break=None, last_break=None):
"""
Assign each row of `df` to a bucket by dividing the range of the
`bucket_var` column into `n_bins` number of equal width intervals.
df: DataFrame
bucket_var: Name of the column of df to use for dividing.
width: Positive width of the buckets
boundary: Edge of one of the buckets, if the data extended that far
first_break: All values below this (if any) are grouped into one bucket
last_break: All values above this (if any) are grouped into one bucket
Returns: df with the additional `bucket` column
The `bucket` column is Categorical data type consisting of Intervals
that partition the interval from just below min(bucket_var) to
max(bucket_var).
"""
var_min, var_max = df[bucket_var].min(), df[bucket_var].max()
extended_min = var_min - 0.001 * np.min([(var_max - var_min), width])
# Set bucket edges
start = np.floor((extended_min - boundary) / width) * width + boundary
stop = np.ceil((var_max - boundary) / width) * width + boundary
num = int((stop - start) / width) + 1
breaks_all = np.array([
extended_min,
*np.linspace(start, stop, num)[1:-1],
var_max,
])
# Clip lower and upper buckets
breaks_clipped = breaks_all
if first_break is not None or last_break is not None:
breaks_clipped = np.unique(np.array([
breaks_all.min(),
*np.clip(breaks_all, first_break, last_break),
breaks_all.max(),
]))
breaks_clipped
df_w_buckets = df.assign(
bucket=lambda df: pd.cut(df[bucket_var], bins=breaks_clipped)
)
return(df_w_buckets)
def weighted_quantiles(df, bucket_var, n_bins=10, bucket_wgt=None, validate=True):
"""
Assign each row of `df` to a bucket by splitting column `bucket_var`
into `n_bins` weighted quantiles, weighted by `bucket_wgt`.
bucket_var: Column name of the values to find the quantiles.
Must not be constant (i.e. just one value for all rows).
n_bins: Target number of quantiles, but could end up with fewer because
there are only a finite number of potential cut points.
bucket_wgt: Weights to use to calculate the weighted quantiles.
If None (default) or 'const' then equal weights are used for all rows.
Must be non-negative with at least one positive value.
validate: boolean. Set to False to omit validation checks on inputs.
Returns: df with the additional `bucket` column
The `bucket` column is Categorical data type consisting of Intervals
that partition the interval from 0 to sum(bucket_wgt).
"""
if bucket_wgt is None:
bucket_wgt = 'const'
if bucket_wgt == 'const':
df = df.assign(const = 1)
if validate:
if df[bucket_var].nunique() == 1:
raise ValueError(
f"weighted_quantiles: bucket_var column '{bucket_var}' "
"must not be constant"
)
if (df[bucket_wgt] < 0).any() or (df[bucket_wgt] == 0).all():
raise ValueError(
f"weighted_quantiles: bucket_wgt column '{bucket_wgt}' "
"must be non-negative with at least one strictly positive value"
)
res = df.sort_values(bucket_var).assign(
**{'cum_rows_' + col: lambda df: (
df[bucket_wgt].cumsum()
) for col in [bucket_wgt]},
# Ensure that the quantiles cannot split rows with the same value of bucket_var
**{'cum_' + col: lambda df: (
df.groupby(bucket_var)['cum_rows_' + col].transform('max')
) for col in [bucket_wgt]},
bucket=lambda df: pd.qcut(df['cum_' + bucket_wgt], q=n_bins, duplicates='drop'),
)
return(res)
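# A minimal usage sketch of the two bucketing helpers above, on made-up toy
# data (the 'age' and 'exposure' columns are assumptions for illustration).
# Wrapped in a function so importing this module stays side-effect free.
def _demo_divide_n_vs_weighted_quantiles():
    rng = np.random.default_rng(0)
    toy = pd.DataFrame({
        'age': rng.uniform(18, 90, 500),
        'exposure': rng.uniform(0.1, 1.0, 500),
    })
    # Five equal-width buckets over the range of `age`
    equal_width = divide_n(toy, bucket_var='age', n_bins=5)
    # Five buckets holding roughly equal total `exposure`
    by_exposure = weighted_quantiles(toy, bucket_var='age', n_bins=5, bucket_wgt='exposure')
    return equal_width['bucket'].value_counts(), by_exposure['bucket'].value_counts()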
def all_levels(df, bucket_var, include_levels=None, ret_map=False):
"""
Assign each row of `df` to a bucket according to the unique
values of `bucket_var`.
bucket_var: Column name of the values to split on.
Missing values will not be assigned to an interval.
include_levels: Level values to guarantee to include
even if they do not appear in the values of bucket_var.
Missing values are ignored.
Returns:
df with the additional `bucket` column
The `bucket` column is Categorical data type consisting of
Intervals that partition a range, plus possible NaN.
If ret_map is True, also return a Series mapping bucket values
to bucket intervals.
"""
# Format inputs
if include_levels is not None:
if not isinstance(include_levels, pd.Series):
include_levels = pd.Series(include_levels)
# Get the mapping from level value to an appropriate interval
buckets_vals = pd.concat([
df[bucket_var], include_levels
]).drop_duplicates().sort_values(
).reset_index(drop=True).dropna().to_frame('val')
# Add a column of intervals (there may be some intervals with no rows)
if np.issubdtype(df[bucket_var].dtype, np.number):
# If the values are numeric then take the smallest width
min_diff = np.min(np.diff(buckets_vals['val']))
buckets_map = buckets_vals.assign(
interval=lambda df: pd.cut(df['val'], pd.interval_range(
start=df['val'].min() - min_diff/2,
end=df['val'].max() + min_diff/2,
freq=min_diff
))
)
else:
buckets_map = buckets_vals.assign(
interval=lambda df: pd.interval_range(start=0., periods=df.shape[0], freq=1.)
)
# Convert to a Series
buckets_map = buckets_map.reset_index(drop=True)
# Assign buckets and map to intervals
res = df.assign(
bucket=lambda df: df[bucket_var].astype(
pd.CategoricalDtype(buckets_map['val'])
).cat.rename_categories(
buckets_map.set_index('val')['interval']
)
)
if ret_map:
return(res, buckets_map)
return(res)
#######################
# Group and aggregate #
#######################
def group_and_agg(df_w_buckets, x_var, stat_wgt=None, stat_vars=None):
"""
Group by bucket and calculate aggregate values in each bucket
df_w_buckets: Result of an 'assign_buckets' function.
i.e. a DataFrame with a `bucket` column the is Categorical
with Interval categories that partition a range.
Rows with missing `bucket` value are excluded from the grouping.
x_var: Column name of variable that will be plotted on the x axis.
stat_wgt: Weights for the weighted distributions of stat_vars.
If None (default) or 'const' then equal weights are used for all rows.
Must be non-negative with at least one positive value.
stat_vars: Column names for which weighted sums and averages are calculated per bucket.
If None (default) or empty list, no extra values are calculated.
Returns: Aggregated DataFrame for plotting.
"""
# Set defaults
if stat_wgt is None:
stat_wgt = 'const'
if stat_wgt == 'const':
df_w_buckets = df_w_buckets.assign(const = 1)
if stat_vars is None:
stat_vars = []
# Format inputs and defaults
if not isinstance(stat_vars, list):
stat_vars = [stat_vars]
# Variables for which we want the (weighted) distribution in each bucket
agg_vars_all = stat_vars
if np.issubdtype(df_w_buckets[x_var].dtype, np.number):
agg_vars_all = [x_var] + agg_vars_all
# Ensure they are unique (and maintain order)
agg_vars = pd.Series(agg_vars_all).drop_duplicates()
df_agg = df_w_buckets.assign(
**{col + '_x_wgt': (
lambda df, col=col: df[col] * df[stat_wgt]
) for col in agg_vars},
).groupby(
# Group by the buckets
'bucket', sort=False
).agg(
# Aggregate calculation for rows in each bucket
n_obs=('bucket', 'size'), # It is possible that a bucket contains zero rows
**{col: (col, 'sum') for col in [stat_wgt]},
**{stat_var + '_wgt_sum': (
stat_var + '_x_wgt', 'sum'
) for stat_var in agg_vars},
x_min=(x_var, 'min'),
x_max=(x_var, 'max'),
).pipe(
# Convert the index to an IntervalIndex
lambda df: df.set_index(df.index.categories)
).sort_index().assign(
# Calculate the weighted average of the stats
**{stat_var + '_wgt_av': (
lambda df, stat_var=stat_var: df[stat_var + '_wgt_sum'] / df[stat_wgt]
) for stat_var in agg_vars},
)
return(df_agg)
###################
# Set coordinates #
###################
# Functions to set the x-axis edges `x_left` and `x_right`
def x_edges_min_max(df_agg):
"""
Set the x-axis edges to be the min and max values of `x_var`.
Does not make sense to use this option when min and max are not numeric.
This might result in zero width intervals, in which case a warning is given.
"""
if not np.issubdtype(df_agg['x_min'].dtype, np.number):
raise ValueError(
"\n\tx_edges_min_max: This method can only be used when"
"\n\tx_min and x_max are numeric data types."
)
if (df_agg['x_min'] == df_agg['x_max']).any():
warnings.warn(
"x_edges_min_max: At least one bucket has x_min == x_max, "
"so using this method will result in zero width intervals."
)
res = df_agg.assign(
# Get the coordinates for plot: interval edges
x_left=lambda df: df['x_min'],
x_right=lambda df: df['x_max'],
)
return(res)
def x_edges_interval(df_agg):
"""Set the x-axis edges to be the edges of the bucket interval"""
res = df_agg.assign(
x_left=lambda df: df.index.left,
x_right=lambda df: df.index.right,
)
return(res)
def x_edges_unit(df_agg):
"""
Set the x-axis edges to be the edges of equally spaced intervals
of width 1.
"""
res = df_agg.assign(
interval=lambda df: pd.interval_range(start=0., periods=df.shape[0], freq=1.),
x_left=lambda df: pd.IntervalIndex(df['interval']).left,
x_right=lambda df: pd.IntervalIndex(df['interval']).right,
).drop(columns='interval')
return(res)
# Functions to set the x-axis point
def x_point_mid(df_agg):
"""Set the x_point to be mid-way between x_left and x_right"""
res = df_agg.assign(
x_point=lambda df: (df['x_left'] + df['x_right']) / 2.
)
return(res)
def x_point_wgt_av(df_agg, x_var):
"""
Set the x_point to be the weighted average of x_var within the bucket,
weighted by stat_wgt.
"""
if not (x_var + '_wgt_av') in df_agg.columns:
raise ValueError(
"\n\tx_point_wgt_av: This method can only be used when"
"\n\tthe weighted average has already been calculated."
)
res = df_agg.assign(
x_point=lambda df: df[x_var + '_wgt_av']
)
return(res)
# Functions to set the x-axis labels
def x_label_none(df_agg):
res = df_agg.copy()
if 'x_label' in df_agg.columns:
res = res.drop(columns='x_label')
return(res)
######################
# Pipeline functions #
######################
# Constant to store pipeline functions
pipe_funcs_df = pd.DataFrame(
columns=['task', 'func', 'alias'],
data = [
('x_edges', x_edges_interval, ['interval']),
('x_edges', x_edges_min_max, ['min_max']),
('x_edges', x_edges_unit, ['unit']),
('x_point', x_point_mid, ['mid']),
('x_point', x_point_wgt_av, ['wgt_av']),
('x_label', x_label_none, ['none']),
],
).assign(
name=lambda df: df['func'].apply(lambda f: f.__name__),
arg_names=lambda df: df['func'].apply(
lambda f: inspect.getfullargspec(f)[0][1:]
),
).set_index(['task', 'name'])
def get_pipeline_func(
task, search_term,
kwarg_keys=None, calling_func='',
pipe_funcs_df=pipe_funcs_df
):
"""
TODO: Write docstring <<<<<<<<<<<<<
"""
# Set defaults
if kwarg_keys is None:
kwarg_keys = []
# Find function row
task_df = pipe_funcs_df.loc[task,:]
func_row = task_df.loc[task_df.index == search_term, :]
if func_row.shape[0] != 1:
func_row = task_df.loc[[search_term in ali for ali in task_df.alias], :]
if func_row.shape[0] != 1:
raise ValueError(
f"\n\t{calling_func}: Cannot find '{search_term}' within the"
f"\n\tavailable '{task}' pipeline functions."
)
# Check arguments are supplied
for req_arg in func_row['arg_names'][0]:
if not req_arg in kwarg_keys:
raise ValueError(
f"\n\t{calling_func}: To use the '{search_term}' as a '{task}' pipeline"
f"\n\tfunction, you must specify '{req_arg}' as a keyword argument."
)
return(func_row['func'][0], func_row['arg_names'][0])
def add_x_coords(df_agg, x_edges=None, x_point=None, x_label=None, **kwargs):
"""
Given a DataFrame where each row is a bucket, add x-axis
properties to be used for plotting. See pipe_funcs_df for
available options.
x_edges: How to position the x-axis edges.
Default: 'interval'
x_point: Where to position each bucket point on the x-axis.
Default: 'mid'
x_label: Option for x-axis label.
Default: 'none'
**kwargs: Additional arguments to pass to the functions.
"""
# Set variables for use throughout the function
calling_func = 'add_x_coords'
kwarg_keys = list(kwargs.keys())
# Set defaults
if x_edges is None:
x_edges = 'interval'
if x_point is None:
x_point = 'mid'
if x_label is None:
x_label = 'none'
# Get pipeline functions
x_edges_func = [
functools.partial(full_func, **{arg_name: kwargs[arg_name] for arg_name in arg_names})
for full_func, arg_names in [get_pipeline_func('x_edges', x_edges, kwarg_keys, calling_func)]
][0]
x_point_func = [
functools.partial(full_func, **{arg_name: kwargs[arg_name] for arg_name in arg_names})
for full_func, arg_names in [get_pipeline_func('x_point', x_point, kwarg_keys, calling_func)]
][0]
x_label_func = [
functools.partial(full_func, **{arg_name: kwargs[arg_name] for arg_name in arg_names})
for full_func, arg_names in [get_pipeline_func('x_label', x_label, kwarg_keys, calling_func)]
][0]
# Apply the functions
res = df_agg.pipe(
lambda df: x_edges_func(df)
).pipe(
lambda df: x_point_func(df)
).pipe(
lambda df: x_label_func(df)
)
return(res)
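# End-to-end sketch of the pipeline defined above: assign buckets, aggregate
# per bucket, then attach x-axis coordinates for plotting. The column names
# 'age', 'exposure' and 'claim' are assumptions for illustration and are not
# part of this module.
def _demo_bucket_pipeline(df):
    df_agg = (
        weighted_quantiles(df, bucket_var='age', n_bins=10, bucket_wgt='exposure')
        .pipe(group_and_agg, x_var='age', stat_wgt='exposure', stat_vars=['claim'])
        .pipe(add_x_coords, x_edges='interval', x_point='wgt_av', x_var='age')
    )
    # df_agg now holds one row per bucket with n_obs, the summed weight,
    # age_wgt_av / claim_wgt_av, and x_left / x_right / x_point for plotting.
    return df_agg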
############
# Plotting #
############
def expand_lims(df, pct_buffer_below=0.05, pct_buffer_above=0.05, include_vals=None):
"""
Find the range over all columns of df. Then expand these
below and above by a percentage of the total range.
df: Consider all values in all columns
include_vals: Additional values to consider
Returns: Series with rows 'start' and 'end' of the expanded range
"""
# If a Series is passed, convert it to a DataFrame
try:
df = df.to_frame()
except:
pass
# Case where df has no columns, just fill in default vals
if df.shape[1] == 0:
res_range = pd.Series({'start': 0, 'end': 1})
return(res_range)
if include_vals is None:
include_vals = []
if not isinstance(include_vals, list):
include_vals = [include_vals]
res_range = pd.concat([
df.reset_index(drop=True),
# Add a column of extra values to the DataFrame to take these into account
|
pd.DataFrame({'_extra_vals': include_vals})
|
pandas.DataFrame
|
def load_blood_data(train=True, SEED=97, scale = False,
minmax = False,
norm = False,
nointercept = False,
engineering = False):
"""
Load training and test datasets
for DrivenData's Predict Blood Donations warmup contest
The training data is shuffled before it's returned; test data is not
Note: patsy returns float64 data; Theano requires float32 so conversion
will be required; the y values are converted to int32, so they're OK
Arguments
---------
train (bool) if True
y_train, X_train = load_blood_data(train=True, ...
if False
X_test, IDs = load_blood_data(train=False, ...
SEED (int) random seed
scale (bool) if True, scale the data to mean zero, var 1; standard normal
minmax (2-tuple) to scale the data to a specified range, provide a
2-tuple (min, max)
norm (bool) if True, L2 normalize for distance and similarity measures
nointercept (bool) if True, patsy will not create an intercept
engineering (bool) if True, add engineered features (e.g. moRatio = moSinceLast / moSinceFirst)
Usage
-----
from load_blood_data import load_blood_data
"""
from sklearn.utils import shuffle
from patsy import dmatrices, dmatrix
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Normalizer
import numpy as np
import pandas as pd
import re
global scaler
global minmaxer
global normalizer
if (scale and minmax): raise ValueError("cannot specify both scale and minmax")
if (scale and norm): raise ValueError("cannot specify both scale and norm")
if (norm and minmax): raise ValueError("cannot specify both norm and minmax")
if type(train) is not bool: raise ValueError("train must be boolean")
if type(SEED) is not int: raise ValueError("SEED must be int")
if type(scale) is not bool: raise ValueError("scale must be boolean")
if type(norm) is not bool: raise ValueError("norm must be boolean")
if type(nointercept) is not bool: raise ValueError("nointercept must be boolean")
if type(engineering) is not bool: raise ValueError("engineering must be boolean")
# ------------- read the file -------------
file_name = '../input/train.csv' if train else '../input/test.csv'
data = pd.read_csv(file_name)
# ------------- shorten the column names -------------
column_names = ['ID','moSinceLast','numDonations','volume','moSinceFirst','donated']
data.columns = column_names if train else column_names[:-1]
# ------------- create new variables -------------
if engineering:
# Ratio of moSinceLast / moSinceFirst = moRatio
data['moRatio'] =
|
pd.Series(data.moSinceLast / data.moSinceFirst, index=data.index)
|
pandas.Series
|
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from tqdm import tqdm_notebook as tqdm
import sobol_seq
from .batch_base import BatchBase
class SobolSearch(BatchBase):
"""
Implementation of Sobol Sequence.
Parameters
----------
:type para_space: dict or list of dictionaries
:param para_space: It has three types:
Continuous:
Specify `Type` as `continuous`, and include the keys of `Range` (a list with lower-upper elements pair) and
`Wrapper`, a callable function for wrapping the values.
Integer:
Specify `Type` as `integer`, and include the keys of `Mapping` (a list with all the sortted integer elements).
Categorical:
Specify `Type` as `categorical`, and include the keys of `Mapping` (a list with all the possible categories).
:type max_runs: int, optional, default=100
:param max_runs: The maximum number of trials to be evaluated. When this values is reached,
then the algorithm will stop.
:type estimator: estimator object
:param estimator: This is assumed to implement the scikit-learn estimator interface.
:type cv: cross-validation method, an sklearn object.
:param cv: e.g., `StratifiedKFold` and KFold` is used.
:type scoring: string, callable, list/tuple, dict or None, optional, default=None
:param scoring: A sklearn type scoring function.
If None, the estimator's default scorer (if available) is used. See the package `sklearn` for details.
:type refit: boolean, or string, optional, default=True
:param refit: It controls whether to refit an estimator using the best found parameters on the whole dataset.
:type n_jobs: int or None, optional, optional, default=None
:param n_jobs: Number of jobs to run in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. See the package `joblib` for details.
:type random_state: int, optional, default=0
:param random_state: The random seed for optimization.
:type verbose: boolean, optional, default=False
:param verbose: It controls whether the searching history will be printed.
Examples
----------
>>> import numpy as np
>>> from sklearn import svm
>>> from sklearn import datasets
>>> from sequd import SobolSearch
>>> from sklearn.model_selection import KFold
>>> iris = datasets.load_iris()
>>> ParaSpace = {'C':{'Type': 'continuous', 'Range': [-6, 16], 'Wrapper': np.exp2},
'gamma': {'Type': 'continuous', 'Range': [-16, 6], 'Wrapper': np.exp2}}
>>> estimator = svm.SVC()
>>> cv = KFold(n_splits=5, random_state=0, shuffle=True)
>>> clf = SobolSearch(ParaSpace, max_runs=100, estimator=estimator, cv=cv,
scoring=None, n_jobs=None, refit=False, random_state=0, verbose=False)
>>> clf.fit(iris.data, iris.target)
Attributes
----------
:vartype best_score\_: float
:ivar best_score\_: The best average cv score among the evaluated trials.
:vartype best_params\_: dict
:ivar best_params\_: Parameters that reaches `best_score_`.
:vartype best_estimator\_: sklearn estimator
:ivar best_estimator\_: The estimator refitted based on the `best_params_`.
Not available if estimator=None or `refit=False`.
:vartype search_time_consumed\_: float
:ivar search_time_consumed\_: Seconds used for whole searching procedure.
:vartype refit_time\_: float
:ivar refit_time\_: Seconds used for refitting the best model on the whole dataset.
Not available if estimator = None or `refit=False`.
"""
def __init__(self, para_space, max_runs=100, estimator=None, cv=None,
scoring=None, refit=True, n_jobs=None, random_state=0, verbose=False):
super(SobolSearch, self).__init__(para_space, max_runs, n_jobs, verbose)
self.cv = cv
self.refit = refit
self.scoring = scoring
self.estimator = estimator
self.random_state = random_state
self.method = "Sobol Search"
def _run(self, obj_func):
"""
Main loop for searching the best hyperparameters.
"""
para_set_ud = sobol_seq.i4_sobol_generate(self.extend_factor_number, self.max_runs)
para_set_ud = pd.DataFrame(para_set_ud, columns=self.para_ud_names)
para_set = self._para_mapping(para_set_ud)
para_set_ud.columns = self.para_ud_names
candidate_params = [{para_set.columns[j]: para_set.iloc[i, j]
for j in range(para_set.shape[1])}
for i in range(para_set.shape[0])]
if self.verbose:
if self.n_jobs > 1:
out = Parallel(n_jobs=self.n_jobs)(delayed(obj_func)(parameters) for parameters in tqdm(candidate_params))
else:
out = []
for parameters in tqdm(candidate_params):
out.append(obj_func(parameters))
out = np.array(out)
else:
if self.n_jobs > 1:
out = Parallel(n_jobs=self.n_jobs)(delayed(obj_func)(parameters) for parameters in candidate_params)
else:
out = []
for parameters in candidate_params:
out.append(obj_func(parameters))
out = np.array(out)
self.logs = para_set_ud.to_dict()
self.logs.update(para_set)
self.logs.update(
|
pd.DataFrame(out, columns=["score"])
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
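# Quick tabular complement to the plots below (a minimal sketch; it only uses
# the 'supervised', 'healthy_used' and 'balanced_accuracy' columns built above).
summary = (sepsis_metrics
           .groupby(['supervised', 'healthy_used'])['balanced_accuracy']
           .mean()
           .reset_index()
           .sort_values(['supervised', 'healthy_used']))
print(summary.head(10))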
# In[13]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[14]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[15]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[16]:
sepsis_stat_df = create_dataset_stat_df(sepsis_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'sepsis')
sepsis_stat_df.tail(5)
# In[17]:
ggplot(sepsis_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[18]:
plot = ggplot(sepsis_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Effect of All Sepsis Data')
plot
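# The dashed line at 0.5 marks chance-level balanced accuracy for a binary task,
# so points above it indicate better-than-random classification.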
# ## Same Distribution Tuberculosis
# In[19]:
in_files = glob.glob('../../results/keep_ratios.tb*be_corrected.tsv')
print(in_files[:5])
# In[20]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics = tb_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
tb_metrics['healthy_used'] = tb_metrics['healthy_used'].round(1)
tb_metrics
# In[21]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[22]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[23]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[24]:
tb_stat_df = create_dataset_stat_df(tb_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'tb')
tb_stat_df.tail(5)
# In[55]:
ggplot(tb_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[25]:
plot = ggplot(tb_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot
# ## Results from Small Datasets
# In[57]:
in_files = glob.glob('../../results/small_subsets.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[58]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[59]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[60]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[61]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
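# "Crossover point" here refers to the training-set size at which one model's
# smoothed balanced-accuracy curve overtakes another's in the plot above.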
# ## Small Training Set TB
# In[62]:
in_files = glob.glob('../../results/small_subsets.tb*be_corrected.tsv')
print(in_files[:5])
# In[63]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[64]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[65]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size vs Models (equal label counts)')
print(plot)
# In[66]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth(method='loess')
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB (lack of a) Crossover Point')
plot
# ## Small training sets without be correction
# In[67]:
in_files = glob.glob('../../results/small_subsets.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[68]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance, receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return the table data with the constraint columns replaced by a single joint column of UUIDs.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the joint column of UUIDs (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return the table data with the constraint columns replaced by a single joint column of UUIDs.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
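# Together with ``test_transform`` above, this covers the round trip:
# ``transform`` maps each (b, c) combination to a UUID and
# ``reverse_transform`` maps the UUIDs back via ``_uuids_to_combinations``.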
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_column`` is the column name followed by the separator token ('a#')
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_column`` is both column names joined by the token ('a#b')
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
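# Worked example of the encoding checked above: high - low is 3 in every row,
# so the diff column holds np.log(3 + 1) = np.log(4).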
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
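# Worked example: the rows are exactly one second apart, i.e. 1_000_000_000 ns,
# so the diff column holds np.log(1_000_000_000 + 1) = np.log(1_000_000_001).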
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_transform_high_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, high_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_low_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, low_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_low_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, low_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
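# Worked example: only the second row violates low=3 (b == 1), so it is
# reconstructed as low + (np.exp(np.log(4)) - 1) = 3 + 3 = 6; the valid rows
# keep their original values.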
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_high_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, high_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_low_is_scalar`` variable to ``True`` and the
``_high_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- high = 'a'
- drop = None
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._high_is_scalar = False
- instance._low_is_scalar = True
- instance._drop = None
"""
# Run
instance = Positive(high='a', strict=True, drop=None)
# Asserts
assert instance._low == 0
assert instance._high == 'a'
assert instance._strict is True
assert instance._high_is_scalar is False
assert instance._low_is_scalar is True
assert instance._drop is None
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_high_is_scalar`` variable to ``True`` and the
``_low_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- low = 'a'
- drop = None
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._high_is_scalar = True
- instance._low_is_scalar = False
- instance._drop = None
"""
# Run
instance = Negative(low='a', strict=True, drop=None)
# Asserts
assert instance._low == 'a'
assert instance._high == 0
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop is None
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where some rows have the desired decimal places and some do not (pandas.DataFrame)
Output:
- Series of booleans, ``True`` only for the rows within tolerance (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data with a mix of values that do and do not respect the desired decimal places (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
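# Illustrative note, not part of the original suite: with digits=-2 and
# tolerance=1, 401 is valid because it is only 1 away from 400, while 6921
# is invalid because it is 21 away from 6900; the missing value is also
# counted as invalid, matching the expected series above.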
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data with a mix of values that do and do not respect the desired decimal places (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
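# Illustrative note, not part of the original suite: with digits=0 and
# tolerance=1e-4, 5.99999 is valid because abs(5.99999 - 6.0) = 1e-5 falls
# within the tolerance, while 5.5 is invalid because it is 0.5 away from the
# nearest integer.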
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
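# A minimal reference sketch, not part of the original test module: the
# inverse of ``transform`` above, assuming the same 0.95/0.025 scaling. The
# ``Between.reverse_transform`` tests below describe exactly this
# sigmoid-and-rescale round trip; the helper name here is hypothetical.
def inverse_transform_reference(data, low, high):
    """Apply a sigmoid and scale the result back into the [low, high] range."""
    sigmoid = 1.0 / (1.0 + np.exp(-data))
    return (sigmoid - 0.025) / 0.95 * (high - low) + low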
class TestBetween():
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_scalar_column(self):
"""Test the ``Between.transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_scalar(self):
"""Test the ``Between.transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_column(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
|
pd.testing.assert_series_equal(expected_out, out, check_names=False)
|
pandas.testing.assert_series_equal
|
import numpy as np
import pandas as pd
import os.path as path
import abydos.distance as abd
import abydos.phonetic as abp
import pytest
from scipy.sparse import csc_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import name_matching.name_matcher as nm
@pytest.fixture
def name_match():
package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
data = pd.read_csv(path.join(package_dir, 'test','test_names.csv'))
name_matcher = nm.NameMatcher()
name_matcher.load_and_process_master_data(
'company_name', data, start_processing=False, transform=False)
return name_matcher
@pytest.fixture
def adjusted_name():
package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
return pd.read_csv(path.join(package_dir, 'test','adjusted_test_names.csv'))
@pytest.fixture
def words():
return ['fun', 'small', 'pool', 'fun', 'small', 'pool', 'sign',
'small', 'pool', 'sign', 'sign', 'small', 'pool', 'sign', 'paper',
'oppose', 'paper', 'oppose', 'brown', 'pig', 'fat', 'oppose', 'paper',
'oppose', 'brown', 'pig', 'fat', 'snail']
@pytest.mark.parametrize("method",
["",
None,
'no_method']
)
def test_make_distance_metrics_error(name_match, method):
with pytest.raises(TypeError):
name_match.set_distance_metrics([method])
@pytest.mark.parametrize("method, result",
[['indel', abd.Indel()],
['discounted_levenshtein', abd.DiscountedLevenshtein()],
['tichy', abd.Tichy()],
['cormodeL_z', abd.CormodeLZ()],
['iterative_sub_string', abd.IterativeSubString()],
['baulieu_xiii', abd.BaulieuXIII()],
['clement', abd.Clement()],
['dice_asymmetricI', abd.DiceAsymmetricI()],
['kuhns_iii', abd.KuhnsIII()],
['overlap', abd.Overlap()],
['pearson_ii', abd.PearsonII()],
['weighted_jaccard', abd.WeightedJaccard()],
['warrens_iv', abd.WarrensIV()],
['bag', abd.Bag()],
['rouge_l', abd.RougeL()],
['ratcliff_obershelp', abd.RatcliffObershelp()],
['ncd_bz2', abd.NCDbz2()],
['fuzzy_wuzzy_partial_string',
abd.FuzzyWuzzyPartialString()],
['fuzzy_wuzzy_token_sort', abd.FuzzyWuzzyTokenSort()],
['fuzzy_wuzzy_token_set', abd.FuzzyWuzzyTokenSet()],
['editex', abd.Editex()],
['typo', abd.Typo()],
['lig_3', abd.LIG3()],
['ssk', abd.SSK()],
['refined_soundex', abd.PhoneticDistance(transforms=abp.RefinedSoundex(
max_length=30), metric=abd.Levenshtein(), encode_alpha=True)],
['double_metaphone', abd.PhoneticDistance(transforms=abp.DoubleMetaphone(max_length=30), metric=abd.Levenshtein(), encode_alpha=True)]]
)
def test_make_distance_metrics(name_match, method, result):
name_match.set_distance_metrics([method])
assert type(name_match._distance_metrics.popitem()[1][0]) == type(result)
@pytest.mark.parametrize("kwargs_str, result_1, result_2, result_3, result_4",
[[{"ngrams": (4, 5)}, 0, False, (4, 5), 5000],
[{"low_memory": True}, 0, True, (2, 3), 5000],
[{"legal_suffixes": True}, 244, False, (2, 3), 5000],
[{"legal_suffixes": True, "number_of_rows": 8,
"ngrams": (1, 2, 3)}, 244, False, (1, 2, 3), 8],
])
def test_initialisation(kwargs_str, result_1, result_2, result_3, result_4):
name_match = nm.NameMatcher(**kwargs_str)
assert len(name_match._word_set) == result_1
assert name_match._low_memory == result_2
assert name_match._vec.ngram_range == result_3
assert name_match._number_of_rows == result_4
@pytest.mark.parametrize("occ, result_1, result_2, result_3, result_4, result_5",
[[1, '', '', '', '', ''],
[2, 'a-nd', 'Hndkiewicz,2Nicolas',
'Tashirian', '<NAME>', 'Marquardt,'],
[3, '<NAME>-nd', 'Hndkiewicz,2Nicolas',
'Runolfsson, <NAME>', '<NAME>', '<NAME>,'],
])
def test_preprocess_reduce(name_match, adjusted_name, occ, result_1, result_2, result_3, result_4, result_5):
name_match._column_matching = 'company_name'
new_names = name_match._preprocess_reduce(
adjusted_name, occurence_count=occ)
assert new_names.loc[1866, 'company_name'] == result_1
assert new_names.loc[1423, 'company_name'] == result_2
assert new_names.loc[268, 'company_name'] == result_3
assert new_names.loc[859, 'company_name'] == result_4
assert new_names.loc[1918, 'company_name'] == result_5
@pytest.mark.parametrize("col, start_pro, transform",
[['company_name', False, False],
['no_name', False, False],
['company_name', True, False],
['company_name', True, True],
['company_name', True, True],
])
def test_load_and_process_master_data(adjusted_name, col, start_pro, transform):
name_matcher = nm.NameMatcher()
name_matcher.load_and_process_master_data(
column=col,
df_matching_data=adjusted_name,
start_processing=start_pro,
transform=transform)
assert name_matcher._column == col
pd.testing.assert_frame_equal(
name_matcher._df_matching_data, adjusted_name)
assert name_matcher._preprocessed == start_pro
if transform & start_pro:
assert type(name_matcher._n_grams_matching) == csc_matrix
@pytest.mark.parametrize("trans, common",
[[False, False],
[True, False],
[False, True],
[True, True],
])
def test_process_matching_data(name_match, trans, common):
name_match._postprocess_common_words = common
name_match._process_matching_data(transform=trans)
assert name_match._preprocessed
if trans:
assert type(name_match._n_grams_matching) == csc_matrix
else:
assert name_match._n_grams_matching is None
if common:
assert len(name_match._word_set) > 0
else:
assert len(name_match._word_set) == 0
@pytest.mark.parametrize("lower_case, punctuations, ascii, result_1, result_2, result_3",
[[False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray', 'Ösinski-Schinner'],
[True, False, False, 'schumm plc',
'towne, johnston and murray', 'ösinski-schinner'],
[False, True, False, 'Schumm PLC',
'Towne Johnston and Murray', 'ÖsinskiSchinner'],
[False, False, True, 'Schumm PLC',
'Towne, Johnston and Murray', 'Osinski-Schinner'],
[False, True, True, 'Schumm PLC',
'Towne Johnston and Murray', 'OsinskiSchinner'],
[True, False, True, 'schumm plc',
'towne, johnston and murray', 'osinski-schinner'],
[True, True, False, 'schumm plc',
'towne johnston and murray', 'ösinskischinner'],
[True, True, True, 'schumm plc',
'towne johnston and murray', 'osinskischinner'],
])
def test_preprocess(name_match, lower_case, punctuations, ascii, result_1, result_2, result_3):
name_match._preprocess_lowercase = lower_case
name_match._preprocess_punctuations = punctuations
name_match._preprocess_ascii = ascii
new_df = name_match.preprocess(
name_match._df_matching_data, 'company_name')
assert new_df.loc[0, 'company_name'] == result_1
assert new_df.loc[2, 'company_name'] == result_2
assert new_df.loc[784, 'company_name'] == result_3
@pytest.mark.parametrize("low_memory, ngrams, result_1, result_2, result_3",
[[1, (5, 6), 0.02579, 0.00781, 0.01738],
[6, (2, 3), 0.009695, 0.01022, 0.01120],
[8, (1, 2), 0.027087, 0.02765, 0.02910],
[0, (5, 6), 0.02579, 0.00781, 0.01738],
[0, (2, 3), 0.009695, 0.01022, 0.01120],
[0, (1, 2), 0.027087, 0.02765, 0.02910],
])
def test_transform_data(name_match, low_memory, ngrams, result_1, result_2, result_3):
name_match._low_memory = low_memory
name_match._vec = TfidfVectorizer(
lowercase=False, analyzer="char", ngram_range=ngrams)
name_match._process_matching_data(transform=False)
name_match.transform_data()
assert name_match._n_grams_matching.data[10] == pytest.approx(
result_1, 0.001)
assert name_match._n_grams_matching.data[181] == pytest.approx(
result_2, 0.001)
assert name_match._n_grams_matching.data[1000] == pytest.approx(
result_3, 0.001)
@pytest.mark.parametrize("to_be_matched, possible_matches, metrics, result",
[('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein'], 5),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 7),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'bag'], 11),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',
'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein'], 4),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'bag'], 6),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',
'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'discounted_levenshtein'], 4),
('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'iterative_sub_string'], 8),
('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 8)
])
def test_score_matches(to_be_matched, possible_matches, metrics, result):
name_match = nm.NameMatcher()
name_match.set_distance_metrics(metrics)
assert np.argmax(name_match._score_matches(
to_be_matched, possible_matches)) == result
@pytest.mark.parametrize("number_of_matches, match_score, metrics, result",
[(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1]]), ['weighted_jaccard'], [0]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
(3, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]), [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], [2, 1, 1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['tichy', 'overlap', 'bag'], [2, 1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
'overlap', 'bag'], [0, 2]),
(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'iterative_sub_string'], [1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'bag'], [1, 0]),
(1, np.array([[0.3, 0.3, 0.8, 0.2, 0.2]]), [
'weighted_jaccard'], [0]),
(3, np.array([[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
(2, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]]), [
'weighted_jaccard', 'iterative_sub_string'], [0, 0]),
(1, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]]), [
'overlap', 'iterative_sub_string'], [1]),
(1, np.array(
[[-0.5, -0.8, -0.3, -0.7, 0, 2]]), ['bag'], [0]),
(3, np.array([[10, 8, 7, 6, 12, 15, 14, 88]]), [
'weighted_jaccard'], [0]),
(2, np.array([[1, 0.3], [0.1, 0.4]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1])
])
def test_rate_matches(number_of_matches, match_score, metrics, result):
name_match = nm.NameMatcher()
name_match._number_of_matches = number_of_matches
name_match.set_distance_metrics(metrics)
ind = name_match._rate_matches(match_score)
print(ind)
assert len(ind) == np.min([number_of_matches, match_score.shape[0]])
assert list(ind) == result
def test_vectorise_data(name_match):
name_match._vectorise_data(transform=False)
assert len(name_match._vec.vocabulary_) > 0
@pytest.mark.parametrize("match, number_of_matches, word_set, score, result",
[(
|
pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=['match_name_0', 'score_0', 'match_index_0', 'original_name'])
|
pandas.Series
|
from tendo import singleton
me = singleton.SingleInstance()
from food.psql import *
from food.tools import get_logger
logger = get_logger(engine,'bot_logs','food')
logger.debug({'msg':'starting bot'})
from aiogram import Bot, Dispatcher, executor, types
from aiogram.types import ContentType
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.types.message import ContentTypes
from aiogram.dispatcher import FSMContext
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from sqlalchemy import update
from aiogram.utils.callback_data import CallbackData
import typing
import numpy as np
API_TOKEN = "<KEY>"
from food.paths import *
from food.search import *
import pandas as pd
import pytz
timezones = pytz.all_timezones
import requests
from requests.structures import CaseInsensitiveDict
import urllib
from tzwhere import tzwhere
import nest_asyncio
nest_asyncio.apply()
def geocode(q):
geocoding_key = '<KEY>'
url = "https://api.geoapify.com/v1/geocode/search?"
params = {"apiKey":geocoding_key,
"text":q}
resp = requests.get(url + urllib.parse.urlencode(params)).json()
return pd.json_normalize(resp['features']).sort_values('properties.rank.importance',ascending = False)[['properties.lat','properties.lon']].iloc[0].to_list()
def get_tz(q):
lat,lon = geocode(q)
return tzwhere.tzwhere().tzNameAt(lat,lon)
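# Hedged usage sketch with a hypothetical query: get_tz("Berlin") geocodes the
# text and returns an IANA timezone name such as "Europe/Berlin".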
async def async_get_tz(q):
return get_tz(q)
async def async_search_image(url, env='prod'):
return search_image(url,env)
async def async_geocode(q):
return geocode(q)
async def async_insert_on_conflict(*args, **qwargs):
return insert_on_conflict(*args, **qwargs)
async def add_sender(message):
sender = message['from'].to_python()
sender = pd.DataFrame(sender,index=[0]).drop(columns =['is_bot'])
await async_insert_on_conflict(sender,'users',unique_cols=['id'])
def get_msg(query):
dish = pd.read_sql(f"""select energy,protein,carb,fat from food.dishes
where user_id={query['from']['id']} and
message_id = {query['message']['message_id']}
order by id desc limit 1""",engine)
plot_numtients = dish[['energy','protein','carb','fat']].reset_index(drop=True)
plot_numtients.index = ['']
return plot_numtients.astype(int).to_string()
def get_today_consumed(user_id):
today_consumed = pd.read_sql(f"""select energy,grams,timestamp from {schema}.dishes
where user_id = {user_id} and timestamp > now() - interval '24 hours'
and grams is not null;""",engine).set_index("timestamp")
today_consumed= today_consumed['energy']/100*today_consumed['grams']
user_tz = engine.execute(f"""select value from food.user_properties
where user_id={user_id} and
property='tz'
order by id desc limit 1""").first()
user_tz = user_tz[0] if user_tz else 'UTC'
today_consumed = today_consumed.tz_convert(user_tz)
now = pd.Timestamp.now(tz = user_tz)
today_consumed = today_consumed.reset_index()
this_morning = pd.Timestamp(year = now.year,month = now.month,day = now.day,hour = 3,tz = user_tz)
today_consumed = today_consumed[today_consumed['timestamp'] > pd.Timestamp(this_morning)][0].sum()
return int(today_consumed),user_tz
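# Descriptive note, not original code: the daily total treats 03:00 local time
# as the start of the day, so entries logged between midnight and 03:00 on the
# current calendar day are excluded from "today consumed".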
import asyncio
bot = Bot(token=API_TOKEN)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
dishes_table = Dishes.__table__
add_dish_cb = CallbackData('add dish', 'action')
measurment_cb = CallbackData('measurment', 'weight')
edit_dish_cb = CallbackData('edit_dish', 'action')
choose_metr_cb = CallbackData('choose_metr', 'choice')
ml_version = 0.2
set_timezone_command = types.BotCommand('set_timezone','set your timezone so that we know when your day starts')
commands = [set_timezone_command]
asyncio.run(bot.set_my_commands(commands))
grams_grid = list(np.arange(10,1000,10)[:56])
grams_grid = [str(int(v)) for v in grams_grid]
ounces_grid = list(np.arange(0.4,23,0.4)[:56])
ounces_grid = [str(round(v,1)) for v in ounces_grid]
grid_values = list(set(grams_grid+ounces_grid))
def get_keyboard(t, unit = None):
markup = types.InlineKeyboardMarkup()
if t == 'add dish' :
markup.add(types.InlineKeyboardButton('add dish', callback_data=add_dish_cb.new(action='add_dish')))
elif t == 'measurment':
btns_text = tuple(ounces_grid) if unit == 'ounces' else grams_grid
markup = types.InlineKeyboardMarkup(row_width=8)
markup.add(*(types.InlineKeyboardButton(text, callback_data=measurment_cb.new(weight=text)) for text in btns_text))
elif t == 'edit_dish':
btns_text = ('remove','edit weight','add again')
markup.add(*(types.InlineKeyboardButton(text, callback_data=edit_dish_cb.new(action=text)) for text in btns_text))
elif t == 'choose_metr':
btns_text = ('grams','ounces')
markup.add(*(types.InlineKeyboardButton(text, callback_data=choose_metr_cb.new(choice=text)) for text in btns_text))
return markup
async def measurment(unit, query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'measurment','id_key':'user_id','id_value':query['from']['id'],'msg':'measurment'})
await query.answer()
msg = query.to_python()['message']['text']
msg = msg.split('\xa0')[0] if '\xa0' in msg else msg
msg = f"{msg}\n \xa0 please choose weight of the dish in {unit}"
await bot.edit_message_text(
msg,
query.from_user.id,
query.message.message_id,
reply_markup=get_keyboard('measurment',unit),
)
def get_update(query,weight):
energy = engine.execute(f"""select energy from food.dishes
where user_id={query['from']['id']}
and message_id = {query['message']['message_id']}
order by id desc limit 1""").first()[0]
stmt = (
dishes_table.update()
.where(dishes_table.c.message_id == query['message']['message_id'])
.values(grams=weight)
.returning(dishes_table.c.id)
)
session.execute(stmt)
session.commit()
return int(energy)
#photo recieved
@dp.message_handler(content_types=ContentType.PHOTO,state='*')
async def process_photo(message: types.Message, state: FSMContext):
logger.debug({'func':'process_photo','id_key':'user_id','id_value':message['from']['id'],'msg':'process_photo started'})
await state.finish()
await types.ChatActions.typing()
await add_sender(message)
photo = message['photo'][-1]
await photo.download(reference_images_path/photo['file_id'])
image_url = await photo.get_url()
dish = await async_search_image(url=image_url, env='prod')
description = dish['description'].iloc[0]
dish['photo_id'] = photo['file_id']
dish['photo_message_id'] = message['message_id']
sender = message['from'].to_python()
dish['user_id'] = sender['id']
dish['ml_version'] = ml_version
dish['timestamp']=pd.Timestamp.utcnow()
plot_numtients = dish[['energy','protein','carb','fat']].reset_index(drop=True)
plot_numtients.index = ['']
msg = f'{description}, per 100 gram \n {plot_numtients.astype(int).to_string()}'
# msg = description + '\n'+ plot_numtients.astype(int).to_string()
reply_message = await message.reply(msg, reply_markup=get_keyboard('add dish'))
dish['message_id'] = reply_message['message_id']
dish.to_sql('dishes',schema = schema,if_exists = 'append',index = False,con=engine)
logger.debug({'func':'process_photo','id_key':'user_id','id_value':message['from']['id'],'msg':'process_photo finished'})
class CState(StatesGroup):
set_timezone = State()
@dp.message_handler(commands=['set_timezone'])
async def set_timezone_command(message: types.Message, state: FSMContext):
logger.debug({'func':'set_timezone_command','id_key':'user_id','id_value':message['from']['id'],'msg':'set_timezone pushed'})
await CState.set_timezone.set()
await message.reply(f"please search your town to set timezone")
@dp.message_handler(state=CState.set_timezone)
async def set_timezone(message: types.Message, state: FSMContext):
logger.debug({'func':'set_timezone','id_key':'user_id','id_value':message['from']['id'],'msg':f'set_timezone to {message.text} started'})
await types.ChatActions.typing()
await add_sender(message)
tz = await async_get_tz(message.text)
df = pd.DataFrame([[message['from']['id'],'tz',tz,pd.Timestamp.utcnow()]],columns = ['user_id','property','value','timestamp'])
df.to_sql('user_properties',schema = schema,con = engine,if_exists = 'append',index = False)
await state.finish()
await message.reply(f"your tz is set to {tz}")
logger.debug({'func':'set_timezone','id_key':'user_id','id_value':message['from']['id'],'msg':f'set_timezone to {message.text} finished'})
def get_metric_unit(user_id):
unit = engine.execute(f"""select value from food.user_properties
where user_id={user_id} and
property='metric_unit'
order by id desc limit 1""").first()
return unit[0] if unit else None
@dp.message_handler(commands=['start'])
async def start_command(message: types.Message):
logger.debug({'func':'start_command','id_key':'user_id','id_value':message['from']['id'],'msg':'start'})
await message.reply("""Counting calories as easy as taking pictures. Just capture everything before you eat it\n
Now send a photo of your meal to try""")
#add_dish pushed
@dp.callback_query_handler(add_dish_cb.filter(action=['add_dish']))
async def add_dish(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'add_dish','id_key':'user_id','id_value':query['from']['id'],'msg':'add_dish'})
unit = get_metric_unit(query['from']['id'])
if not unit:
msg = query.to_python()['message']['text']
msg = msg.split('\xa0')[0] if '\xa0' in msg else msg
msg = f"{msg}\n \xa0 please choose unit for your food weight measurement"
await bot.edit_message_text(
msg,
query.from_user.id,
query.message.message_id,
reply_markup=get_keyboard('choose_metr'))
else:
await measurment(unit,query, callback_data)
#add_dish pushed and no metric selected
@dp.callback_query_handler(choose_metr_cb.filter(choice=['grams']))
async def select_metric_grams(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'select_metric_grams','id_key':'user_id','id_value':query['from']['id'],'msg':'select_metric_grams'})
df = pd.DataFrame([[query['from']['id'],'metric_unit','grams',pd.Timestamp.utcnow()]],columns = ['user_id','property','value','timestamp'])
df.to_sql('user_properties',schema = schema,con = engine,if_exists = 'append',index = False)
await measurment('grams',query, callback_data)
#add_dish pushed and no metric selected
@dp.callback_query_handler(choose_metr_cb.filter(choice=['ounces']))
async def callback_vote_action(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'select_metric_ounces','id_key':'user_id','id_value':query['from']['id'],'msg':'select_metric_ounces'})
df = pd.DataFrame([[query['from']['id'],'metric_unit','ounces',pd.Timestamp.utcnow()]],columns = ['user_id','property','value','timestamp'])
df.to_sql('user_properties',schema = schema,con = engine,if_exists = 'append',index = False)
await measurment('ounces',query, callback_data)
#add_dish pushed
@dp.callback_query_handler(edit_dish_cb.filter(action=['edit weight']))
async def edit_weight(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'edit_weight','id_key':'user_id','id_value':query['from']['id'],'msg':'edit_weight'})
unit = get_metric_unit(query['from']['id'])
await measurment(unit,query, callback_data)
#measure provided
@dp.callback_query_handler(measurment_cb.filter(weight=grid_values))
async def weight_processing(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'weight_processing','id_key':'user_id','id_value':query['from']['id'],'msg':'weight_processing started'})
await query.answer()
t = 'ounces' if 'ounces' in query.to_python()['message']['text'] else 'grams'
u = 28.3495 if t == 'ounces' else 1
weight = float(callback_data['weight'])
energy = get_update(query,weight)
msg = query.to_python()['message']['text']
msg = msg.split('\xa0')[0] if '\xa0' in msg else msg #
msg = f"{msg} \xa0 \n consumed {weight} {t} \xa0 \n {int(energy/100*u*weight)} kcall"
today_consumed,usertz = get_today_consumed(query['from']['id'])
msg = f"{msg} \xa0 \n today consumed {today_consumed}"
if usertz=='UTC':
msg = f"{msg} \xa0 \n please /set_timezone so bot knows when your day is started"
# await bot.send_message(chat_id=query['from']['id'],
# text='please /set_timezone so bot knows when your day is started')
await bot.edit_message_text(
msg,
query.from_user.id,
query.message.message_id,
reply_markup=get_keyboard('edit_dish')
)
logger.debug({'func':'weight_processing','id_key':'user_id','id_value':query['from']['id'],'msg':'weight_processing finished'})
#remove pushed
@dp.callback_query_handler(edit_dish_cb.filter(action=['remove']))
async def remove_dish(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'remove_dish','id_key':'user_id','id_value':query['from']['id'],'msg':'remove_dish'})
_ = get_update(query,0)
msg = query.to_python()['message']['text']
msg = msg.split('\xa0')[0] if '\xa0' in msg else msg
today_consumed,usertz = get_today_consumed(query['from']['id'])
msg = f"{msg} \xa0 \n today consumed {today_consumed}"
if usertz=='UTC':
msg = f"{msg} \xa0 \n please /set_timezone so bot knows when your day is started"
# await bot.send_message(chat_id=query['from']['id'],
# text='please /set_timezone so bot knows when your day is started')
await query.answer()
await bot.edit_message_text(
msg,
query.from_user.id,
query.message.message_id,
reply_markup=get_keyboard('add dish'))
#add again pushed
@dp.callback_query_handler(edit_dish_cb.filter(action=['add again']))
async def add_again(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):
logger.debug({'func':'add_again','id_key':'user_id','id_value':query['from']['id'],'msg':'add_again'})
dish = pd.read_sql(f"""select description,energy,protein,carb,fat,score,photo_id,user_id,ml_version,photo_message_id
from food.dishes
where user_id={query['from']['id']}
and message_id = {query['message']['message_id']} limit 1""",engine)
dish['timestamp'] =
|
pd.Timestamp.utcnow()
|
pandas.Timestamp.utcnow
|
"""
Functions for loading the data
"""
import numpy as np
import pandas as pd
def load_berkeley_earth_data(fname):
"""
Load monthly temperature data from a Berkeley Earth file.
The moving average columns are ignored. For convenience, reads the
reference temperature and calculates the absolute temperature as well. Also
calculates decimal years for calculating trends and plotting.
Parameters
----------
fname : str
The name/path of the data file.
Returns
-------
data : pandas.DataFrame
The data in a pandas.DataFrame with columns: year, month, year_decimal,
monthly_anomaly, monthly_error, monthly_temperature.
"""
# Used to find the absolute temperature in the header
marker = '% Estimated Jan 1951-Dec 1980 absolute temperature (C):'
with open(fname) as datafile:
# Read the absolute temperature from the header
for line in datafile:
if line.startswith(marker):
numbers = line.split(':')[1]
abs_temp = float(numbers.split('+/-')[0].strip())
break
# Load the rest of the data into a DataFrame
year, month, anom, error = np.loadtxt(datafile, comments='%',
usecols=[0, 1, 2, 3],
unpack=True)
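# Decimal year: January maps to year + 0.0 and December to year + 11/12,
# which is the form used for the trend fitting and plotting mentioned above.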
columns = dict(year=year.astype(int),
month=month.astype(int),
monthly_anomaly=anom,
monthly_error=error,
year_decimal=year + (month - 1)/12,
monthly_temperature=anom + abs_temp)
data =
|
pd.DataFrame(columns)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
# ETL Pipeline
# This Python utility processes the data files and loads them into a SQLite database
# Import python libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
    Load and merge data from the messages and categories files
    Input:
    :param messages_filepath: path to the CSV file containing message data to be loaded into the database
    :param categories_filepath: path to the CSV file containing category data to be loaded into the database
    :return: pandas DataFrame containing the merged messages and categories
"""
# Load message data set
messages = pd.read_csv(messages_filepath)
if messages.shape[0] > 0:
        print('messages loaded successfully with {} rows and {} columns'.format(messages.shape[0], messages.shape[1]))
    # Load categories data set
    categories = pd.read_csv(categories_filepath)
    if categories.shape[0] > 0:
        print('categories loaded successfully with {} rows and {} columns'.format(categories.shape[0], categories.shape[1]))
    # Merge the two data sets; a shared 'id' column is assumed to be the join key here
    df = messages.merge(categories, on='id')
    return df
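# A minimal sketch of how this utility could finish the pipeline implied by the
# imports above (sys and sqlalchemy.create_engine): persist the merged frame to
# SQLite and wire up a command-line entry point. The function name save_data,
# the 'DisasterMessages' table name, and the argument order are illustrative
# assumptions, not taken from the original script.
def save_data(df, database_filepath):
    """Write the merged dataframe to a SQLite database file."""
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    # replace any existing table so repeated runs stay idempotent
    df.to_sql('DisasterMessages', engine, index=False, if_exists='replace')
def main():
    if len(sys.argv) == 4:
        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
        df = load_data(messages_filepath, categories_filepath)
        save_data(df, database_filepath)
        print('Data saved to {}'.format(database_filepath))
    else:
        print('Usage: python process_data.py <messages_csv> <categories_csv> <database_db>')
if __name__ == '__main__':
    main()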
from datetime import timedelta
from functools import partial
from itertools import permutations
import dask.bag as db
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
from kartothek.core.cube.conditions import (
C,
Conjunction,
EqualityCondition,
GreaterEqualCondition,
GreaterThanCondition,
InequalityCondition,
InIntervalCondition,
IsInCondition,
LessEqualCondition,
LessThanCondition,
)
from kartothek.core.cube.cube import Cube
from kartothek.io.dask.bag_cube import build_cube_from_bag
from kartothek.io.eager import build_dataset_indices
from kartothek.io.eager_cube import append_to_cube, build_cube, remove_partitions
__all__ = (
"apply_condition_unsafe",
"data_no_part",
"fullrange_cube",
"fullrange_data",
"fullrange_df",
"massive_partitions_cube",
"massive_partitions_data",
"massive_partitions_df",
"multipartition_cube",
"multipartition_df",
"no_part_cube",
"no_part_df",
"other_part_cube",
"sparse_outer_cube",
"sparse_outer_data",
"sparse_outer_df",
"sparse_outer_opt_cube",
"sparse_outer_opt_df",
"test_complete",
"test_condition",
"test_condition_on_null",
"test_cube",
"test_delayed_index_build_correction_restriction",
"test_delayed_index_build_partition_by",
"test_df",
"test_fail_blocksize_negative",
"test_fail_blocksize_wrong_type",
"test_fail_blocksize_zero",
"test_fail_empty_dimension_columns",
"test_fail_missing_condition_columns",
"test_fail_missing_dimension_columns",
"test_fail_missing_partition_by",
"test_fail_missing_payload_columns",
"test_fail_no_store_factory",
"test_fail_projection",
"test_fail_unindexed_partition_by",
"test_fail_unstable_dimension_columns",
"test_fail_unstable_partition_by",
"test_filter_select",
"test_hypothesis",
"test_overlay_tricky",
"test_partition_by",
"test_projection",
"test_select",
"test_simple_roundtrip",
"test_sort",
"test_stresstest_index_select_row",
"test_wrong_condition_type",
"testset",
"updated_cube",
"updated_df",
)
@pytest.fixture(scope="module")
def fullrange_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{
"y": [0, 1, 2, 3, 0, 1, 2, 3],
"z": 0,
"p": [0, 0, 1, 1, 0, 0, 1, 1],
"q": [0, 0, 0, 0, 1, 1, 1, 1],
"v3": np.arange(8),
"i3": np.arange(8),
}
),
}
@pytest.fixture(scope="module")
def fullrange_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="fullrange_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=fullrange_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def multipartition_cube(module_store, fullrange_data, fullrange_cube):
def _gen(part):
result = {}
for dataset_id, df in fullrange_data.items():
df = df.copy()
df["z"] = part
result[dataset_id] = df
return result
cube = fullrange_cube.copy(uuid_prefix="multipartition_cube")
build_cube_from_bag(
data=db.from_sequence([0, 1], partition_size=1).map(_gen),
store=module_store,
cube=cube,
ktk_cube_dataset_ids=["seed", "enrich_dense", "enrich_sparse"],
).compute()
return cube
@pytest.fixture(scope="module")
def sparse_outer_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"i1": [0, 3, 7],
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 0],
"y": [0, 1],
"z": 0,
"p": [0, 2],
"q": 0,
"v2": [0, 7],
"i2": [0, 7],
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 0], "z": 0, "p": [0, 1], "q": 0, "v3": [0, 3], "i3": [0, 3]}
),
}
@pytest.fixture(scope="module")
def sparse_outer_cube(module_store, sparse_outer_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="sparse_outer_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=sparse_outer_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def sparse_outer_opt_cube(
module_store,
sparse_outer_data,
sparse_outer_cube,
sparse_outer_df,
sparse_outer_opt_df,
):
data = {}
for dataset_id in sparse_outer_data.keys():
df = sparse_outer_data[dataset_id].copy()
for col in sparse_outer_opt_df.columns:
if col in df.columns:
dtype = sparse_outer_opt_df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
data[dataset_id] = df
cube = sparse_outer_cube.copy(uuid_prefix="sparse_outer_opt_cube")
build_cube(data=data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def massive_partitions_data():
n = 17
return {
"seed": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"i1": np.arange(n),
}
),
"enrich_1": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v2": np.arange(n),
"i2": np.arange(n),
}
),
"enrich_2": pd.DataFrame(
{
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v3": np.arange(n),
"i3": np.arange(n),
}
),
}
@pytest.fixture(scope="module")
def massive_partitions_cube(module_store, massive_partitions_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="massive_partitions_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=massive_partitions_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def fullrange_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def multipartition_df(fullrange_df):
dfs = []
for z in (0, 1):
df = fullrange_df.copy()
df["z"] = z
dfs.append(df)
return (
pd.concat(dfs, ignore_index=True)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"v2": [0, np.nan, 7],
"v3": [0, 3, np.nan],
"i1": [0, 3, 7],
"i2": [0, np.nan, 7],
"i3": [0, 3, np.nan],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_opt_df(sparse_outer_df):
df = sparse_outer_df.copy()
df["x"] = df["x"].astype(np.int16)
df["y"] = df["y"].astype(np.int32)
df["z"] = df["z"].astype(np.int8)
df["v1"] = df["v1"].astype(np.int8)
df["i1"] = df["i1"].astype(np.int8)
return df
@pytest.fixture(scope="module")
def massive_partitions_df():
n = 17
return (
pd.DataFrame(
data={
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"v2": np.arange(n),
"v3": np.arange(n),
"i1": np.arange(n),
"i2": np.arange(n),
"i3": np.arange(n),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def updated_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="updated_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data={
cube.seed_dataset: pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"i1": np.arange(6),
}
),
"enrich": pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v2": np.arange(6),
"i2": np.arange(6),
}
),
"extra": pd.DataFrame(
{
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v3": np.arange(6),
"i3": np.arange(6),
}
),
},
store=module_store,
cube=cube,
)
remove_partitions(
cube=cube,
store=module_store,
ktk_cube_dataset_ids=["enrich"],
conditions=C("p") >= 1,
)
append_to_cube(
data={
"enrich": pd.DataFrame(
{
"x": [1, 1],
"y": [0, 1],
"z": 0,
"p": [1, 1],
"q": 0,
"v2": [7, 8],
"i2": [7, 8],
}
)
},
store=module_store,
cube=cube,
)
return cube
@pytest.fixture(scope="module")
def updated_df():
return (
pd.DataFrame(
data={
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"v2": [0, 1, 7, 8, np.nan, np.nan],
"v3": np.arange(6),
"i1": np.arange(6),
"i2": [0, 1, 7, 8, np.nan, np.nan],
"i3": np.arange(6),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def data_no_part():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 1, 2, 3], "z": 0, "v3": np.arange(4), "i3": np.arange(4)}
),
}
@pytest.fixture(scope="module")
def no_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="data_no_part",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": [], "enrich_sparse": []},
)
return cube
@pytest.fixture(scope="module")
def other_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="other_part_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": ["i2"], "enrich_sparse": ["i3"]},
)
return cube
@pytest.fixture(scope="module")
def no_part_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(
params=[
"fullrange",
"multipartition",
"sparse_outer",
"sparse_outer_opt",
"massive_partitions",
"updated",
"no_part",
"other_part",
],
scope="module",
)
def testset(request):
return request.param
@pytest.fixture(scope="module")
def test_cube(
testset,
fullrange_cube,
multipartition_cube,
sparse_outer_cube,
sparse_outer_opt_cube,
massive_partitions_cube,
updated_cube,
no_part_cube,
other_part_cube,
):
if testset == "fullrange":
return fullrange_cube
elif testset == "multipartition":
return multipartition_cube
elif testset == "sparse_outer":
return sparse_outer_cube
elif testset == "sparse_outer_opt":
return sparse_outer_opt_cube
elif testset == "massive_partitions":
return massive_partitions_cube
elif testset == "updated":
return updated_cube
elif testset == "no_part":
return no_part_cube
elif testset == "other_part":
return other_part_cube
else:
raise ValueError("Unknown param {}".format(testset))
@pytest.fixture(scope="module")
def test_df(
testset,
fullrange_df,
multipartition_df,
sparse_outer_df,
sparse_outer_opt_df,
massive_partitions_df,
updated_df,
no_part_df,
):
if testset == "fullrange":
return fullrange_df
elif testset == "multipartition":
return multipartition_df
elif testset == "sparse_outer":
return sparse_outer_df
elif testset == "sparse_outer_opt":
return sparse_outer_opt_df
elif testset == "massive_partitions":
return massive_partitions_df
elif testset == "updated":
return updated_df
elif testset in ("no_part", "other_part"):
return no_part_df
else:
raise ValueError("Unknown param {}".format(testset))
def test_simple_roundtrip(driver, function_store, function_store_rwro):
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(data=df, cube=cube, store=function_store)
result = driver(cube=cube, store=function_store_rwro)
assert len(result) == 1
df_actual = result[0]
df_expected = df.reindex(columns=["p", "v", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_complete(driver, module_store, test_cube, test_df):
result = driver(cube=test_cube, store=module_store)
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, test_df)
def apply_condition_unsafe(df, cond):
# For the sparse_outer testset, the test_df has the wrong datatype because we cannot encode missing integer data in
# pandas.
#
# The condition will not be applicable to the DF because the DF has floats while conditions have ints. We fix that
# by modifying the condition.
#
# If the conditions happen to leave no missing data, kartothek returns integer data and assert_frame_equal
# would then complain about the dtype mismatch, so in that case we recover the integer dtype here
# (a short standalone illustration of this behaviour follows this helper).
if not isinstance(cond, Conjunction):
cond = Conjunction(cond)
float_cols = {col for col in df.columns if df[col].dtype == float}
# convert int to float conditions
cond2 = Conjunction([])
for col, conj in cond.split_by_column().items():
if col in float_cols:
parts = []
for part in conj.conditions:
if isinstance(part, IsInCondition):
part = IsInCondition(
column=part.column, value=tuple((float(v) for v in part.value))
)
elif isinstance(part, InIntervalCondition):
part = InIntervalCondition(
column=part.column,
start=float(part.start),
stop=float(part.stop),
)
else:
part = part.__class__(column=part.column, value=float(part.value))
parts.append(part)
conj = Conjunction(parts)
cond2 &= conj
# apply conditions
df = cond2.filter_df(df).reset_index(drop=True)
# convert float columns to int columns
for col in df.columns:
if df[col].notnull().all():
dtype = df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
return df
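# Standalone pandas/numpy illustration (not part of the original test module; the
# helper name below is made up) of the dtype behaviour that the workaround above
# compensates for: integer data that acquires NaNs through a sparse join is
# stored as float64 and can only be cast back to an integer dtype once no values
# are missing.
def _illustrate_missing_int_dtype():
    s = pd.Series([0, 3, 7], dtype=np.int64)
    joined = s.reindex([0, 1, 2, 3])  # index 3 is missing -> NaN introduced
    assert joined.dtype == np.float64  # ints + NaN are represented as float64
    recovered = joined.dropna().astype(np.int64)  # safe only once no NaNs remain
    assert recovered.dtype == np.int64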
@pytest.mark.parametrize(
"cond",
[
C("v1") >= 7,
C("v1") >= 10000,
C("v2") >= 7,
C("v3") >= 3,
C("i1") >= 7,
C("i1") >= 10000,
C("i2") >= 7,
C("i2") != 0,
C("i3") >= 3,
C("p") >= 1,
C("q") >= 1,
C("x") >= 1,
C("y") >= 1,
(C("x") == 3) & (C("y") == 3),
(C("i1") > 0) & (C("i2") > 0),
Conjunction([]),
],
)
def test_condition(driver, module_store, test_cube, test_df, cond):
result = driver(cube=test_cube, store=module_store, conditions=cond)
df_expected = apply_condition_unsafe(test_df, cond)
if df_expected.empty:
assert len(result) == 0
else:
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["v1", "v2"], ["v2", "v3"], ["v3"]])
def test_select(driver, module_store, test_cube, test_df, payload_columns):
result = driver(cube=test_cube, store=module_store, payload_columns=payload_columns)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
:, sorted(set(payload_columns) | {"x", "y", "z", "p", "q"})
]
pdt.assert_frame_equal(df_actual, df_expected)
def test_filter_select(driver, module_store, test_cube, test_df):
result = driver(
cube=test_cube,
store=module_store,
payload_columns=["v1", "v2"],
conditions=(C("i3") >= 3), # completely unrelated to the payload
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
test_df["i3"] >= 3, ["p", "q", "v1", "v2", "x", "y", "z"]
].reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize(
"partition_by",
[["i1"], ["i2"], ["i3"], ["x"], ["y"], ["p"], ["q"], ["i1", "i2"], ["x", "y"]],
)
def test_partition_by(driver, module_store, test_cube, test_df, partition_by):
dfs_actual = driver(cube=test_cube, store=module_store, partition_by=partition_by)
dfs_expected = [
df_g.reset_index(drop=True)
for g, df_g in test_df.groupby(partition_by, sort=True)
]
for df_expected in dfs_expected:
for col in df_expected.columns:
if df_expected[col].dtype == float:
try:
df_expected[col] = df_expected[col].astype(int)
except Exception:
pass
assert len(dfs_actual) == len(dfs_expected)
for df_actual, df_expected in zip(dfs_actual, dfs_expected):
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("dimension_columns", list(permutations(["x", "y", "z"])))
def test_sort(driver, module_store, test_cube, test_df, dimension_columns):
result = driver(
cube=test_cube, store=module_store, dimension_columns=dimension_columns
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.sort_values(
list(dimension_columns) + list(test_cube.partition_columns)
).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["y", "z"], ["y", "z", "v3"]])
def test_projection(driver, module_store, test_cube, test_df, payload_columns):
result = driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=payload_columns,
)
assert len(result) == 1
df_actual = result[0]
df_expected = (
test_df.loc[:, sorted(set(payload_columns) | {"y", "z", "p", "q"})]
.drop_duplicates()
.sort_values(["y", "z", "p", "q"])
.reset_index(drop=True)
)
pdt.assert_frame_equal(df_actual, df_expected)
def test_stresstest_index_select_row(driver, function_store):
n_indices = 100
n_rows = 1000
data = {"x": np.arange(n_rows), "p": 0}
for i in range(n_indices):
data["i{}".format(i)] = np.arange(n_rows)
df = pd.DataFrame(data)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
index_columns=["i{}".format(i) for i in range(n_indices)],
)
build_cube(data=df, cube=cube, store=function_store)
conditions = Conjunction([(C("i{}".format(i)) == 0) for i in range(n_indices)])
result = driver(
cube=cube,
store=function_store,
conditions=conditions,
payload_columns=["p", "x"],
)
assert len(result) == 1
df_actual = result[0]
df_expected = df.loc[df["x"] == 0].reindex(columns=["p", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_fail_missing_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=["x", "a", "b"])
assert (
"Following dimension columns were requested but are missing from the cube: a, b"
in str(exc.value)
)
def test_fail_empty_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=[])
assert "Dimension columns cannot be empty." in str(exc.value)
def test_fail_missing_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["foo"])
assert (
"Following partition-by columns were requested but are missing from the cube: foo"
in str(exc.value)
)
def test_fail_unindexed_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["v1", "v2"])
assert (
"Following partition-by columns are not indexed and cannot be used: v1, v2"
in str(exc.value)
)
def test_fail_missing_condition_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
conditions=(C("foo") == 1) & (C("bar") == 2),
)
assert (
"Following condition columns are required but are missing from the cube: bar, foo"
in str(exc.value)
)
def test_fail_missing_payload_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, payload_columns=["foo", "bar"])
assert "Cannot find the following requested payload columns: bar, foo" in str(
exc.value
)
def test_fail_projection(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=["v1"],
)
assert (
'Cannot project dataset "seed" with dimensionality [x, y, z] to [y, z] '
"while keeping the following payload intact: v1" in str(exc.value)
)
def test_fail_unstable_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_fail_unstable_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, partition_by={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_wrong_condition_type(driver, function_store, driver_name):
types = {
"int": pd.Series([-1], dtype=np.int64),
"uint": pd.Series([1], dtype=np.uint64),
"float": pd.Series([1.3], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"str": pd.Series(["foo"], dtype=object),
}
cube = Cube(
dimension_columns=["d_{}".format(t) for t in sorted(types.keys())],
partition_columns=["p_{}".format(t) for t in sorted(types.keys())],
uuid_prefix="typed_cube",
index_columns=["i_{}".format(t) for t in sorted(types.keys())],
)
data = {
"seed": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "v1"]
}
),
"enrich": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v2"]
}
),
}
build_cube(data=data, store=function_store, cube=cube)
df = pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v1", "v2"]
}
)
for col in df.columns:
t1 = col.split("_")[1]
for t2 in sorted(types.keys()):
cond = C(col) == types[t2].values[0]
if t1 == t2:
result = driver(cube=cube, store=function_store, conditions=cond)
assert len(result) == 1
df_actual = result[0]
df_expected = cond.filter_df(df).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected, check_like=True)
else:
with pytest.raises(TypeError) as exc:
driver(cube=cube, store=function_store, conditions=cond)
assert "has wrong type" in str(exc.value)
def test_condition_on_null(driver, function_store):
df = pd.DataFrame(
{
"x": pd.Series([0, 1, 2], dtype=np.int64),
"p": pd.Series([0, 0, 1], dtype=np.int64),
"v_f1": pd.Series([0, np.nan, 2], dtype=np.float64),
"v_f2": pd.Series([0, 1, np.nan], dtype=np.float64),
"v_f3": pd.Series([np.nan, np.nan, np.nan], dtype=np.float64),
"v_s1": pd.Series(["a", None, "c"], dtype=object),
"v_s2": pd.Series(["a", "b", None], dtype=object),
"v_s3": pd.Series([None, None, None], dtype=object),
}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="nulled_cube",
index_columns=[],
)
build_cube(data=df, store=function_store, cube=cube)
for col in df.columns:
# only iterate over the value columns (not the dimension / partition column):
if not col.startswith("v"):
continue
# col_type will be either 'f' for float or 's' for string; see column
# names above
col_type = col.split("_")[1][0]
if col_type == "f":
value = 1.2
elif col_type == "s":
value = "foo"
else:
raise RuntimeError("unknown type")
cond = C(col) == value
df_expected = cond.filter_df(df).reset_index(drop=True)
result = driver(cube=cube, store=function_store, conditions=cond)
if df_expected.empty:
assert len(result) == 0
else:
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, df_expected, check_like=True)
def test_fail_no_store_factory(driver, module_store, test_cube, skip_eager):
store = module_store()
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=store, no_run=True)
assert str(exc.value) == "store must be a factory but is HFilesystemStore"
def test_delayed_index_build_partition_by(driver, function_store):
df_seed = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1]})
df_extend = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [0, 0, 0, 1]})
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="delayed_index_cube",
index_columns=[],
)
build_cube(
data={"seed": df_seed, "extend": df_extend}, store=function_store, cube=cube
)
build_dataset_indices(
store=function_store,
dataset_uuid=cube.ktk_dataset_uuid("extend"),
columns=["v"],
)
results = driver(cube=cube, store=function_store, partition_by=["v"])
assert len(results) == 2
df_result1 = pd.DataFrame(
data={"x": [0, 1, 2], "p": [0, 0, 1], "v": [0, 0, 0]}, columns=["p", "v", "x"]
)
df_result2 = pd.DataFrame(
data={"x": [3], "p": [1], "v": [1]}, columns=["p", "v", "x"]
)
pdt.assert_frame_equal(results[0], df_result1)
pdt.assert_frame_equal(results[1], df_result2)
def test_fail_blocksize_wrong_type(
driver, module_store, test_cube, skip_eager, driver_name
):
if driver_name == "dask_dataframe":
pytest.skip("not relevant for dask.dataframe")
with pytest.raises(TypeError, match="blocksize must be an integer but is str"):
driver(cube=test_cube, store=module_store, blocksize="foo")
def test_fail_blocksize_negative(
driver, module_store, test_cube, skip_eager, driver_name
):
if driver_name == "dask_dataframe":
pytest.skip("not relevant for dask.dataframe")
with pytest.raises(ValueError, match="blocksize must be > 0 but is -1"):
driver(cube=test_cube, store=module_store, blocksize=-1)
def test_fail_blocksize_zero(driver, module_store, test_cube, skip_eager, driver_name):
if driver_name == "dask_dataframe":
pytest.skip("not relevant for dask.dataframe")
with pytest.raises(ValueError, match="blocksize must be > 0 but is 0"):
driver(cube=test_cube, store=module_store, blocksize=0)
def test_delayed_index_build_correction_restriction(driver, function_store):
"""
Ensure that adding extra indices for dimension columns does not mark other datasets as restrictive.
"""
df_seed = pd.DataFrame({"x": [0, 1, 2, 3, 4, 5], "p": [0, 0, 1, 1, 2, 2]})
df_extend = pd.DataFrame({"x": [0, 1, 2], "p": [0, 0, 1], "v": [0, 1, 2]})
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="delayed_index_cube",
index_columns=[],
)
build_cube(
data={"seed": df_seed, "extend": df_extend}, store=function_store, cube=cube
)
build_dataset_indices(
store=function_store,
dataset_uuid=cube.ktk_dataset_uuid("extend"),
columns=["x"],
)
results = driver(cube=cube, store=function_store, conditions=C("x") >= 0)
assert len(results) == 1
df_actual = results[0]
df_expected = pd.DataFrame(
{
"x": [0, 1, 2, 3, 4, 5],
"p": [0, 0, 1, 1, 2, 2],
"v": [0, 1, 2, np.nan, np.nan, np.nan],
},
columns=["p", "v", "x"],
)
pdt.assert_frame_equal(df_actual, df_expected)
time_travel_stages_ops_df = [
(
partial(
build_cube,
data={
"source": pd.DataFrame(
{
"x": [0, 1, 2, 3, 4, 5],
"p": [0, 0, 1, 1, 2, 2],
"v1": [0, 1, 2, 3, 4, 5],
"i1": [0, 1, 2, 3, 4, 5],
}
),
"enrich": pd.DataFrame(
{
"x": [0, 1, 2, 3, 4, 5],
"p": [0, 0, 1, 1, 2, 2],
"v2": [0, 1, 2, 3, 4, 5],
"i2": [0, 1, 2, 3, 4, 5],
}
),
},
),
pd.DataFrame(
data={
"x": [0, 1, 2, 3, 4, 5],
"p": [0, 0, 1, 1, 2, 2],
"v1": [0, 1, 2, 3, 4, 5],
"i1": [0, 1, 2, 3, 4, 5],
"v2": [0, 1, 2, 3, 4, 5],
"i2": [0, 1, 2, 3, 4, 5],
},
columns=["i1", "i2", "p", "v1", "v2", "x"],
),
),
(
partial(
remove_partitions, ktk_cube_dataset_ids=["enrich"], conditions=C("p") > 0
),
pd.DataFrame(
data={
"x": [0, 1, 2, 3, 4, 5],
"p": [0, 0, 1, 1, 2, 2],
"v1": [0, 1, 2, 3, 4, 5],
"i1": [0, 1, 2, 3, 4, 5],
"v2": [0, 1, np.nan, np.nan, np.nan, np.nan],
"i2": [0, 1, np.nan, np.nan, np.nan, np.nan],
},
columns=["i1", "i2", "p", "v1", "v2", "x"],
),
),
(
partial(
append_to_cube,
data={"enrich": pd.DataFrame({"x": [2], "p": [1], "v2": [20], "i2": [20]})},
),
pd.DataFrame(
data={
"x": [0, 1, 2, 3, 4, 5],
"p": [0, 0, 1, 1, 2, 2],
"v1": [0, 1, 2, 3, 4, 5],
"i1": [0, 1, 2, 3, 4, 5],
"v2": [0, 1, 20, np.nan, np.nan, np.nan],
"i2": [0, 1, 20, np.nan, np.nan, np.nan],
},
columns=["i1", "i2", "p", "v1", "v2", "x"],
),
),
(
partial(
append_to_cube,
data={
"source": pd.DataFrame(
{
"x": [4, 5, 6, 7],
"p": [2, 2, 3, 3],
"v1": [40, 50, 60, 70],
"i1": [40, 50, 60, 70],
}
)
},
),
pd.DataFrame(
data={
"x": [0, 1, 2, 3, 4, 5, 6, 7],
"p": [0, 0, 1, 1, 2, 2, 3, 3],
"v1": [0, 1, 2, 3, 40, 50, 60, 70],
"i1": [0, 1, 2, 3, 40, 50, 60, 70],
"v2": [0, 1, 20, np.nan, np.nan, np.nan, np.nan, np.nan],
"i2": [0, 1, 20, np.nan, np.nan, np.nan, np.nan, np.nan],
},
columns=["i1", "i2", "p", "v1", "v2", "x"],
),
),
]
def test_overlay_tricky(driver, function_store):
cube = Cube(
dimension_columns=["x", "y"],
partition_columns=["p", "q"],
uuid_prefix="time_travel_cube_tricky",
seed_dataset="source",
)
build_cube(
data={
cube.seed_dataset: pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": 1,
}
),
"no_part": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"v2": 1,
}
),
"q": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v3": 1,
}
),
"a": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"a": [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1],
"v4": 1,
}
),
},
cube=cube,
store=function_store,
partition_on={"no_part": [], "q": ["q"], "a": ["a"]},
)
append_to_cube(
data={
cube.seed_dataset: pd.DataFrame(
{
"x": [0, 1, 0, 1, 2, 3, 2, 3],
"y": [2, 2, 3, 3, 0, 0, 1, 1],
"p": [1, 1, 1, 1, 0, 0, 0, 0],
"q": [0, 0, 0, 0, 1, 1, 1, 1],
"v1": 2,
}
),
"no_part": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"v2": 2,
}
),
"q": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1],
"y": [0, 0, 1, 1, 2, 2, 3, 3],
"q": [0, 0, 0, 0, 0, 0, 0, 0],
"v3": 2,
}
),
"a": pd.DataFrame(
{
"x": [1, 0, 1, 2, 3, 2, 3, 3],
"y": [0, 2, 2, 1, 1, 2, 2, 3],
"a": [1, 1, 1, 1, 1, 1, 1, 1],
"v4": 2,
}
),
},
cube=cube,
store=function_store,
)
df_expected = (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1],
"v2": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
"v3": [2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
"v4": [1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 2],
"a": [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1],
},
columns=["a", "p", "q", "v1", "v2", "v3", "v4", "x", "y"],
)
.sort_values(["x", "y", "p", "q"])
.reset_index(drop=True)
)
result = driver(cube=cube, store=function_store)
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, df_expected)
cond_types_simple = [
EqualityCondition,
LessEqualCondition,
LessThanCondition,
GreaterEqualCondition,
GreaterThanCondition,
InequalityCondition,
]
cond_types_all = cond_types_simple + [IsInCondition, InIntervalCondition] # type:ignore
def _tuple_to_condition(t):
col, cond_type, v1, v2, vset = t
if issubclass(cond_type, tuple(cond_types_simple)):
return cond_type(col, v1)
elif cond_type == IsInCondition:
return cond_type(col, vset)
elif cond_type == InIntervalCondition:
return cond_type(col, v1, v2)
raise ValueError("Unknown condition type {}".format(cond_type))
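# Illustration (hypothetical sampled tuple, mirroring the constructor calls above):
# ("x", EqualityCondition, 3, 7, {1, 2}) maps to EqualityCondition("x", 3), while
# ("x", InIntervalCondition, 3, 7, {1, 2}) maps to InIntervalCondition("x", 3, 7).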
st_columns = st.sampled_from(
["x", "y", "z", "p", "q", "i1", "i2", "i3", "v1", "v2", "v3"]
)
st_values = st.integers(min_value=-1, max_value=17)
st_cond_types = st.sampled_from(cond_types_all)
st_conditions = st.tuples(
st_columns, st_cond_types, st_values, st_values, st.sets(st_values)
).map(_tuple_to_condition)
@given(
conditions=st.lists(st_conditions).map(Conjunction),
dimension_columns=st.permutations(["x", "y", "z"]),
payload_columns=st.sets(st_columns),
)
@settings(deadline=timedelta(seconds=5))
def test_hypothesis(
driver,
driver_name,
module_store,
test_cube,
test_df,
dimension_columns,
payload_columns,
conditions,
):
if driver_name != "eager":
pytest.skip("only eager is fast enough")
result = driver(
cube=test_cube,
store=module_store,
dimension_columns=dimension_columns,
payload_columns=payload_columns,
conditions=conditions,
)
df_expected = (
apply_condition_unsafe(test_df, conditions)
.sort_values(dimension_columns + list(test_cube.partition_columns))
.loc[:, sorted({"x", "y", "z", "p", "q"} | payload_columns)]
.reset_index(drop=True)
)
if df_expected.empty:
assert len(result) == 0
else:
assert len(result) == 1
df_actual = result[0]
|
pdt.assert_frame_equal(df_actual, df_expected)
|
pandas.testing.assert_frame_equal
|
import numpy as np
import collections
import SimpleITK as sitk
from scipy.ndimage.interpolation import zoom
import os,sys
import pandas as pd
from keras.preprocessing import image
## Set GPU
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
# Load model
from keras.applications.resnet50 import ResNet50
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import Model
#fc
base_model = ResNet50(weights='imagenet', include_top=True)
from keras.models import Model
model = Model(inputs=base_model.input, outputs=base_model.layers[-1].output)
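# Note: with include_top=True, base_model.layers[-1] is ResNet50's 1000-way softmax head,
# so `model` is effectively the full classifier and the "deep features" extracted below
# are the 1000 class scores per cropped ROI.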
# Load batch file
imgDir = '../example/data'
dirlist = os.listdir(imgDir)[1:]
print(dirlist)
# read segmentation masks (ROI) in Nifti format
def loadSegArraywithID(fold,iden):
path = fold
pathList = os.listdir(path)
segPath = [os.path.join(path,i) for i in pathList if ('seg' in i.lower()) & (iden in i.lower())][0]
seg = sitk.ReadImage(segPath)
return seg
# read images in Nifti format
def loadImgArraywithID(fold,iden):
path = fold
pathList = os.listdir(path)
imgPath = [os.path.join(path,i) for i in pathList if ('im' in i.lower()) & (iden in i.lower())][0]
img = sitk.ReadImage(imgPath)
return img
# Feature Extraction
#Cropping box
def maskcroppingbox(images_array, use2D=False):
images_array_2 = np.argwhere(images_array)
(zstart, ystart, xstart), (zstop, ystop, xstop) = images_array_2.min(axis=0), images_array_2.max(axis=0) + 1
return (zstart, ystart, xstart), (zstop, ystop, xstop)
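# Hedged illustration (toy mask, not part of the pipeline): for a 3-D binary mask whose
# nonzero voxels lie at z in {1, 2}, y == 1, x in {2, 3}, maskcroppingbox returns
# ((1, 1, 2), (3, 2, 4)), i.e. the inclusive minima and exclusive maxima of the ROI.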
def featureextraction(imageFilepath,maskFilepath):
image_array = sitk.GetArrayFromImage(imageFilepath)
mask_array = sitk.GetArrayFromImage(maskFilepath)
(zstart, ystart, xstart), (zstop, ystop, xstop) = maskcroppingbox(mask_array, use2D=False)
roi_images = image_array[zstart-1:zstop+1,ystart:ystop,xstart:xstop].transpose((2,1,0))
roi_images1 = zoom(roi_images, zoom=[224/roi_images.shape[0], 224/roi_images.shape[1],1], order=3)
    roi_images2 = np.array(roi_images1, dtype=float)  # builtin float; np.float was removed in recent NumPy
x = image.img_to_array(roi_images2)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
base_model_pool_features = model.predict(x)
features = base_model_pool_features[0]
deeplearningfeatures = collections.OrderedDict()
for ind_,f_ in enumerate(features):
deeplearningfeatures[str(ind_)] = f_
return deeplearningfeatures
featureDict = {}
for ind in range(len(dirlist)):
path = os.path.join(imgDir,dirlist[ind])
seg = loadSegArraywithID(path,'seg')
im = loadImgArraywithID(path,'im')
deeplearningfeatures = featureextraction(im,seg)
result = deeplearningfeatures
key = list(result.keys())
key = key[0:]
feature = []
for jind in range(len(key)):
feature.append(result[key[jind]])
featureDict[dirlist[ind]] = feature
dictkey = key
print(dirlist[ind])
dataframe =
|
pd.DataFrame.from_dict(featureDict, orient='index', columns=dictkey)
|
pandas.DataFrame.from_dict
|
from datetime import datetime
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils.matrix_convert import MatrixConversion
from calculations.AllMetrics import Metrics
from utils.constants import TYPES
from utils.helpers import remove_offset_from_julian_date
from params import summer_params
from params import fall_params
from params import spring_params
from params import winter_params
def upload_files(start_date, files, flow_class):
output_files = 'user_output_files'
for file in files:
file_name = output_files + '/' + file.split('/')[1].split('.csv')[0]
dataset = read_csv_to_arrays(file)
matrix = MatrixConversion(
dataset['date'], dataset['flow'], start_date)
julian_start_date = datetime.strptime(
"{}/2001".format(start_date), "%m/%d/%Y").timetuple().tm_yday
result = get_result(matrix, julian_start_date, int(flow_class))
write_to_csv(file_name, result, 'annual_flow_matrix')
write_to_csv(file_name, result, 'drh')
write_to_csv(file_name, result, 'annual_flow_result')
write_to_csv(file_name, result, 'parameters', flow_class)
# draw_plots(file_name, result)
return True
def get_result(matrix, julian_start_date, flow_class):
result = {}
result["year_ranges"] = [int(i) + 1 for i in matrix.year_array]
result["flow_matrix"] = np.where(
|
pd.isnull(matrix.flow_matrix)
|
pandas.isnull
|
import numpy as np
import pandas as pd
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y= dataset.iloc[:, 13].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder_X_1 = LabelEncoder()
X[:, 1] = labelEncoder_X_1.fit_transform(X[:, 1])
labelEncoder_X_2 = LabelEncoder()
X[:, 2] = labelEncoder_X_2.fit_transform(X[:, 2])
df_X=
|
pd.DataFrame(X)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 9 14:13:23 2021
@author: willrichardson
"""
# This script calculates other relevant stats and quantities of interest for each half-hour period
# includes 30-minute quantities and spectral quantities in each period
# leaving RMSDs as local variables; pass them into this script
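# Example invocation (hypothetical slope values; the real ones come from the upstream RLM fits,
# and site_yr, run_ID, LB, UB, DT, Zm, zc_file must be set in the environment):
#   python <this_script>.py 0.012 0.009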
#%% import libraries
import sys
sys.path.insert(0, '/Users/willrichardson/opt/anaconda3/lib/python3.8/site-packages')
import os
import glob
import pandas as pd
import numpy as np
from funcs import sort_ByDate_DMY, read_conc
from funcs import anc_unit_conv, Fco2_name, sigma_m
#%% get current working directory, relevant environment variables
args = sys.argv
RMSDq_slope = np.float64(args[1])
RMSDc_slope = np.float64(args[2])
base_path = os.getcwd()
site_yr = os.environ['site_yr']
run_ID = os.environ['run_ID']
fn_LB = os.environ['LB']
fn_UB = os.environ['UB']
DT = np.float64(os.environ['DT'])
nj = int(np.rint(np.log2((30*60)/DT)))
zm = np.float64(os.environ['Zm'])
N_wvlt = 2**nj
# make output file name
output_fname = base_path + '/ref_data/Output/%s/%s/%s_Interm_RefDf_allHH_PostProcStats.csv'%(site_yr, run_ID, site_yr)
#%% load reference dfs, outputs from partitioning program, other ancillary info
df_ref = pd.read_csv(base_path + '/ref_data/Output/%s/%s_Interm_RefDf_allHH.csv' %(site_yr, site_yr),
index_col='Timestamp', infer_datetime_format=True, parse_dates=True)
RLM_df = pd.read_csv(base_path + '/ref_data/Output/%s/%s_RLMfitRMSDs_fnLB_%s_fnUB_%s.csv' %(site_yr, site_yr, fn_LB, fn_UB),
index_col=0, infer_datetime_format=True, parse_dates=True)
zc_df = pd.read_csv(os.environ['zc_file'], sep='\t', index_col=0, names=['Timestamp', 'zc'], parse_dates=True, infer_datetime_format=True)
# partitioning program outputs
## load and sort partitioned fluxes and random error estimates
flux = glob.glob(base_path + '/flux/%s/flux*.txt'%site_yr)
rerr = glob.glob(base_path + '/flux/%s/rerror*.txt'%site_yr)
flux = sort_ByDate_DMY(flux); rerr = sort_ByDate_DMY(rerr)
raw_files, part_df = read_conc(flux, rerr)
## partitioning program yields fluxes in mmol m-2 s-1; convert to umol m-2 s-1
part_df = part_df*1000
#%% join into a unified dataframe (use 'inner' join so as to only keep rows where raw data made it all the way to the partitioning stage)
df_master = df_ref.join([part_df, RLM_df], how='inner')
#%% 30 minute stats
# add raw filenames as a column in the data frame
df_master['raw_file'] = raw_files
# fraction of total CH4 flux that is ebullition
df_master['frac_eb_q'] = df_master['CH4_eb_q']/df_master['CH4_tot']
df_master['frac_eb_c'] = df_master['CH4_eb_c']/df_master['CH4_tot']
df_master['frac_eb_T'] = df_master['CH4_eb_T']/df_master['CH4_tot']
# diffusive fluxes
df_master['CH4_diff_q'] = df_master['CH4_tot'] - df_master['CH4_eb_q']
df_master['CH4_diff_c'] = df_master['CH4_tot'] - df_master['CH4_eb_c']
df_master['CH4_diff_T'] = df_master['CH4_tot'] - df_master['CH4_eb_T']
# Some unit conversions if desired
if anc_unit_conv == True:
from funcs import air_temp_K, T_dew, P_atm, VP, VP_sat, VPD_name
df_master['T_air_C'] = df_master[air_temp_K] - 273.15; df_master[T_dew] = df_master[T_dew] - 273.15 #[K to C]
df_master[VP] = df_master[VP]/1000; df_master[VP_sat] = df_master[VP_sat]/1000 #[Pa to kPa]
df_master[VPD_name] = df_master[VPD_name]/1000; df_master['P'] = df_master[P_atm]/1000 # [Pa to kPa]
df_master.drop([P_atm, air_temp_K], axis=1, inplace=True)
# co2 flux magnitude (for reference scalar thresholding)
df_master['co2_flux_mag'] = np.absolute(df_master[Fco2_name])
# normalized random error stats
df_master['Ebq_rerr_FebNorm'] = df_master['CH4_ebq_err']/df_master['CH4_eb_q']
df_master['Ebq_rerr_FtotNorm'] = df_master['CH4_ebq_err']/df_master['CH4_tot']
df_master['Diffq_rerr_FdiffNorm'] = df_master['CH4_diffq_err']/df_master['CH4_diff_q']
df_master['Diffq_rerr_FtotNorm'] = df_master['CH4_diffq_err']/df_master['CH4_tot']
df_master['Ebc_rerr_FebNorm'] = df_master['CH4_ebc_err']/df_master['CH4_eb_c']
df_master['Ebc_rerr_FtotNorm'] = df_master['CH4_ebc_err']/df_master['CH4_tot']
df_master['Diffc_rerr_FdiffNorm'] = df_master['CH4_diffc_err']/df_master['CH4_diff_c']
df_master['Diffc_rerr_FtotNorm'] = df_master['CH4_diffc_err']/df_master['CH4_tot']
#%% function for spectral stats on each period
def SpectralStats(tstamp):
# convert timestamp to format of file naming convention
day = tstamp.strftime('%Y%m%d'); datetime = tstamp.strftime('%Y%m%d_%H%M')
# load data; first row is coarse-grained mean, skip it
wvlt_df = pd.read_table(base_path + '/wvlet/%s/'%site_yr + day + '/' + 'wvlet-' + datetime + '.dat', sep='\s+',
names=['Index_wvlt', 'u', 'w', 'T', 'q', 'c', 'm'], delim_whitespace=False,
skiprows=1)
# get canopy height
zc = np.float64(zc_df.loc[tstamp.floor('D')])
    # calculate displacement height as 0.67*canopy height
d = 0.67*zc #[m]
# calculate frequency for filtering
u = np.mean(wvlt_df['u'])
wvlt_df['j'] = (N_wvlt/wvlt_df['Index_wvlt'])*0.05
wvlt_df['fn'] = (zm - d)/(wvlt_df['j']*u)
# filter out low frequency components for partitioning
wvlt_df_filt = wvlt_df[wvlt_df['fn'] > np.float64(fn_LB)]
# add ebullition flags so that stats on diffusive/ebullitive fluxes can be calculated
wvlt_df_filt.loc[:, 'thresh_q_upp'] = df_master.loc[tstamp, 'slope_mq']*wvlt_df_filt.loc[:, 'q'] + (3*(RMSDq_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_q_low'] = df_master.loc[tstamp, 'slope_mq']*wvlt_df_filt.loc[:, 'q'] - (3*(RMSDq_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_c_upp'] = df_master.loc[tstamp, 'slope_mc']*wvlt_df_filt.loc[:, 'c'] + (3*(RMSDc_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_c_low'] = df_master.loc[tstamp, 'slope_mc']*wvlt_df_filt.loc[:, 'c'] - (3*(RMSDc_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'Ebq'] = (wvlt_df_filt.loc[:, 'm'] > wvlt_df_filt.loc[:, 'thresh_q_upp']) | (wvlt_df_filt.loc[:, 'm'] < wvlt_df_filt.loc[:, 'thresh_q_low'])
wvlt_df_filt.loc[:, 'Ebc'] = (wvlt_df_filt.loc[:, 'm'] > wvlt_df_filt.loc[:, 'thresh_c_upp']) | (wvlt_df_filt.loc[:, 'm'] < wvlt_df_filt.loc[:, 'thresh_c_low'])
# do spectral stats
## result objects
    ### master output for the function
out = pd.DataFrame(index=[tstamp])
### frames for scale-wise stats
#### frequency values
fn_res = pd.DataFrame(index=[tstamp])
#### variances
mvar_res = pd.DataFrame(index=[tstamp]); qvar_res = pd.DataFrame(index=[tstamp])
cvar_res = pd.DataFrame(index=[tstamp]); Tvar_res = pd.DataFrame(index=[tstamp]); wvar_res = pd.DataFrame(index=[tstamp])
#### ebullitive covariances
Ebq_cov_res = pd.DataFrame(index=[tstamp]); Ebc_cov_res =
|
pd.DataFrame(index=[tstamp])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import os
import abc
import pathlib
from redis import StrictRedis
from utils import LogMixin, timeit, dftocoo, Redis
S3BASEPATH = 's3://pyrecs-io/input-data'
LOCALBASEPATH = '/'.join((os.path.dirname(os.path.abspath(__file__)), 'data'))
class Dataset(object):
__metaclass__ = abc.ABCMeta
@abc.abstractclassmethod
def load(self):
""" load data """
return
@abc.abstractmethod
def toredis(self):
""" write records to redis """
return
class MovieMetaData(Dataset, LogMixin):
FILES = ['movies.csv']
def __init__(self, **kwargs):
super().__init__()
for key, value in kwargs.items():
setattr(self, key, value)
@classmethod
def todisk(cls, data, path):
path = pathlib.Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
return data.to_csv(path, index=False)
@classmethod
def __cleanup(cls, data):
# extract the years
years = data.title.str.strip().str.slice(-5, -1)
data['year'] =
|
pd.to_numeric(years, errors='coerce')
|
pandas.to_numeric
|
import os
import time
import csv
import argparse
import shutil
import pandas as pd
from pynytimes import NYTAPI
import datetime
from tenacity import retry, stop_after_attempt, wait_fixed
API_KEY = '<INSERT YOUR KEY HERE>'
MAX_RANK = 10
QUERIES = {
'Tesla': 'Tesla Motors Inc',
'Ford': 'Ford Motor Co',
'General Motors': 'General Motors'
}
INCLUDE_TAGS = [
'Automobiles',
'Electric and Hybrid Vehicles',
'Driverless and Semiautonomous Vehicles'
]
def filter_by_keyword(a, keywords, max_rank=9999):
akw = [kw['value'] for kw in a['keywords']]
for i, kw in enumerate(akw):
if kw in keywords and i <= max_rank:
return True
return False
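# Hedged example (synthetic article dict, not real NYT output): an article whose top-ranked
# keyword is "Automobiles" passes the filter, e.g.
#   filter_by_keyword({"keywords": [{"value": "Automobiles"}]}, INCLUDE_TAGS, max_rank=MAX_RANK) -> True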
@retry(stop=stop_after_attempt(5), wait=wait_fixed(5))
def get_relevant_headlines(nyt, query, start, end, max_results=200, keywords=None):
# Get datetime objects
s, e = to_datetime(start), to_datetime(end)
# Query articles
articles = nyt.article_search(
query=query,
results=max_results,
dates={
"begin": s,
"end": e
},
options={
"sort": "oldest",
}
)
# Filter
if keywords is not None:
articles = [a for a in articles if filter_by_keyword(a, keywords, max_rank=MAX_RANK)]
# Prune
if not len(articles) > 0:
return pd.DataFrame()
headlines = []
for a in articles:
failed = False
d = {}
try:
d['publication'] = a['source']
except:
d['publication'] = 'The New York Times'
try:
d['abstract'] = a['abstract']
except:
d['abstract'] = ''
try:
d['section'] = a['section_name']
except:
d['section'] = 'Unknown'
try:
d['title'] = a['headline']['main']
d['date'] = a['pub_date']
d['matches'] = query
except:
failed = True
for i in range(MAX_RANK):
try:
d[f'tag{i}'] = a['keywords'][i]['value']
except:
d[f'tag{i}'] = ''
if not failed:
headlines.append(d)
return pd.DataFrame(headlines)
def to_datetime(dt):
if isinstance(dt, pd.Timestamp):
return dt.to_pydatetime()
if isinstance(dt, str):
return datetime.datetime.strptime(dt, '%Y-%m-%d') # Must be in this format
if isinstance(dt, datetime.date):
return datetime.datetime(dt.year, dt.month, dt.day)
if isinstance(dt, datetime.datetime):
return dt
raise NotImplementedError(f'Cannot convert object of type {str(type(dt))}')
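# Hedged examples of the accepted inputs (each returns a datetime.datetime):
#   to_datetime("2021-01-15"), to_datetime(datetime.date(2021, 1, 15)),
#   to_datetime(pd.Timestamp("2021-01-15")), to_datetime(datetime.datetime(2021, 1, 15))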
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', "--start_date", help="Start date in ISO format.", type=str, required=False, default='2011-01-01')
parser.add_argument('-e', "--end_date", help="End date in ISO format.", type=str, required=False, default='2021-12-01')
parser.add_argument('-o', "--output_file", help="Location of the output file.", type=str, required=False, default='ny_times.csv')
args = parser.parse_args()
# Create the session
session = NYTAPI(API_KEY, parse_dates=True)
# Directory keeping temporary files
os.makedirs('./tmp/nytimes_chunks', exist_ok=True)
# Split the requests in months to avoid breaking the API
months =
|
pd.date_range(args.start_date, args.end_date, freq='MS')
|
pandas.date_range
|
try:
import camelot
except ModuleNotFoundError:
raise ModuleNotFoundError('Модуль camelot не найден! Проверьте установку и попробуйте еще раз.')
else:
try:
from camelot import read_pdf
except ImportError:
raise ImportError(f'Ошибка импорта функции из camelot-py. Возможно вы установили "camelot"?')
import datetime
import requests
import os
import pandas
import re
import warnings
import sys
warnings.filterwarnings("ignore")  # for cleaner output
_axis_days = {  # day coordinates, as if the PDF sheets were joined into one continuous table
'1': [4, 11],
'2': [14, 21],
'3': [24, 31],
'4': [34, 41],
'5': [44, 51]
}
def download() -> None:
"""Скачивает PDF файл с таблицами в директорию temp относительно выполнения данной функции."""
print('INFO: Начало скачивания файла')
URL = "http://next.krstc.ru:8081/index.php/s/C2FEBCzoT8xECei/download?path=%2F&files=%D0%A1%D0%90%2C%20%D0%98%D0" \
"%A1%2C%D0%9E.pdf" # Постоянная ссылка на скачивание pdf расписания
try:
r = requests.get(URL)
except Exception as e:
raise Exception(f"Невозможно установить соединение! Причина: {e}")
print('INFO: начало записи файла')
with open('temp/temp.pdf', mode="wb") as file:
file.write(r.content)
file.close()
print('INFO: файл успешно записан')
def file_is_exist() -> bool:
"""Проверяет существования CVS таблиц соответствующие текущей дате (модуль datetime)"""
today = datetime.datetime.now().date()
try:
        for page in range(1, 4):  # there are usually 3 tables, so check each of them.
name = str(today) + f'-page-{page}-table-1.csv'
with open(f'temp/{name}', "r") as f:
print(f'INFO: Файл {name} существует.')
f.close()
return True
except FileNotFoundError as e:
print(f'WARNING: Файла не обнаружено! Текст ошибки: {e}')
return False
def _delete_the_files() -> None:
"""Удаляет вчерашние CVS таблицы"""
print('INFO: проверка существования вчерашнего файла для удаления')
yesterday = datetime.datetime.now().date() - datetime.timedelta(days=1)
    for page in range(1, 4):  # there are usually 3 tables, so check each of them.
name = str(yesterday) + f'-page-{page}-table-1.csv'
if os.path.exists(f'temp/{name}'):
print(f'INFO: файл temp/{name} существует, удаление...')
os.remove(f'temp/{name}')
print(f'INFO: файл temp/{name} успешно удален!')
def convert_to_csv() -> None:
"""Обрабатывает PDF файл с помощью camelot-py и переводит в CVS. Данные таблицы нежелательно использовать сразу,
так как их корректность не идеальна. Чтобы скорректировать данные используйте функцию extract_data."""
print('INFO: Начата обработка PDF...')
pdf = camelot.read_pdf('temp/temp.pdf', pages='all')
print('INFO: Обработка PDF закончена!')
print("INFO: Импортирование обработанного PDF...")
name = str(datetime.datetime.now().date()) + '.csv'
pdf.export(path=f'temp/{name}', f='csv')
os.remove('temp/temp.pdf')
print("INFO: Импортировано!")
def import_csv() -> list[pandas.DataFrame]:
"""Импортирует CSV файлы с таблицами в pandas. Возвращает список с таблицами в виде Pandas.DataFrame"""
today = datetime.datetime.now().date()
tables = []
if file_is_exist():
for page in range(1, 4):
name = str(today) + f'-page-{page}-table-1.csv'
            table = pandas.read_csv(f'temp/{name}', names=list(range(0, 36)))  # the names parameter is used
            # for the column names, since pandas by default takes them from the first CSV row
            tables.append(table)  # build the list of tables for further manipulation
elif not file_is_exist():
_delete_the_files()
try:
download()
except Exception as e:
sys.exit(f"ERROR: {e}")
convert_to_csv()
        tables = import_csv()  # recursion is used so that, once the missing data has been downloaded,
        # it is imported into pandas.DataFrame as well
return tables
def correct_axis(tables: list, x: int, y: int = None) -> (list[list[int]], bool):
"""Функция корректирует порядковый номер ячеек с учетом разделений листов PDF и отдает список с списками номеров,
а также bool значение показывающее на разных ли таблицах ячейки"""
lens = [len(tables[0]), len(tables[1]), len(tables[2])]
if y is None:
is_split = False
for no_table, len0 in enumerate(lens):
if x < len0:
corrected_axis = [[no_table, x]]
elif x > len0:
x -= lens[no_table]
else:
for no_table, len0 in enumerate(lens):
if x < y < len0:
corrected_axis = [[no_table, x, y]]
is_split = False
break
elif x < len0 <= y:
corrected_axis = [[no_table, x, len0], [no_table + 1, 0, y - len0]]
if corrected_axis[1][0] >= len(lens):
print('WARN: Исправленные координаты выходят за пределы таблиц')
corrected_axis.remove(corrected_axis[1])
is_split = False
else:
is_split = True
break
x -= lens[no_table]
y -= lens[no_table]
    # guard: the loops above may finish without producing a result
    if 'is_split' not in locals() or 'corrected_axis' not in locals():
raise Exception('ERROR: Ошибка! Исправление осей не выдало ответ!')
return corrected_axis, is_split
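# Illustration of the return format (values hypothetical): a block that fits on one sheet
# yields ([[table_no, start, stop]], False); a block spanning two sheets yields
# ([[n, start, len_of_table_n], [n + 1, 0, remainder]], True).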
def get_index_groups(tables: list[pandas.DataFrame]) -> int:
"""Спрашивает у пользователя группу из которой нужно забирать данные. Возвращает индекс колонки группы."""
table = tables[0]
raw_groups = table.loc[1, :]
groups = raw_groups.dropna()
groups.index = list(range(1, len(groups) + 1))
print('Выберите группу из предложенных: ')
for score, i in enumerate(groups):
print(f'{score + 1}: {i}')
no_group = int(input('(введите порядковый номер группы) >> '))
name_of_group = groups[no_group]
index = raw_groups[raw_groups == name_of_group].index.to_list()[0]
return index
def get_time(tables: list[pandas.DataFrame], day: str) -> pandas.Series:
"""Парсит время из таблицы, так как оно имеет свойство меняться. Возвращает pandas.Series с временем."""
x1, x2 = _axis_days[day][0], _axis_days[day][1]
axis, splited = correct_axis(tables=tables, x=x1, y=x2)
if splited:
time1 = tables[axis[0][0]][2][axis[0][1]:axis[0][2]]
time2 = tables[axis[1][0]][2][axis[0][1]:axis[1][2]]
time = pandas.concat([time1, time2], ignore_index=True)
elif not splited:
time = tables[axis[0][0]][2][axis[0][1]:axis[0][2]]
time.index = list(range(1, len(time) + 1))
else:
raise Exception('ERROR: Критическая ошибка при извлечении даты')
return time
def correct_a_table(table: pandas.DataFrame, is_distant: bool) -> None:
"""Корректирует таблицу с учетом того, что данные в основном 'съезжают' в правую колонку (кабинеты) при обработке
с помощью camelot-py. Так как изменения вносятся сразу в переданную таблицу функция ничего не выдает в ответ"""
for score, cabinet in enumerate(table['cabinets']):
        # Main table correction. Since a teacher's name or a lesson title can end up in the left column
        # (with the room), this loop checks every room value and, if it finds a teacher name or lesson title
        # by regular expression, moves the corresponding values into their columns and removes them from the room cell
if not isinstance(cabinet, float):
pattern_teacher = r'\w+ \w\.\w\.'
teacher = re.findall(pattern_teacher, cabinet)
cabinet = re.sub(pattern_teacher, '', cabinet)
pattern_lesson = r'\s*[А-Я][а-я]+\s[а-я]+\s*[а-я]*\s*|\s*[А-Я][а-я]+\s*\n|\s*[А-Я]+\s*'
lesson = re.findall(pattern_lesson, cabinet)
cabinet = re.sub(pattern_lesson, '', cabinet)
cabinet = re.sub('\n', '', cabinet)
table['cabinets'][score + 1] = cabinet
if lesson:
table['lessons'][score + 1] = lesson
if teacher:
if len(table['teachers'][score + 1]) == 1:
table['teachers'][score + 1].append(teacher[0])
elif len(table['teachers'][score + 1]) == 0:
table['teachers'][score + 1] = teacher
if is_distant:
        # Removes extra characters when there are an ID and a password. Mostly dashes are removed from the ID,
        # and when there are several teachers a slash is removed from the password
for name in ('ids', 'passwords'):
for score, item in enumerate(table[name]):
if isinstance(item, list) and item:
if len(item) == 1:
items = item[0].split()
raw_item = ' '.join(items[1:])
corrected_raw_item = re.sub('-', ' ', raw_item)
table[name][score + 1] = corrected_raw_item
elif len(item) == 2:
for name_item in item:
splited_name_item = name_item.split()
corrected_item = ' '.join(splited_name_item[1:])
corrected_item = re.sub('/', '', corrected_item)
table[name][score + 1].remove(name_item)
table[name][score + 1].append(corrected_item)
table[name][score + 1].reverse()
for name in ('lessons', 'teachers'):
        # Removes extra spaces and line breaks from lesson titles and teacher names
for score, item in enumerate(table[name]):
if isinstance(item, list) and item:
                for i in range(0, len(item)):  # this i is not used inside the loop
if len(item) == 1:
item[0] = re.sub(r'\s{2,}', '', item[0])
table[name][score + 1] = re.sub('\\n', '', item[0])
elif len(item) == 2:
for name_item in item:
corrected_item = re.sub('\n', '', name_item)
corrected_item = re.sub(r'\s{2,}', '', corrected_item)
table[name][score + 1].remove(name_item)
table[name][score + 1].append(corrected_item)
table[name][score + 1].reverse()
for score, lesson in enumerate(table['lessons']):
        # Experimental extraction of merged cells, e.g. practical training sessions
        # Detection relies on the idea that a single lesson listed with several rooms means the cell was merged
        # After that, the values from the last filled row are simply propagated into the merged rows below
cabinet = table['cabinets'][score + 1]
teacher = table['teachers'][score + 1]
if is_distant:
zoom_id = table['ids'][score + 1]
password = table['passwords'][score + 1]
if not pandas.isnull(lesson):
past_lesson = lesson
past_teacher = teacher
if is_distant:
past_id = zoom_id
past_password = password
elif pandas.isnull(lesson):
if pandas.notnull(cabinet):
table['lessons'][score + 1] = past_lesson
table['teachers'][score + 1] = past_teacher
if is_distant:
table['ids'][score + 1] = past_id
table['passwords'][score + 1] = past_password
def is_distant(raw_tables: list, day: str, group_index: int) -> bool:
"""Проверяет дистанционное обучение ли в заданный день, у также заданной группы. Возвращает bool значение дистант
или нет"""
x = _axis_days[day][0] - 1
axis = correct_axis(tables=raw_tables, x=x)[0][0]
place = raw_tables[axis[0]][group_index][axis[1]]
if place == 'Дистант':
return True
elif place != 'Дистант':
return False
def extract_data(day: str = str(datetime.datetime.now().date())) -> pandas.DataFrame:
"""Обрабатывает информацию из CSV файлов и выдает pandas.DataFrame с расписанием.
Колонки: время, название предмета, кабинет, учитель, если есть дистант, то еще и ИД и пароль"""
main_tables = import_csv()
index_group = get_index_groups(tables=main_tables)
x1 = _axis_days[day][0]
x2 = _axis_days[day][1]
axis, splited = correct_axis(tables=main_tables, x=x1, y=x2)
distant = is_distant(main_tables, day, index_group)
    if splited:  # select the cells from the pandas.DataFrame (imported from CSV, see import_csv())
day_axis1 = axis[0]
day_axis2 = axis[1]
        s1 = main_tables[day_axis1[0]][index_group][day_axis1[1]:day_axis1[2]]  # since the data is split (the
        s2 = main_tables[day_axis2[0]][index_group][day_axis2[1]:day_axis2[2]]  # splited flag), two sheets are parsed at once
cab1 = main_tables[day_axis1[0]][index_group + 1][day_axis1[1]:day_axis1[2]]
cab2 = main_tables[day_axis2[0]][index_group + 1][day_axis2[1]:day_axis2[2]]
series = pandas.concat([s1, s2], ignore_index=True)
cabinets = pandas.concat([cab1, cab2], ignore_index=True)
else:
day_axis = axis[0]
series = main_tables[day_axis[0]][index_group][day_axis[1]:day_axis[2]]
cabinets = main_tables[day_axis[0]][index_group + 1][day_axis[1]:day_axis[2]]
series.index = cabinets.index = list(range(1, len(series) + 1))
    # Regular expressions are used below to extract the values.
    # The extracted values are also removed from the series variable right away, so the next extraction is more precise
teachers = series.str.findall(r'\n*\w+\s*\n?\w\.\w\.')
series.str.replace(r'\n*\w+\s*\n?\w\.\w\.', '')
lessons = series.str.findall(r'^\s*[А-Я][а-я]+\s[а-я]+\s*|^\s*[А-Я][а-я]+\s*|^\s*[А-Я]+\s*')
series.str.replace(r'^\s*[А-Я][а-я]+\s[а-я]+\s*|^\s*[А-Я][а-я]+\s*\n|^\s*[А-Я]+\s*', '')
time = get_time(main_tables, day)
    if distant:  # for distance learning, the meeting IDs and passwords are also parsed and added to the table; otherwise the values are simply combined
ids = series.str.findall(r"ИК\:*\s*[\d*\s*|\d*\-]*")
series.str.replace(r"ИК\:*\s*[\d*\s*|\d*\-]*", '')
passwords = series.str.findall(r'Пароль\n?.+|Код\n?.+')
series.str.replace(r'Пароль\n?.+|Код\n?.+', '')
table = pandas.concat(objs=[time, lessons, cabinets, teachers, ids, passwords],
axis=1,
ignore_index=True)
table.index = list(range(1, len(table) + 1))
table.columns = ["time", "lessons", "cabinets", "teachers", "ids", "passwords"]
elif not distant:
table = pandas.concat(objs=[time, lessons, cabinets, teachers],
axis=1,
ignore_index=True)
table.index = list(range(1, len(table) + 1))
table.columns = ["time", "lessons", "cabinets", "teachers"]
correct_a_table(table, distant)
return table
if __name__ == '__main__':
day = input('Введите на какой день вам нужно расписание в виде цифры (понедельник - 1, вторник - 2, и тд.) >> ')
result = extract_data(day=day)
|
pandas.set_option('display.max_columns', None)
|
pandas.set_option
|
import datetime
import glob
import re
import time
import pickle
import pandas as pd
import numpy as np
from datetime import datetime
def mergefiles(path='E:\py-dev\COVID-19\csse_covid_19_data\csse_covid_19_daily_reports', extension='csv'):
    # Read the stored column-name variants so the correct processing branch can be chosen
pickle_in = open("columns.pickle", "rb")
colnames = pickle.load(pickle_in)
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), 'Read column names dictionary')
# Remove any . used in extension
extension = re.sub('[.]', '', extension)
# Create final file path to scan
finalpath = path + '\\*.' + extension
# Find the files which are present
files = glob.glob(finalpath)
# Create empty list to be used in for loop
df = []
# Get current time to create job id
current_time = int(time.time())
# For loop to find the files and process them
for f in files:
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), '[START] Reading', f)
temp = pd.read_csv(f)
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), '[FINISHED] Read', f)
# Classify and process based on which columns were detected
if temp.columns.to_list() == colnames[0]:
# Print logs
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), '[INFO] Case 0 identified for', f)
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), '[START] Processing ', f)
# Create missing columns
temp['Latitude'] = np.nan
temp['Longitude'] = np.nan
temp['County'] = np.nan
temp['Active'] = np.nan
# Rename some columns to remove special characters in column names
temp.rename(columns={'Province/State': 'Province_State',
'Country/Region': 'Country_Region',
'Last Update': 'Last_Update'}, inplace=True)
# Fill 0 instead of NaN or inf or NA in integer columns
temp[['Confirmed', 'Deaths', 'Recovered']] = temp[['Confirmed', 'Deaths', 'Recovered']].fillna(value=0)
# Create a column based on the file name
date_time_str = f.split('\\')[-1].split('.')[0].split('_')[0]
date_time_obj = datetime.strptime(date_time_str, '%m-%d-%Y').date()
temp['Created_Date'] = date_time_obj
# Converting columns to correct data types
# Convert to string
temp['Province_State'] = temp['Province_State'].astype(str)
temp['Country_Region'] = temp['Country_Region'].astype(str)
# Convert to date time
try:
temp['Last_Update'] = pd.to_datetime(temp['Last_Update'], format='%m/%d/%y %H:%M')
except ValueError:
try:
temp['Last_Update'] = pd.to_datetime(temp['Last_Update'], format='%Y-%m-%dT%H:%M:%S')
except ValueError:
try:
temp['Last_Update'] = pd.to_datetime(temp['Last_Update'], format='%m/%d/%Y %H:%M')
except ValueError as error:
print(error)
# Convert to date
temp['Created_Date'] = pd.to_datetime(temp['Created_Date'], format='%Y-%m-%d')
temp['Created_Date'] = temp['Created_Date'].dt.normalize()
# Convert to int32
temp['Confirmed'] = temp['Confirmed'].astype('int32')
temp['Deaths'] = temp['Deaths'].astype('int32')
temp['Recovered'] = temp['Recovered'].astype('int32')
df.append(temp)
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), '[FINISHED] Processing ', f)
elif temp.columns.to_list() == colnames[1]:
# Print logs
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), '[INFO] Case 1 identified for', f)
print(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), '[START] Processing ', f)
# Create missing columns
temp['County'] = np.nan
temp['Active'] = np.nan
# Rename some columns to remove special characters in column names
temp.rename(columns={'Province/State': 'Province_State',
'Country/Region': 'Country_Region',
'Last Update': 'Last_Update'}, inplace=True)
# Fill 0 instead of NaN or inf or NA in integer columns
temp[['Confirmed', 'Deaths', 'Recovered']] = temp[['Confirmed', 'Deaths', 'Recovered']].fillna(value=0)
# Create a column based on the file name
date_time_str = f.split('\\')[-1].split('.')[0].split('_')[0]
date_time_obj = datetime.strptime(date_time_str, '%m-%d-%Y').date()
temp['Created_Date'] = date_time_obj
# Converting columns to correct data types
# Convert to string
temp['Province_State'] = temp['Province_State'].astype(str)
temp['Country_Region'] = temp['Country_Region'].astype(str)
# Convert to date time
try:
temp['Last_Update'] = pd.to_datetime(temp['Last_Update'], format='%m/%d/%y %H:%M')
except ValueError:
try:
temp['Last_Update'] =
|
pd.to_datetime(temp['Last_Update'], format='%Y-%m-%dT%H:%M:%S')
|
pandas.to_datetime
|
import operator
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.tests.apply.common import frame_transform_kernels
from pandas.tests.frame.common import zip_frames
def unpack_obj(obj, klass, axis):
"""
Helper to ensure we have the right type of object for a test parametrized
over frame_or_series.
"""
if klass is not DataFrame:
obj = obj["A"]
if axis != 0:
pytest.skip(f"Test is only for DataFrame with axis={axis}")
return obj
def test_transform_ufunc(axis, float_frame, frame_or_series):
# GH 35964
obj = unpack_obj(float_frame, frame_or_series, axis)
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(obj)
# ufunc
result = obj.transform(np.sqrt, axis=axis)
expected = f_sqrt
tm.assert_equal(result, expected)
@pytest.mark.parametrize("op", frame_transform_kernels)
def test_transform_groupby_kernel(axis, float_frame, op, request):
# GH 35964
args = [0.0] if op == "fillna" else []
if axis == 0 or axis == "index":
ones = np.ones(float_frame.shape[0])
else:
ones = np.ones(float_frame.shape[1])
expected = float_frame.groupby(ones, axis=axis).transform(op, *args)
result = float_frame.transform(op, axis, *args)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"ops, names",
[
([np.sqrt], ["sqrt"]),
([np.abs, np.sqrt], ["absolute", "sqrt"]),
(np.array([np.sqrt]), ["sqrt"]),
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
],
)
def test_transform_listlike(axis, float_frame, ops, names):
# GH 35964
other_axis = 1 if axis in {0, "index"} else 0
with np.errstate(all="ignore"):
expected = zip_frames([op(float_frame) for op in ops], axis=other_axis)
if axis in {0, "index"}:
expected.columns = MultiIndex.from_product([float_frame.columns, names])
else:
expected.index = MultiIndex.from_product([float_frame.index, names])
result = float_frame.transform(ops, axis=axis)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ops", [[], np.array([])])
def test_transform_empty_listlike(float_frame, ops, frame_or_series):
obj = unpack_obj(float_frame, frame_or_series, 0)
with pytest.raises(ValueError, match="No transform functions were provided"):
obj.transform(ops)
@pytest.mark.parametrize("box", [dict, Series])
def test_transform_dictlike(axis, float_frame, box):
# GH 35964
if axis == 0 or axis == "index":
e = float_frame.columns[0]
expected = float_frame[[e]].transform(np.abs)
else:
e = float_frame.index[0]
expected = float_frame.iloc[[0]].transform(np.abs)
result = float_frame.transform(box({e: np.abs}), axis=axis)
tm.assert_frame_equal(result, expected)
def test_transform_dictlike_mixed():
# GH 40018 - mix of lists and non-lists in values of a dictionary
df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})
result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
expected = DataFrame(
[[1.0, 1, 1.0], [2.0, 4, 2.0]],
columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"ops",
[
{},
{"A": []},
{"A": [], "B": "cumsum"},
{"A": "cumsum", "B": []},
{"A": [], "B": ["cumsum"]},
{"A": ["cumsum"], "B": []},
],
)
def test_transform_empty_dictlike(float_frame, ops, frame_or_series):
obj = unpack_obj(float_frame, frame_or_series, 0)
with pytest.raises(ValueError, match="No transform functions were provided"):
obj.transform(ops)
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_udf(axis, float_frame, use_apply, frame_or_series):
# GH 35964
obj = unpack_obj(float_frame, frame_or_series, axis)
# transform uses UDF either via apply or passing the entire DataFrame
def func(x):
# transform is using apply iff x is not a DataFrame
if use_apply == isinstance(x, frame_or_series):
# Force transform to fallback
raise ValueError
return x + 1
result = obj.transform(func, axis=axis)
expected = obj + 1
tm.assert_equal(result, expected)
@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"])
def test_transform_method_name(method):
# GH 19760
df = DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
# Import libraries
import pandas  # import the pandas library
import time  # import the time library
import datetime  # import the date and time library
from datetime import datetime  # import the datetime class
import os  # import the os library
from termcolor import colored  # import the termcolor library
import sqlite3  # import the sqlite3 library
os.system('CLS')  # clear the terminal
# Section: loading data from the CSV (database)
"""-----------------------------------------------------------------------------------------------------------------------"""
conn = sqlite3.connect('./database.db')  # connection to the database
matrixpandas =
|
pandas.read_sql_query("SELECT * FROM productos", conn)
|
pandas.read_sql_query
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
isc =
|
pd.read_csv("information_schema.columns.csv")
|
pandas.read_csv
|
####################
# Import Libraries
####################
import os
import sys
from PIL import Image
import cv2
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning import loggers
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
import albumentations as A
import timm
from omegaconf import OmegaConf
import glob
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from nnAudio.Spectrogram import CQT1992v2, CQT2010v2
from scipy import signal
####################
# Utils
####################
def get_score(y_true, y_pred):
score = roc_auc_score(y_true, y_pred)
return score
def load_pytorch_model(ckpt_name, model, ignore_suffix='model'):
state_dict = torch.load(ckpt_name, map_location='cpu')["state_dict"]
new_state_dict = {}
for k, v in state_dict.items():
name = k
if name.startswith(str(ignore_suffix)+"."):
name = name.replace(str(ignore_suffix)+".", "", 1) # remove `model.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=False)
return model
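# Hedged usage sketch (hypothetical checkpoint path, mirroring the inference code below):
#   m = timm.create_model('efficientnet_b0', num_classes=1, pretrained=False, in_chans=1)
#   m = load_pytorch_model('fold0/ckpt/best.ckpt', m, ignore_suffix='model')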
class CWT(nn.Module):
def __init__(
self,
wavelet_width,
fs,
lower_freq,
upper_freq,
n_scales,
size_factor=1.0,
border_crop=0,
stride=1
):
super().__init__()
self.initial_wavelet_width = wavelet_width
self.fs = fs
self.lower_freq = lower_freq
self.upper_freq = upper_freq
self.size_factor = size_factor
self.n_scales = n_scales
self.wavelet_width = wavelet_width
self.border_crop = border_crop
self.stride = stride
wavelet_bank_real, wavelet_bank_imag = self._build_wavelet_kernel()
self.wavelet_bank_real = nn.Parameter(wavelet_bank_real, requires_grad=False)
self.wavelet_bank_imag = nn.Parameter(wavelet_bank_imag, requires_grad=False)
self.kernel_size = self.wavelet_bank_real.size(3)
def _build_wavelet_kernel(self):
s_0 = 1 / self.upper_freq
s_n = 1 / self.lower_freq
base = np.power(s_n / s_0, 1 / (self.n_scales - 1))
scales = s_0 * np.power(base, np.arange(self.n_scales))
frequencies = 1 / scales
truncation_size = scales.max() * np.sqrt(4.5 * self.initial_wavelet_width) * self.fs
one_side = int(self.size_factor * truncation_size)
kernel_size = 2 * one_side + 1
k_array = np.arange(kernel_size, dtype=np.float32) - one_side
t_array = k_array / self.fs
wavelet_bank_real = []
wavelet_bank_imag = []
for scale in scales:
norm_constant = np.sqrt(np.pi * self.wavelet_width) * scale * self.fs / 2.0
scaled_t = t_array / scale
exp_term = np.exp(-(scaled_t ** 2) / self.wavelet_width)
kernel_base = exp_term / norm_constant
kernel_real = kernel_base * np.cos(2 * np.pi * scaled_t)
kernel_imag = kernel_base * np.sin(2 * np.pi * scaled_t)
wavelet_bank_real.append(kernel_real)
wavelet_bank_imag.append(kernel_imag)
wavelet_bank_real = np.stack(wavelet_bank_real, axis=0)
wavelet_bank_imag = np.stack(wavelet_bank_imag, axis=0)
wavelet_bank_real = torch.from_numpy(wavelet_bank_real).unsqueeze(1).unsqueeze(2)
wavelet_bank_imag = torch.from_numpy(wavelet_bank_imag).unsqueeze(1).unsqueeze(2)
return wavelet_bank_real, wavelet_bank_imag
def forward(self, x):
x = x.unsqueeze(dim=0)
border_crop = self.border_crop // self.stride
start = border_crop
end = (-border_crop) if border_crop > 0 else None
# x [n_batch, n_channels, time_len]
out_reals = []
out_imags = []
in_width = x.size(2)
out_width = int(np.ceil(in_width / self.stride))
        pad_along_width = max((out_width - 1) * self.stride + self.kernel_size - in_width, 0)  # clamp at zero; np.max(value, 0) would treat 0 as the axis argument
padding = pad_along_width // 2 + 1
for i in range(3):
# [n_batch, 1, 1, time_len]
x_ = x[:, i, :].unsqueeze(1).unsqueeze(2)
out_real = nn.functional.conv2d(x_, self.wavelet_bank_real, stride=(1, self.stride), padding=(0, padding))
out_imag = nn.functional.conv2d(x_, self.wavelet_bank_imag, stride=(1, self.stride), padding=(0, padding))
out_real = out_real.transpose(2, 1)
out_imag = out_imag.transpose(2, 1)
out_reals.append(out_real)
out_imags.append(out_imag)
out_real = torch.cat(out_reals, axis=1)
out_imag = torch.cat(out_imags, axis=1)
out_real = out_real[:, :, :, start:end]
out_imag = out_imag[:, :, :, start:end]
scalograms = torch.sqrt(out_real ** 2 + out_imag ** 2)
return scalograms[0]
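# Hedged shape sketch (synthetic input): with the settings used in the dataset below
# (wavelet_width=8, fs=2048, lower_freq=20, upper_freq=1024, n_scales=384, stride=8),
# CWT(...)(torch.randn(3, 4096)) yields a scalogram of shape (3, 384, ~512).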
####################
# Config
####################
conf_dict = {'batch_size': 8,#32,
'epoch': 30,
'height': 512,#640,
'width': 512,
'model_name': 'efficientnet_b0',
'lr': 0.001,
'drop_rate': 0.0,
'drop_path_rate': 0.0,
'data_dir': '../input/seti-breakthrough-listen',
'model_path': None,
'output_dir': './',
'seed': 2021,
'snap': 1}
conf_base = OmegaConf.create(conf_dict)
####################
# Dataset
####################
class G2NetDataset(Dataset):
def __init__(self, df, transform=None, conf=None, train=True):
self.df = df.reset_index(drop=True)
self.dir_names = df['dir'].values
self.labels = df['target'].values
self.wave_transform = [
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop'),
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='blackmanharris'),
CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=8, bins_per_octave=8, window='nuttall'),
CWT(wavelet_width=8,fs=2048,lower_freq=20,upper_freq=1024,n_scales=384,stride=8)]
#self.wave_transform = CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop')
#self.wave_transform = CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=1, bins_per_octave=14, window='flattop')
#self.wave_transform = CQT2010v2(sr=2048, fmin=10, fmax=1024, hop_length=32, n_bins=32, bins_per_octave=8, window='flattop')
self.stat = [
[0.013205823003608798,0.037445450696502146],
[0.009606230606511236,0.02489221471650526], # 10000 sample
[0.009523397709568962,0.024628402379527688],
[0.0010164694150735158,0.0015815201992169022]] # 10000 sample
        # it might be worth trying different hop lengths
self.transform = transform
self.conf = conf
self.train = train
def __len__(self):
return len(self.df)
def apply_qtransform(self, waves, transform):
#print(waves.shape)
#waves = np.hstack(waves)
#print(np.max(np.abs(waves), axis=1))
#waves = waves / np.max(np.abs(waves), axis=1, keepdims=True)
#waves = waves / np.max(waves)
waves = waves / 4.6152116213830774e-20
waves = torch.from_numpy(waves).float()
image = transform(waves)
return image
def __getitem__(self, idx):
img_id = self.df.loc[idx, 'id']
file_path = os.path.join(self.dir_names[idx],"{}/{}/{}/{}.npy".format(img_id[0], img_id[1], img_id[2], img_id))
waves = np.load(file_path)
label = torch.tensor([self.labels[idx]]).float()
image1 = self.apply_qtransform(waves, self.wave_transform[0])
image1 = image1.squeeze().numpy().transpose(1,2,0)
image1 = cv2.vconcat([image1[:,:,0],image1[:,:,1],image1[:,:,2]])
image1 = (image1-self.stat[0][0])/self.stat[0][1]
image1 = cv2.resize(image1, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image2 = self.apply_qtransform(waves, self.wave_transform[1])
image2 = image2.squeeze().numpy().transpose(1,2,0)
image2 = cv2.vconcat([image2[:,:,0],image2[:,:,1],image2[:,:,2]])
image2 = (image2-self.stat[1][0])/self.stat[1][1]
image2 = cv2.resize(image2, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image3 = self.apply_qtransform(waves, self.wave_transform[2])
image3 = image3.squeeze().numpy().transpose(1,2,0)
image3 = cv2.vconcat([image3[:,:,0],image3[:,:,1],image3[:,:,2]])
image3 = (image3-self.stat[2][0])/self.stat[2][1]
image3 = cv2.resize(image3, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image4 = self.apply_qtransform(waves, self.wave_transform[3])
image4 = image4.squeeze().numpy().transpose(1,2,0)
image4 = cv2.vconcat([image4[:,:,0],image4[:,:,1],image4[:,:,2]])
image4 = (image4-self.stat[3][0])/self.stat[3][1]
image4 = cv2.resize(image4, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
#if self.transform is not None:
# image = self.transform(image=image)['image']
image1 = torch.from_numpy(image1).unsqueeze(dim=0)
image2 = torch.from_numpy(image2).unsqueeze(dim=0)
image3 = torch.from_numpy(image3).unsqueeze(dim=0)
image4 = torch.from_numpy(image4).unsqueeze(dim=0)
return image1, image2, image3, image4, label
####################
# Data Module
####################
class SETIDataModule(pl.LightningDataModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
# OPTIONAL, called only on 1 GPU/machine(for download or tokenize)
def prepare_data(self):
pass
# OPTIONAL, called for every GPU/machine
def setup(self, stage=None, fold=None):
if stage == 'test':
#test_df = pd.read_csv(os.path.join(self.conf.data_dir, "sample_submission.csv"))
#test_df['dir'] = os.path.join(self.conf.data_dir, "test")
#self.test_dataset = G2NetDataset(test_df, transform=None,conf=self.conf, train=False)
df = pd.read_csv(os.path.join(self.conf.data_dir, "training_labels.csv"))
df['dir'] = os.path.join(self.conf.data_dir, "train")
# cv split
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.conf.seed)
for n, (train_index, val_index) in enumerate(skf.split(df, df['target'])):
df.loc[val_index, 'fold'] = int(n)
df['fold'] = df['fold'].astype(int)
train_df = df[df['fold'] != fold]
self.valid_df = df[df['fold'] == fold]
self.valid_dataset = G2NetDataset(self.valid_df, transform=None,conf=self.conf, train=False)
# ====================================================
# Inference function
# ====================================================
def inference(models, test_loader):
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
raw_probs = [[] for i in range(len(models))]
probs = []
probs_flattop = []
probs_blackmanharris = []
probs_nuttall = []
probs_cwt = []
with torch.no_grad():
for i, (images) in tk0:
images1 = images[0].cuda()
images2 = images[1].cuda()
images3 = images[2].cuda()
images4 = images[3].cuda()
avg_preds = []
flattop = []
blackmanharris = []
nuttall = []
cwt = []
for mid, model in enumerate(models):
y_preds_1 = model(images1)
y_preds_2 = model(images2)
y_preds_3 = model(images3)
y_preds_4 = model(images4)
y_preds = (y_preds_1 + y_preds_2 + y_preds_3 + y_preds_4)/4
avg_preds.append(y_preds.sigmoid().to('cpu').numpy())
flattop.append(y_preds_1.sigmoid().to('cpu').numpy())
blackmanharris.append(y_preds_2.sigmoid().to('cpu').numpy())
nuttall.append(y_preds_3.sigmoid().to('cpu').numpy())
cwt.append(y_preds_4.sigmoid().to('cpu').numpy())
#raw_probs[mid].append(y_preds.sigmoid().to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
flattop = np.mean(flattop, axis=0)
blackmanharris = np.mean(blackmanharris, axis=0)
nuttall = np.mean(nuttall, axis=0)
cwt = np.mean(cwt, axis=0)
probs.append(avg_preds)
probs_flattop.append(flattop)
probs_blackmanharris.append(blackmanharris)
probs_nuttall.append(nuttall)
probs_cwt.append(cwt)
#for mid in range(len(models)):
# raw_probs[mid] = np.concatenate(raw_probs[mid])
probs = np.concatenate(probs)
probs_flattop = np.concatenate(probs_flattop)
probs_blackmanharris = np.concatenate(probs_blackmanharris)
probs_nuttall = np.concatenate(probs_nuttall)
probs_cwt = np.concatenate(probs_cwt)
return probs, probs_flattop, probs_blackmanharris, probs_nuttall, probs_cwt#, raw_probs
####################
# Train
####################
def main():
conf_cli = OmegaConf.from_cli()
conf = OmegaConf.merge(conf_base, conf_cli)
print(OmegaConf.to_yaml(conf))
seed_everything(2021)
# get model path
model_path = []
for i in range(5):
target_model = glob.glob(os.path.join(conf.model_dir, f'fold{i}/ckpt/*epoch*.ckpt'))
scores = [float(os.path.splitext(os.path.basename(i))[0].split('=')[-1]) for i in target_model]
model_path.append(target_model[scores.index(max(scores))])
models = []
for ckpt in model_path:
m = timm.create_model(model_name=conf.model_name, num_classes=1, pretrained=False, in_chans=1)
m = load_pytorch_model(ckpt, m, ignore_suffix='model')
m.cuda()
m.eval()
models.append(m)
# make oof
oof_df = pd.DataFrame()
oof_df_flattop = pd.DataFrame()
oof_df_blackmanharris = pd.DataFrame()
oof_df_nuttall = pd.DataFrame()
oof_df_cwt = pd.DataFrame()
for f, m in enumerate(models):
data_module = SETIDataModule(conf)
data_module.setup(stage='test', fold=f)
valid_df = data_module.valid_df
valid_dataset = data_module.valid_dataset
valid_loader = DataLoader(valid_dataset, batch_size=conf.batch_size, num_workers=4, shuffle=False, pin_memory=True, drop_last=False)
predictions, probs_flattop, probs_blackmanharris, probs_nuttall, probs_cwt = inference([m], valid_loader)
valid_df['preds'] = predictions
oof_df = pd.concat([oof_df, valid_df])
valid_df['preds'] = probs_flattop
oof_df_flattop = pd.concat([oof_df_flattop, valid_df])
valid_df['preds'] = probs_blackmanharris
oof_df_blackmanharris = pd.concat([oof_df_blackmanharris, valid_df])
valid_df['preds'] = probs_nuttall
oof_df_nuttall =
|
pd.concat([oof_df_nuttall, valid_df])
|
pandas.concat
|
# -*- coding: utf-8 -*-
import pandas as pd
class BasicCleaning:
@classmethod
def CleanData(cls, path_to_data, var):
        data_set = pd.read_csv(path_to_data, sep=',')
import sys
import time
import pandas as pd
import numpy as np
import copyreg, types
from tqdm import tqdm
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-talk')
plt.style.use('bmh')
#plt.rcParams['font.family'] = 'DejaVu Sans Mono'
plt.rcParams['font.size'] = 9.5
plt.rcParams['font.weight'] = 'medium'
# =======================================================
# Symmetric CUSUM Filter [2.5.2.1]
def getTEvents(gRaw, h):
"""cusum filter
args
----
gRaw: array-like
h: int() or float()
returns
-------
pd.DatetimeIndex()
"""
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff().dropna().abs()
for i in tqdm(diff.index[1:]):
try:
pos, neg = float(sPos+diff.loc[i]), float(sNeg+diff.loc[i])
except Exception as e:
print(e)
print(sPos+diff.loc[i], type(sPos+diff.loc[i]))
print(sNeg+diff.loc[i], type(sNeg+diff.loc[i]))
break
sPos, sNeg=max(0., pos), min(0., neg)
if sNeg<-h:
sNeg=0;tEvents.append(i)
elif sPos>h:
sPos=0;tEvents.append(i)
return pd.DatetimeIndex(tEvents)
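# Usage sketch (illustrative): with `close` a pd.Series of prices on a
# DatetimeIndex, sample an event whenever the cumulative absolute log-return
# since the previous event exceeds 1%:
#
#   tEvents = getTEvents(close, h=0.01)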
# =======================================================
# Daily Volatility Estimator [3.1]
## for whatever reason the dates are not aligned for the return calculation
## must account for it for computation
def getDailyVol(close,span0=100):
# daily vol reindexed to close
df0=close.index.searchsorted(close.index-pd.Timedelta(days=1))
#bp()
df0=df0[df0>0]
#bp()
df0=(pd.Series(close.index[df0-1],
index=close.index[close.shape[0]-df0.shape[0]:]))
#bp()
try:
df0=close.loc[df0.index]/close.loc[df0.values].values-1 # daily rets
except Exception as e:
print(e)
print('adjusting shape of close.loc[df0.index]')
cut = close.loc[df0.index].shape[0] - close.loc[df0.values].shape[0]
df0=close.loc[df0.index].iloc[:-cut]/close.loc[df0.values].values-1
df0=df0.ewm(span=span0).std().rename('dailyVol')
return df0
# =======================================================
# Triple-Barrier Labeling Method [3.2]
def applyPtSlOnT1(close,events,ptSl,molecule):
# apply stop loss/profit taking, if it takes place before t1 (end of event)
events_=events.loc[molecule]
out=events_[['t1']].copy(deep=True)
if ptSl[0]>0: pt=ptSl[0]*events_['trgt']
else: pt=pd.Series(index=events.index) # NaNs
if ptSl[1]>0: sl=-ptSl[1]*events_['trgt']
else: sl=pd.Series(index=events.index) # NaNs
for loc,t1 in events_['t1'].fillna(close.index[-1]).iteritems():
df0=close[loc:t1] # path prices
df0=(df0/close[loc]-1)*events_.at[loc,'side'] # path returns
out.loc[loc,'sl']=df0[df0<sl[loc]].index.min() # earliest stop loss
out.loc[loc,'pt']=df0[df0>pt[loc]].index.min() # earliest profit taking
return out
# =======================================================
# Gettting Time of First Touch (getEvents) [3.3]
def getEvents(close, tEvents, ptSl, trgt, minRet, numThreads,t1=False, side=None):
#1) get target
trgt=trgt.loc[tEvents]
trgt=trgt[trgt>minRet] # minRet
#2) get t1 (max holding period)
if t1 is False:t1=pd.Series(pd.NaT, index=tEvents)
#3) form events object, apply stop loss on t1
if side is None:side_,ptSl_=pd.Series(1.,index=trgt.index), [ptSl[0],ptSl[0]]
else: side_,ptSl_=side.loc[trgt.index],ptSl[:2]
events=(pd.concat({'t1':t1,'trgt':trgt,'side':side_}, axis=1)
.dropna(subset=['trgt']))
df0=mpPandasObj(func=applyPtSlOnT1,pdObj=('molecule',events.index),
numThreads=numThreads,close=close,events=events,
ptSl=ptSl_)
events['t1']=df0.dropna(how='all').min(axis=1) #pd.min ignores nan
if side is None:events=events.drop('side',axis=1)
return events
# =======================================================
# Adding Vertical Barrier [3.4]
def addVerticalBarrier(tEvents, close, numDays=1):
t1=close.index.searchsorted(tEvents+pd.Timedelta(days=numDays))
t1=t1[t1<close.shape[0]]
t1=(pd.Series(close.index[t1],index=tEvents[:t1.shape[0]]))
return t1
# =======================================================
# Labeling for side and size [3.5, 3.8]
def getBins(events, close, t1=None):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
-t1 is original vertical barrier series
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
# 1) prices aligned with events
events_ = events.dropna(subset=['t1'])
px = events_.index.union(events_['t1'].values).drop_duplicates()
px = close.reindex(px, method='bfill')
# 2) create out object
out = pd.DataFrame(index=events_.index)
out['ret'] = px.loc[events_['t1'].values].values / px.loc[
events_.index] - 1
if 'side' in events_: out['ret'] *= events_['side'] # meta-labeling
out['bin'] = np.sign(out['ret'])
if 'side' not in events_:
# only applies when not meta-labeling.
# to update bin to 0 when vertical barrier is touched, we need the
# original vertical barrier series since the events['t1'] is the time
# of first touch of any barrier and not the vertical barrier
# specifically. The index of the intersection of the vertical barrier
# values and the events['t1'] values indicate which bin labels needs
# to be turned to 0.
vtouch_first_idx = events[events['t1'].isin(t1.values)].index
out.loc[vtouch_first_idx, 'bin'] = 0.
if 'side' in events_: out.loc[out['ret'] <= 0, 'bin'] = 0 # meta-labeling
return out
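# Usage sketch (illustrative) tying the pieces above together; `close` is a
# pd.Series of prices on a DatetimeIndex and all thresholds are placeholders:
#
#   vol     = getDailyVol(close)
#   tEvents = getTEvents(close, h=vol.mean())
#   t1      = addVerticalBarrier(tEvents, close, numDays=1)
#   events  = getEvents(close, tEvents, ptSl=[1, 1], trgt=vol, minRet=0.005,
#                       numThreads=1, t1=t1)
#   labels  = getBins(events, close, t1=t1)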
# =======================================================
# Expanding getBins to Incorporate Meta-Labeling [3.7]
def getBinsOld(events, close):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
#1) prices aligned with events
events_=events.dropna(subset=['t1'])
px=events_.index.union(events_['t1'].values).drop_duplicates()
px=close.reindex(px,method='bfill')
#2) create out object
out=pd.DataFrame(index=events_.index)
out['ret']=px.loc[events_['t1'].values].values/px.loc[events_.index]-1
if 'side' in events_:out['ret']*=events_['side'] # meta-labeling
out['bin']=np.sign(out['ret'])
if 'side' in events_:out.loc[out['ret']<=0,'bin']=0 # meta-labeling
return out
# =======================================================
# Dropping Unnecessary Labels [3.8]
def dropLabels(events, minPct=.05):
# apply weights, drop labels with insufficient examples
while True:
df0=events['bin'].value_counts(normalize=True)
if df0.min()>minPct or df0.shape[0]<3:break
print('dropped label: ', df0.argmin(),df0.min())
events=events[events['bin']!=df0.argmin()]
return events
# =======================================================
# Linear Partitions [20.4.1]
def linParts(numAtoms,numThreads):
# partition of atoms with a single loop
parts=np.linspace(0,numAtoms,min(numThreads,numAtoms)+1)
parts=np.ceil(parts).astype(int)
return parts
def nestedParts(numAtoms,numThreads,upperTriang=False):
# partition of atoms with an inner loop
parts,numThreads_=[0],min(numThreads,numAtoms)
for num in range(numThreads_):
part=1+4*(parts[-1]**2+parts[-1]+numAtoms*(numAtoms+1.)/numThreads_)
part=(-1+part**.5)/2.
parts.append(part)
parts=np.round(parts).astype(int)
if upperTriang: # the first rows are heaviest
parts=np.cumsum(np.diff(parts)[::-1])
parts=np.append(np.array([0]),parts)
return parts
# =======================================================
# multiprocessing snippet [20.7]
def mpPandasObj(func,pdObj,numThreads=24,mpBatches=1,linMols=True,**kargs):
'''
Parallelize jobs, return a dataframe or series
+ func: function to be parallelized. Returns a DataFrame
+ pdObj[0]: Name of argument used to pass the molecule
+ pdObj[1]: List of atoms that will be grouped into molecules
+ kwds: any other argument needed by func
Example: df1=mpPandasObj(func,('molecule',df0.index),24,**kwds)
'''
import pandas as pd
#if linMols:parts=linParts(len(argList[1]),numThreads*mpBatches)
#else:parts=nestedParts(len(argList[1]),numThreads*mpBatches)
if linMols:parts=linParts(len(pdObj[1]),numThreads*mpBatches)
else:parts=nestedParts(len(pdObj[1]),numThreads*mpBatches)
jobs=[]
for i in range(1,len(parts)):
job={pdObj[0]:pdObj[1][parts[i-1]:parts[i]],'func':func}
job.update(kargs)
jobs.append(job)
if numThreads==1:out=processJobs_(jobs)
else: out=processJobs(jobs,numThreads=numThreads)
if isinstance(out[0],pd.DataFrame):df0=pd.DataFrame()
elif isinstance(out[0],pd.Series):df0=pd.Series()
else:return out
for i in out:df0=df0.append(i)
df0=df0.sort_index()
return df0
# =======================================================
# single-thread execution for debugging [20.8]
def processJobs_(jobs):
# Run jobs sequentially, for debugging
out=[]
for job in jobs:
out_=expandCall(job)
out.append(out_)
return out
# =======================================================
# Example of async call to multiprocessing lib [20.9]
import multiprocessing as mp
import datetime as dt
#________________________________
def reportProgress(jobNum,numJobs,time0,task):
# Report progress as asynch jobs are completed
msg=[float(jobNum)/numJobs, (time.time()-time0)/60.]
msg.append(msg[1]*(1/msg[0]-1))
timeStamp=str(dt.datetime.fromtimestamp(time.time()))
msg=timeStamp+' '+str(round(msg[0]*100,2))+'% '+task+' done after '+ \
str(round(msg[1],2))+' minutes. Remaining '+str(round(msg[2],2))+' minutes.'
if jobNum<numJobs:sys.stderr.write(msg+'\r')
else:sys.stderr.write(msg+'\n')
return
#________________________________
def processJobs(jobs,task=None,numThreads=24):
# Run in parallel.
# jobs must contain a 'func' callback, for expandCall
if task is None:task=jobs[0]['func'].__name__
pool=mp.Pool(processes=numThreads)
outputs,out,time0=pool.imap_unordered(expandCall,jobs),[],time.time()
# Process asyn output, report progress
for i,out_ in enumerate(outputs,1):
out.append(out_)
reportProgress(i,len(jobs),time0,task)
pool.close();pool.join() # this is needed to prevent memory leaks
return out
# =======================================================
# Unwrapping the Callback [20.10]
def expandCall(kargs):
# Expand the arguments of a callback function, kargs['func']
func=kargs['func']
del kargs['func']
out=func(**kargs)
return out
# =======================================================
# Pickle Unpickling Objects [20.11]
def _pickle_method(method):
func_name=method.im_func.__name__
obj=method.im_self
cls=method.im_class
return _unpickle_method, (func_name,obj,cls)
#________________________________
def _unpickle_method(func_name,obj,cls):
for cls in cls.mro():
try:func=cls.__dict__[func_name]
except KeyError:pass
else:break
return func.__get__(obj,cls)
#________________________________
# =======================================================
# Estimating uniqueness of a label [4.1]
def mpNumCoEvents(closeIdx,t1,molecule):
'''
Compute the number of concurrent events per bar.
+molecule[0] is the date of the first event on which the weight will be computed
+molecule[-1] is the date of the last event on which the weight will be computed
    Any event that starts before t1[molecule].max() impacts the count.
'''
#1) find events that span the period [molecule[0],molecule[-1]]
t1=t1.fillna(closeIdx[-1]) # unclosed events still must impact other weights
t1=t1[t1>=molecule[0]] # events that end at or after molecule[0]
t1=t1.loc[:t1[molecule].max()] # events that start at or before t1[molecule].max()
#2) count events spanning a bar
iloc=closeIdx.searchsorted(np.array([t1.index[0],t1.max()]))
count=pd.Series(0,index=closeIdx[iloc[0]:iloc[1]+1])
for tIn,tOut in t1.iteritems():count.loc[tIn:tOut]+=1.
return count.loc[molecule[0]:t1[molecule].max()]
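# Usage sketch (illustrative): this is normally run through mpPandasObj (defined below), e.g.
#   numCoEvents = mpPandasObj(mpNumCoEvents, ('molecule', events.index), numThreads,
#                             closeIdx=close.index, t1=events['t1'])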
# =======================================================
# Estimating the average uniqueness of a label [4.2]
def mpSampleTW(t1,numCoEvents,molecule):
    # Derive avg. uniqueness over the event's lifespan
wght=pd.Series(index=molecule)
for tIn,tOut in t1.loc[wght.index].iteritems():
wght.loc[tIn]=(1./numCoEvents.loc[tIn:tOut]).mean()
return wght
# =======================================================
# Sequential Bootstrap [4.5.2]
## Build Indicator Matrix [4.3]
def getIndMatrix(barIx,t1):
# Get Indicator matrix
indM=(pd.DataFrame(0,index=barIx,columns=range(t1.shape[0])))
for i,(t0,t1) in enumerate(t1.iteritems()):indM.loc[t0:t1,i]=1.
return indM
# =======================================================
# Compute average uniqueness [4.4]
def getAvgUniqueness(indM):
# Average uniqueness from indicator matrix
c=indM.sum(axis=1) # concurrency
u=indM.div(c,axis=0) # uniqueness
avgU=u[u>0].mean() # avg. uniqueness
return avgU
# =======================================================
# return sample from sequential bootstrap [4.5]
def seqBootstrap(indM,sLength=None):
# Generate a sample via sequential bootstrap
if sLength is None:sLength=indM.shape[1]
phi=[]
while len(phi)<sLength:
avgU=pd.Series()
for i in indM:
indM_=indM[phi+[i]] # reduce indM
avgU.loc[i]=getAvgUniqueness(indM_).iloc[-1]
prob=avgU/avgU.sum() # draw prob
phi+=[np.random.choice(indM.columns,p=prob)]
return phi
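# Usage sketch (illustrative): build the indicator matrix from the bar index and
# the event end-times, then draw a bootstrap sample that favours uniqueness:
#
#   indM = getIndMatrix(barIx, t1)
#   phi  = seqBootstrap(indM)              # column labels of the drawn events
#   avgU = getAvgUniqueness(indM[phi])     # uniqueness of the drawn sample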
# =======================================================
# Determination of sample weight by absolute return attribution [4.10]
def mpSampleW(t1,numCoEvents,close,molecule):
# Derive sample weight by return attribution
ret=np.log(close).diff() # log-returns, so that they are additive
wght=pd.Series(index=molecule)
for tIn,tOut in t1.loc[wght.index].iteritems():
wght.loc[tIn]=(ret.loc[tIn:tOut]/numCoEvents.loc[tIn:tOut]).sum()
return wght.abs()
# =======================================================
# fractionally differentiated features snippets
# =======================================================
# get weights
def getWeights(d,size):
# thres>0 drops insignificant weights
w=[1.]
for k in range(1,size):
w_ = -w[-1]/k*(d-k+1)
w.append(w_)
w=np.array(w[::-1]).reshape(-1,1)
return w
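# e.g. getWeights(0.5, 4).ravel() -> [-0.0625, -0.125, -0.5, 1.0]
# (the last weight, 1.0, multiplies the most recent observation; earlier lags decay)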
def getWeights_FFD(d,thres):
w,k=[1.],1
while True:
w_=-w[-1]/k*(d-k+1)
if abs(w_)<thres:break
w.append(w_);k+=1
return np.array(w[::-1]).reshape(-1,1)
# =======================================================
# expanding window fractional differentiation
def fracDiff(series, d, thres=0.01):
'''
Increasing width window, with treatment of NaNs
Note 1: For thres=1, nothing is skipped
Note 2: d can be any positive fractional, not necessarily
bounded between [0,1]
'''
#1) Compute weights for the longest series
w=getWeights(d, series.shape[0])
#2) Determine initial calcs to be skipped based on weight-loss threshold
w_=np.cumsum(abs(w))
w_ /= w_[-1]
skip = w_[w_>thres].shape[0]
#3) Apply weights to values
df={}
for name in series.columns:
seriesF, df_=series[[name]].fillna(method='ffill').dropna(), pd.Series()
for iloc in range(skip, seriesF.shape[0]):
loc=seriesF.index[iloc]
if not np.isfinite(series.loc[loc,name]).any():continue # exclude NAs
try:
df_.loc[loc]=np.dot(w[-(iloc+1):,:].T, seriesF.loc[:loc])[0,0]
except:
continue
df[name]=df_.copy(deep=True)
    df=pd.concat(df,axis=1)
    return df
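# Usage sketch (illustrative): expanding-window fractional differentiation of a
# close-price series held in a one-column DataFrame:
#
#   fd = fracDiff(close.to_frame('close'), d=0.4, thres=0.01)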
# -*- coding: utf-8 -*-
import os
import pandas as pd
from .material_properties import MaterialProperties
from .material_transport_properties import MaterialTransportProperties
from .time_series import TimeSeries
__all__ = ['write_hot_start_file', 'read_bc_file',
'write_bc_file']
def write_hot_start_file(file_name, hot_start_list):
"""
Writes the ADH hot start file from a list of hot start data sets
Args:
file_name: file name for *.hot file
hot_start_list: list of HotStartDataSet classes
"""
with open(file_name, 'w') as mesh_file:
for ht in hot_start_list:
mesh_file.write('DATASET\nOBJTYPE "mesh2d"\n')
if len(ht.values.columns) > 1:
mesh_file.write('BEGVEC\n')
else:
mesh_file.write('BEGSCL\n')
mesh_file.write('ND {}\n'.format(len(ht.values)))
mesh_file.write('NC {}\n'.format(ht.number_of_cells))
mesh_file.write('NAME "{}"\n'.format(ht.name))
mesh_file.write('TS 0 0\n')
mesh_file.write(ht.values.to_csv(sep=' ', index=False, header=False).replace('\r\n', '\n'))
mesh_file.write('ENDDS\n')
def read_bc_file(file_name, bc_class):
"""
Reads the *.bc file and fills the AdhModel class
Args:
file_name: File name of the *.bc file
bc_class: Boundary Condition class
"""
# set all not required to deactivated
bc_class.operation_parameters.set_not_required(False)
bc_class.constituent_properties.set_not_required(False)
bc_class.model_constants.set_not_required(False)
bc_string_cards = {'NDS', 'EGS', 'MDS', 'MTS'}
bc_cards = {'NB', 'DB', 'BR', 'OB', 'OFF', 'WER', 'WRS', 'FLP', 'FGT', 'SLUICE', 'SLS'}
xy_series_cards = {'XY1', 'XY2', 'XYC', 'SERIES'}
pc_cards = {'PC', 'OC', 'OS', 'FLX', 'SOUT', 'FOUT'}
temp_data = {}
xy_data_list = []
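    # Parse strategy: every line is dispatched on its leading card keyword; bare
    # numeric lines that follow an XY/SERIES card are accumulated in xy_data_list
    # until 'xy_number_points' rows have been read, then wrapped in a TimeSeries.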
with open(file_name, "r") as file:
for line_number, line in enumerate(file):
# remove new line character
line = line.rstrip()
line_split = line.split()
# remove blank strings
line_split[:] = (part for part in line_split if part != '')
# skip blank line, comment line
if len(line_split) == 0 or line_split[0] == '' or line_split[0][0] == '!':
continue
try:
if line_split[0] == 'OP':
read_op_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'IP':
read_ip_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'CN':
read_cn_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'MP':
read_mp_cards(line_split, bc_class)
elif line_split[0] in bc_string_cards:
read_bc_string_cards(line_split, temp_data)
elif line_split[0] in xy_series_cards:
read_xy_cards(line_split, temp_data)
elif line_split[0] == 'FR':
read_fr_cards(line_split, temp_data)
elif line_split[0] in pc_cards:
read_pc_cards(line_split, bc_class, temp_data)
elif line_split[0] in bc_cards:
read_bc_cards(line_split, bc_class, temp_data)
elif line_split[0] == 'TC':
read_tc_cards(line_split, bc_class)
elif 'xy_type' in temp_data:
xyt = temp_data['xy_type']
if xyt == 'SERIES AWRITE':
labels = ['START_TIME', 'END_TIME', 'TIME_STEP_SIZE', 'UNITS']
xy_data_list.append([float(line_split[0]), float(line_split[1]), float(line_split[2]),
int(line_split[3])])
elif xyt == 'SERIES WIND' or xyt == 'SERIES WAVE':
labels = ['X', 'Y', 'Y2']
xy_data_list.append([float(line_split[0]), float(line_split[1]), float(line_split[2])])
else:
labels = ['X', 'Y']
xy_data_list.append([float(line_split[0]), float(line_split[1])])
# set the time step option in the output control if we read 'SERIES DT'
if xyt == 'SERIES DT':
bc_class.time_control.time_step_option = 'Time step series (SERIES DT)'
bc_class.time_control.max_time_step_size_time_series = temp_data['xy_id']
if len(xy_data_list) == temp_data['xy_number_points']:
ts = TimeSeries()
ts.series_type = xyt
if xyt == 'SERIES AWRITE':
# objs = list(bc_class.output_control.param.output_control_option.get_range())
bc_class.output_control.output_control_option = 'Specify autobuild (SERIES AWRITE)'
ts.units = temp_data['xy_units']
ts.output_units = temp_data['xy_output_units']
ts.time_series = pd.DataFrame.from_records(xy_data_list, columns=labels)
if 'xy_x_location' in temp_data:
ts.x_location = temp_data['xy_x_location']
ts.y_location = temp_data['xy_y_location']
temp_data.pop('xy_x_location')
temp_data.pop('xy_y_location')
xy_data_list = []
# set time series ID as both the key and in the ID column
ts.series_id = temp_data['xy_id']
bc_class.time_series[temp_data['xy_id']] = ts
# empty out temp_data #todo poor practice
temp_data.pop('xy_number_points')
temp_data.pop('xy_id')
temp_data.pop('xy_type')
temp_data.pop('xy_units')
temp_data.pop('xy_output_units')
except:
msg = 'Error reading line {} of file: {}.\nLine: {}'.format(line_number+1,
os.path.basename(file_name), line)
raise IOError(msg)
lists_to_data_frames(bc_class, temp_data)
def lists_to_data_frames(bc_class, temp_data):
"""
Converts temporary lists to DataFrames in the AdhModel class
Args:
bc_class: The ADH boundary condition class that holds the data
temp_data: Dictionary of data that is not stored in the ADH simulation but is needed while reading the file
"""
if 'bc_string_list' in temp_data:
labels = ['CARD', 'ID', 'ID_0', 'ID_1']
df = pd.DataFrame.from_records(temp_data['bc_string_list'], columns=labels)
for x in range(1, len(labels)):
df[labels[x]] = df[labels[x]].astype(dtype='Int64')
bc_class.boundary_strings = df
if 'bc_list' in temp_data:
labels = ['CARD', 'CARD_2', 'STRING_ID', 'XY_ID1', 'XY_ID2', 'XY_ID3']
df = pd.DataFrame.from_records(temp_data['bc_list'], columns=labels)
for x in range(2, len(labels)):
df[labels[x]] = df[labels[x]].astype(dtype='Int64')
bc_class.solution_controls = df
if 'nb_sdr_list' in temp_data:
labels = ['CARD', 'CARD_1', 'S_ID', 'COEF_A', 'COEF_B', 'COEF_C', 'COEF_D', 'COEF_E']
df = pd.DataFrame.from_records(temp_data['nb_sdr_list'], columns=labels)
bc_class.stage_discharge_boundary = df
if 'fr_list' in temp_data:
labels = ['CARD', 'CARD_2', 'STRING_ID', 'REAL_01', 'REAL_02', 'REAL_03', 'REAL_04', 'REAL_05']
df = pd.DataFrame.from_records(temp_data['fr_list'], columns=labels)
bc_class.friction_controls = df
if 'br_list' in temp_data:
labels = ['CARD', 'CARD_1', 'C_0', 'C_1', 'C_2', 'C_3', 'C_4', 'C_5', 'C_6', 'C_7', 'C_8']
df = pd.DataFrame.from_records(temp_data['br_list'], columns=labels)
bc_class.breach_controls = df
if 'wrs_list' in temp_data:
labels = ['CARD', 'NUMBER', 'S_UPSTREAM', 'S_DOWNSTREAM', 'WS_UPSTREAM', 'WS_DOWNSTREAM', 'LENGTH',
'CREST_ELEV', 'HEIGHT']
df = pd.DataFrame.from_records(temp_data['wrs_list'], columns=labels)
bc_class.weirs = df
if 'fgt_list' in temp_data:
labels = ['CARD', 'NUMBER', 'USER', 'S_UPSTREAM', 'S_DOWNSTREAM', 'FS_UPSTREAM', 'FS_DOWNSTREAM', 'COEF_A',
'COEF_B', 'COEF_C', 'COEF_D', 'COEF_E', 'COEF_F', 'LENGTH']
df = pd.DataFrame.from_records(temp_data['fgt_list'], columns=labels)
bc_class.flap_gates = df
if 'sls_list' in temp_data:
labels = ['CARD', 'NUMBER', 'S_UPSTREAM', 'S_DOWNSTREAM', 'SS_UPSTREAM', 'SS_DOWNSTREAM', 'LENGTH',
'TS_OPENING']
        df = pd.DataFrame.from_records(temp_data['sls_list'], columns=labels)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
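        # Reads the csv at `path` in `num_tasks` row-chunks on a thread pool: the
        # first chunk parses the header, later chunks skip past it, and the chunk
        # frames are re-labelled with that header before being concatenated.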
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
        # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
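# the converter returns stripped strings, so leading zeros must be
# preserved and column 0 must stay object dtype (no implicit numeric coercion)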
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four lines (including the comments); header=1
# then takes the second remaining line ('A,B,C') as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
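# rows that are missing trailing fields should be padded with NaN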
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
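# with index_col=False, the empty field created by the trailing comma
# must not be kept as an extra column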
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
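# an empty trailing value should become NaN, promoting column B to float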
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
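# values in scientific notation should be parsed as float64, even when
# they are too large to fit in int64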
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
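# supplying more names than there are data columns should raise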
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
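# a header-only file read with index_col should yield an empty frame
# with the named (but empty) index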
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
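# exponent notation, trailing decimal points, and inf/-inf should all
# be parsed as floats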
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
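# sep=None asks the python engine to sniff the delimiter from the data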
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
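# the thousands separator must not interfere with parsing the decimal point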
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen; this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
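# The default (fast), 'high' and 'round_trip' float parsers are compared below:
# the round-trip parser should reproduce Python's float() exactly, and the
# 'high'-precision parser should accumulate no more error than the default one.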
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
### FIXME in GH9424
raise nose.SkipTest(
"GH 9424; known failure read_csv with duplicate columns")
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one', dtype='f')], axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_usecols_implicit_index_col(self):
# #2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# #2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_pure_python_failover(self):
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = self.read_csv(StringIO(data), comment='#')
expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})
tm.assert_frame_equal(result, expected)
def test_decompression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(open(path, 'rb'), compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
# result = self.read_csv(open(path, 'rb'), compression='bz2')
# tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
with open(path, 'rb') as fin:
if compat.PY3:
result = self.read_csv(fin, compression='bz2')
tm.assert_frame_equal(result, expected)
else:
self.assertRaises(ValueError, self.read_csv,
fin, compression='bz2')
def test_decompression_regex_sep(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
# Test currently only valid with the python engine because of
# regex sep. Temporarily copied to TestPythonParser.
# Here test for ValueError when passing regex sep:
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_memory_map(self):
# it works!
result = self.read_csv(self.csv1, memory_map=True)
def test_disable_bool_parsing(self):
# #2090
data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
result = read_csv(StringIO(data), dtype=object)
self.assertTrue((result.dtypes == object).all())
result = read_csv(StringIO(data), dtype=object, na_filter=False)
self.assertEqual(result['B'][2], '')
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_custom_lineterminator(self):
data = 'a,b,c~1,2,3~4,5,6'
result = self.read_csv(StringIO(data), lineterminator='~')
expected = self.read_csv(StringIO(data.replace('~', '\n')))
tm.assert_frame_equal(result, expected)
data2 = data.replace('~', '~~')
result = self.assertRaises(ValueError, read_csv, StringIO(data2),
lineterminator='~~')
def test_raise_on_passed_int_dtype_with_nas(self):
# #2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
self.assertRaises(Exception, read_csv, StringIO(data), sep=",",
skipinitialspace=True,
dtype={'DOY': np.int64})
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
self.assertEqual(result['Date'][1], '2012-05-12')
self.assertTrue(result['UnitPrice'].isnull().all())
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
result = self.read_csv(StringIO(data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
expected = self.read_csv(StringIO(nice_data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
# too many columns, causes a segfault if not careful
data = "1,2\n3,4,5"
result = self.read_csv(StringIO(data), header=None,
names=lrange(50))
expected = self.read_csv(StringIO(data), header=None,
names=lrange(3)).reindex(columns=lrange(50))
tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(self):
# #3453, this doesn't work with Python parser for some reason
data = ' a,b,c\r"a,b","e,d","f,f"'
result = self.read_csv(StringIO(data), header=None)
expected = self.read_csv(StringIO(data.replace('\r', '\n')),
header=None)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data))
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
def test_warn_if_chunks_have_mismatched_type(self):
# Issue #3866 If chunks are different types and can't
# be coerced using numerical types, then issue warning.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(DtypeWarning):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_invalid_c_parser_opts_with_not_c_parser(self):
from pandas.io.parsers import _c_parser_defaults as c_defaults
data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
engines = 'python', 'python-fwf'
for default in c_defaults:
for engine in engines:
kwargs = {default: object()}
with tm.assertRaisesRegexp(ValueError,
'The %r option is not supported '
'with the %r engine' % (default,
engine)):
read_csv(StringIO(data), engine=engine, **kwargs)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with C-unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_raise_on_sep_with_delim_whitespace(self):
# GH 6607
data = 'a b c\n1 2 3'
with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
self.read_table(StringIO(data), sep='\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with tm.assertRaises(TypeError):
pd.read_csv(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_table(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_fwf(StringIO(data), header=arg)
def test_multithread_stringio_read_csv(self):
# GH 11786
max_row_range = 10000
num_files = 100
bytes_to_df = [
'\n'.join(
['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
).encode() for j in range(num_files)]
files = [BytesIO(b) for b in bytes_to_df]
# Read all files in many threads
pool = ThreadPool(8)
results = pool.map(pd.read_csv, files)
first_result = results[0]
for result in results:
tm.assert_frame_equal(first_result, result)
def test_multithread_path_multipart_read_csv(self):
# GH 11786
num_tasks = 4
file_name = '__threadpool_reader__.csv'
num_rows = 100000
df = self.construct_dataframe(num_rows)
with tm.ensure_clean(file_name) as path:
df.to_csv(path)
final_dataframe = self.generate_multithread_dataframe(path,
num_rows,
num_tasks)
tm.assert_frame_equal(df, final_dataframe)
class TestMiscellaneous(tm.TestCase):
# for tests that don't fit into any of the other classes, e.g. those that
# compare results for different engines or test the behavior when 'engine'
# is not passed
def test_compare_whitespace_regex(self):
# GH 6607
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result_c = pd.read_table(StringIO(data), sep='\s+', engine='c')
result_py = pd.read_table(StringIO(data), sep='\s+', engine='python')
print(result_c)
tm.assert_frame_equal(result_c, result_py)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C-unsupported options with python-unsupported option
# (options will be ignored on fallback, raise)
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep=None,
delim_whitespace=False, dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep='\s', dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), skip_footer=1, dtype={'a': float})
# specify C-unsupported options without python-unsupported options
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep=None, delim_whitespace=False)
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep='\s')
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), skip_footer=1)
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
assert_same_values_and_dtype(result, expected)
assert_same_values_and_dtype(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
assert_same_values_and_dtype(result, expected)
arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_bools(self):
arr = np.array([True, False, True, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, True, False], dtype=bool)
assert_same_values_and_dtype(result, expected)
arr = np.array([True, False, None, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, np.nan, False], dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_decimals(self):
from decimal import Decimal
arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
assert_same_values_and_dtype(result, expected)
class TestUrlGz(tm.TestCase):
def setUp(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
self.local_table = read_table(localtable)
@tm.network
def test_url_gz(self):
url = 'https://raw.github.com/pydata/pandas/master/pandas/io/tests/data/salary.table.gz'
url_table = read_table(url, compression="gzip", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
@tm.network
def test_url_gz_infer(self):
url = ('https://s3.amazonaws.com/pandas-test/salary.table.gz')
url_table = read_table(url, compression="infer", engine="python")
|
tm.assert_frame_equal(url_table, self.local_table)
|
pandas.util.testing.assert_frame_equal
|
#!/usr/bin/env python
# coding: utf-8
import os
import torch
import random
import numpy as np
np.warnings.filterwarnings('ignore')
from datasets import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pandas as pd
# for MEPS
def condition(x, y=None):
return int(x[0][-1]>0)
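# condition() receives a (features, label) pair and returns the group id; it assumes the
# last feature encodes the protected attribute, so samples fall into group 0 (reported as
# "*_non_white") or group 1 ("*_white") in the equalized-coverage evaluation.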
from cqr import helper
from cqr.nonconformist.nc import RegressorNc
from cqr.nonconformist.nc import SignErrorErrFunc
from cqr.nonconformist.nc import QuantileRegAsymmetricErrFunc
def append_statistics(coverage_sample,
length_sample,
method_name,
dataset_name_vec,
method_vec,
coverage_vec,
length_vec,
seed_vec,
test_ratio_vec,
seed,
test_ratio,
dataset_name_group_0,
dataset_name_group_1):
dataset_name_group = [dataset_name_group_0, dataset_name_group_1]
for group_id in range(len(dataset_name_group)):
coverage = (coverage_sample[group_id]).astype(np.float)
length = length_sample[group_id]
for i in range(len(coverage)):
dataset_name_vec.append(dataset_name_group[group_id])
method_vec.append(method_name)
coverage_vec.append(coverage[i])
length_vec.append(length[i])
seed_vec.append(seed)
test_ratio_vec.append(test_ratio)
def run_equalized_coverage_experiment(dataset_name, method, seed, save_to_csv=True, test_ratio = 0.2):
random_state_train_test = seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
if os.path.isdir('/scratch'):
local_machine = 0
else:
local_machine = 1
if local_machine:
dataset_base_path = '/Users/romano/mydata/regression_data/'
else:
dataset_base_path = '/scratch/users/yromano/data/regression_data/'
# desired miscoverage error
alpha = 0.1
# desired quantile levels
quantiles = [0.05, 0.95]
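# these levels are alpha/2 and 1 - alpha/2, so the initial quantile band targets the
# nominal 1 - alpha = 90% coverage before conformal calibration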
# name of dataset
dataset_name_group_0 = dataset_name + "_non_white"
dataset_name_group_1 = dataset_name + "_white"
# load the dataset
X, y = datasets.GetDataset(dataset_name, dataset_base_path)
# divide the dataset into test and train based on the test_ratio parameter
x_train, x_test, y_train, y_test = train_test_split(X,
y,
test_size=test_ratio,
random_state=random_state_train_test)
# In[2]:
# compute input dimensions
n_train = x_train.shape[0]
in_shape = x_train.shape[1]
# divide the data into proper training set and calibration set
idx = np.random.permutation(n_train)
n_half = int(np.floor(n_train/2))
idx_train, idx_cal = idx[:n_half], idx[n_half:2*n_half]
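# split-conformal setup: the first half fits the regression model, the second half is used
# only to compute conformity scores, which is what yields the finite-sample marginal
# coverage guarantee of 1 - alpha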
# zero mean and unit variance scaling
scalerX = StandardScaler()
scalerX = scalerX.fit(x_train[idx_train])
# scale
x_train = scalerX.transform(x_train)
x_test = scalerX.transform(x_test)
y_train = np.log(1.0 + y_train)
y_test = np.log(1.0 + y_test)
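# log(1 + y) compresses the heavy right tail of the (assumed nonnegative) response; since
# both train and test targets are transformed, intervals, coverage and lengths are all
# evaluated on the log scale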
# reshape the data
x_train = np.asarray(x_train)
y_train = np.squeeze(np.asarray(y_train))
x_test = np.asarray(x_test)
y_test = np.squeeze(np.asarray(y_test))
# display basic information
print("Dataset: %s" % (dataset_name))
print("Dimensions: train set (n=%d, p=%d) ; test set (n=%d, p=%d)" %
(x_train.shape[0], x_train.shape[1], x_test.shape[0], x_test.shape[1]))
# In[3]:
dataset_name_vec = []
method_vec = []
coverage_vec = []
length_vec = []
seed_vec = []
test_ratio_vec = []
if method == "net":
# pytorch's optimizer object
nn_learn_func = torch.optim.Adam
# number of epochs
epochs = 1000
# learning rate
lr = 0.0005
# mini-batch size
batch_size = 64
# hidden dimension of the network
hidden_size = 64
# dropout regularization rate
dropout = 0.1
# weight decay regularization
wd = 1e-6
# ratio of held-out data, used in cross-validation
cv_test_ratio = 0.1
# seed for splitting the data in the model adapter's internal cross-validation
cv_random_state = 1
# In[4]:
model = helper.MSENet_RegressorAdapter(model=None,
fit_params=None,
in_shape = in_shape,
hidden_size = hidden_size,
learn_func = nn_learn_func,
epochs = epochs,
batch_size=batch_size,
dropout=dropout,
lr=lr,
wd=wd,
test_ratio=cv_test_ratio,
random_state=cv_random_state)
nc = RegressorNc(model, SignErrorErrFunc())
y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha)
method_name = "Marginal Conformal Neural Network"
# compute and print average coverage and average length
coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
y_lower,
y_upper,
alpha,
method_name,
x_test,
condition)
append_statistics(coverage_sample,
length_sample,
method_name,
dataset_name_vec,
method_vec,
coverage_vec,
length_vec,
seed_vec,
test_ratio_vec,
seed,
test_ratio,
dataset_name_group_0,
dataset_name_group_1)
# In[]
model = helper.MSENet_RegressorAdapter(model=None,
fit_params=None,
in_shape = in_shape,
hidden_size = hidden_size,
learn_func = nn_learn_func,
epochs = epochs,
batch_size=batch_size,
dropout=dropout,
lr=lr,
wd=wd,
test_ratio=cv_test_ratio,
random_state=cv_random_state)
nc = RegressorNc(model, SignErrorErrFunc())
y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
method_name = "Conditional Conformal Neural Network (joint)"
# compute and print average coverage and average length
coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
y_lower,
y_upper,
alpha,
method_name,
x_test,
condition)
append_statistics(coverage_sample,
length_sample,
method_name,
dataset_name_vec,
method_vec,
coverage_vec,
length_vec,
seed_vec,
test_ratio_vec,
seed,
test_ratio,
dataset_name_group_0,
dataset_name_group_1)
# In[6]
category_map = np.array([condition((x_train[i, :], None)) for i in range(x_train.shape[0])])
categories = np.unique(category_map)
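# groupwise variant: fit one model per group and calibrate each on its own calibration
# subset (run_icp_sep), so every group receives its own conformal correction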
estimator_list = []
nc_list = []
for i in range(len(categories)):
# define a neural network regressor per group
estimator_list.append(helper.MSENet_RegressorAdapter(model=None,
fit_params=None,
in_shape = in_shape,
hidden_size = hidden_size,
learn_func = nn_learn_func,
epochs = epochs,
batch_size=batch_size,
dropout=dropout,
lr=lr,
wd=wd,
test_ratio=cv_test_ratio,
random_state=cv_random_state))
# define the CQR object
nc_list.append(RegressorNc(estimator_list[i], SignErrorErrFunc()))
# run CQR procedure
y_lower, y_upper = helper.run_icp_sep(nc_list, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
method_name = "Conditional Conformal Neural Network (groupwise)"
# compute and print average coverage and average length
coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
y_lower,
y_upper,
alpha,
method_name,
x_test,
condition)
append_statistics(coverage_sample,
length_sample,
method_name,
dataset_name_vec,
method_vec,
coverage_vec,
length_vec,
seed_vec,
test_ratio_vec,
seed,
test_ratio,
dataset_name_group_0,
dataset_name_group_1)
# In[]
if method == "qnet":
# pytorch's optimizer object
nn_learn_func = torch.optim.Adam
# number of epochs
epochs = 1000
# learning rate
lr = 0.0005
# mini-batch size
batch_size = 64
# hidden dimension of the network
hidden_size = 64
# dropout regularization rate
dropout = 0.1
# weight decay regularization
wd = 1e-6
# desired quantiles
quantiles_net = [0.05, 0.95]
# ratio of held-out data, used in cross-validation
cv_test_ratio = 0.1
# seed for splitting the data in the model adapter's internal cross-validation
cv_random_state = 1
# In[7]:
# define quantile neural network model
quantile_estimator = helper.AllQNet_RegressorAdapter(model=None,
fit_params=None,
in_shape=in_shape,
hidden_size=hidden_size,
quantiles=quantiles_net,
learn_func=nn_learn_func,
epochs=epochs,
batch_size=batch_size,
dropout=dropout,
lr=lr,
wd=wd,
test_ratio=cv_test_ratio,
random_state=cv_random_state,
use_rearrangement=False)
# define the CQR object; the asymmetric error function computes separate conformity
# scores for the lower and upper bounds of the quantile band, so each side is calibrated separately
nc = RegressorNc(quantile_estimator, QuantileRegAsymmetricErrFunc())
# run CQR procedure
y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha)
method_name = "Marginal CQR Neural Network"
# compute and print average coverage and average length
coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
y_lower,
y_upper,
alpha,
method_name,
x_test,
condition)
append_statistics(coverage_sample,
length_sample,
method_name,
dataset_name_vec,
method_vec,
coverage_vec,
length_vec,
seed_vec,
test_ratio_vec,
seed,
test_ratio,
dataset_name_group_0,
dataset_name_group_1)
# In[]
# define qnet model
quantile_estimator = helper.AllQNet_RegressorAdapter(model=None,
fit_params=None,
in_shape=in_shape,
hidden_size=hidden_size,
quantiles=quantiles_net,
learn_func=nn_learn_func,
epochs=epochs,
batch_size=batch_size,
dropout=dropout,
lr=lr,
wd=wd,
test_ratio=cv_test_ratio,
random_state=cv_random_state,
use_rearrangement=False)
# define the CQR object
nc = RegressorNc(quantile_estimator, QuantileRegAsymmetricErrFunc())
# run CQR procedure
y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
method_name = "Conditional CQR Neural Network (joint)"
# compute and print average coverage and average length
coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
y_lower,
y_upper,
alpha,
method_name,
x_test,
condition)
append_statistics(coverage_sample,
length_sample,
method_name,
dataset_name_vec,
method_vec,
coverage_vec,
length_vec,
seed_vec,
test_ratio_vec,
seed,
test_ratio,
dataset_name_group_0,
dataset_name_group_1)
# In[6]
category_map = np.array([condition((x_train[i, :], None)) for i in range(x_train.shape[0])])
categories = np.unique(category_map)
quantile_estimator_list = []
nc_list = []
for i in range(len(categories)):
# define a quantile neural network model per group
quantile_estimator_list.append(helper.AllQNet_RegressorAdapter(model=None,
fit_params=None,
in_shape=in_shape,
hidden_size=hidden_size,
quantiles=quantiles_net,
learn_func=nn_learn_func,
epochs=epochs,
batch_size=batch_size,
dropout=dropout,
lr=lr,
wd=wd,
test_ratio=cv_test_ratio,
random_state=cv_random_state,
use_rearrangement=False))
# append a CQR object
nc_list.append(RegressorNc(quantile_estimator_list[i], QuantileRegAsymmetricErrFunc()))
# run CQR procedure
y_lower, y_upper = helper.run_icp_sep(nc_list, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
method_name = "Conditional CQR Neural Network (groupwise)"
# compute and print average coverage and average length
coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
y_lower,
y_upper,
alpha,
method_name,
x_test,
condition)
append_statistics(coverage_sample,
length_sample,
method_name,
dataset_name_vec,
method_vec,
coverage_vec,
length_vec,
seed_vec,
test_ratio_vec,
seed,
test_ratio,
dataset_name_group_0,
dataset_name_group_1)
# In[]
############### Summary
coverage_str = 'Coverage (expected ' + str(100 - alpha*100) + '%)'
if save_to_csv:
outdir = './results/'
if not os.path.exists(outdir):
os.mkdir(outdir)
out_name = outdir + 'results.csv'
df = pd.DataFrame({'name': dataset_name_vec,
'method': method_vec,
coverage_str : coverage_vec,
'Avg. Length' : length_vec,
'seed' : seed_vec,
'train test ratio' : test_ratio_vec})
if os.path.isfile(out_name):
df2 =
|
pd.read_csv(out_name)
|
pandas.read_csv
|
__author__ = "unknow"
__copyright__ = "Sprace.org.br"
__version__ = "1.0.0"
import pandas as pd
import sys
from math import sqrt
import sys
import os
import ntpath
import scipy.stats
import seaborn as sns
from matplotlib import pyplot as plt
#sys.path.append('/home/silvio/git/track-ml-1/utils')
#sys.path.append('../')
from core.utils.tracktop import *
#def create_graphic(reconstructed_tracks, original_tracks, tracks_diffs):
def create_graphic_org(**kwargs):
if kwargs.get('original_tracks'):
original_tracks = kwargs.get('original_tracks')
if kwargs.get('path_original_track'):
path_original_track = kwargs.get('path_original_track')
if kwargs.get('tracks'):
tracks = kwargs.get('tracks')
dfOriginal = pd.read_csv(original_tracks)
# dfOriginal2=dfOriginal.iloc(10:,:)
track_plot_new(dfOriginal, track_color = 'blue', n_tracks = tracks, title = 'Original to be Reconstructed', path=path_original_track)
#def create_graphic(reconstructed_tracks, original_tracks, tracks_diffs):
def create_graphic(**kwargs):
if kwargs.get('reconstructed_tracks'):
reconstructed_tracks = kwargs.get('reconstructed_tracks')
if kwargs.get('original_tracks'):
original_tracks = kwargs.get('original_tracks')
if kwargs.get('tracks_diffs'):
tracks_diffs = kwargs.get('tracks_diffs')
if kwargs.get('path_original_track'):
path_original_track = kwargs.get('path_original_track')
if kwargs.get('path_recons_track'):
path_recons_track = kwargs.get('path_recons_track')
'''
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs=dftracks_diffs_aux.iloc[:,28:29]
dfOriginal = pd.read_csv(original_tracks)
dfaux3=dfOriginal.drop(dfOriginal.columns[0], axis=1)
dfaux32=dfaux3.drop(dfaux3.columns[0], axis=1)
dfaux32.insert(174, "diff", dftracks_diffs, True)
org = dfaux32.sort_values(by=['diff'])
dfRecons = pd.read_csv(reconstructed_tracks)
dfaux33=dfRecons.drop(dfRecons.columns[0], axis=1)
dfaux22=dfaux33.drop(dfaux33.columns[0], axis=1)
dfaux22.insert(65, "diff", dftracks_diffs, True)
org2 = dfaux22.sort_values(by=['diff'])
'''
dfRecons = pd.read_csv(reconstructed_tracks)
dfOriginal = pd.read_csv(original_tracks)
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs=dftracks_diffs_aux.iloc[:,28:29]
dfRecons.insert(171, "diff", dftracks_diffs, True)
dfOriginal.insert(171, "diff", dftracks_diffs, True)
recaux=dfRecons.iloc[:,12:]
orgaux=dfOriginal.iloc[:,11:]
org = orgaux.sort_values(by=['diff'])
rec = recaux.sort_values(by=['diff'])
track_plot_new(org, track_color = 'blue', n_tracks = 19, title = 'Original to be Reconstructed', path=path_original_track)
#track_plot(org, track_color = 'blue', n_tracks = 20, title = 'Original to be Reconstructed', path=path_recons_track)
track_plot_new(rec, track_color = 'red', n_tracks = 19, title = 'reconstructed LSTM', path=path_recons_track)
#track_plot(org2, track_color = 'red', n_tracks = 20, title = 'reconstructed LSTM', path=path_original_track)
#def create_histogram(tracks_diffs):
def create_histogram(**kwargs):
if kwargs.get('tracks_diffs'):
tracks_diffs = kwargs.get('tracks_diffs')
if kwargs.get('path_hist'):
path_hist = kwargs.get('path_hist')
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs = dftracks_diffs_aux.iloc[:,28:29]
dftracks_diffs_aux[29] = (dftracks_diffs_aux.iloc[:,28:29]/10)
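# the division by 10 is assumed to convert mm to cm, matching the '(cm)' x-axis label below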
dfff=dftracks_diffs_aux.sort_values(by=[29])
track_plot_hist(dfff.iloc[:,-1:], title = "AVG distance - Real x LSTM",
x_title = "Average of Distance (cm)",
y_title = "frequency",
n_bins = 20, bar_color = 'indianred',
path = path_hist)
#def create_histogram_seaborn(tracks_diffs,outputfig):
def create_histogram_seaborn(**kwargs):
if kwargs.get('tracks_diffs'):
tracks_diffs = kwargs.get('tracks_diffs')
if kwargs.get('outputfig'):
outputfig = kwargs.get('outputfig')
dftracks_diffs_aux = pd.read_csv(tracks_diffs)
dftracks_diffs = dftracks_diffs_aux.iloc[:,28:29]
dftracks_diffs_aux[29] = (dftracks_diffs_aux.iloc[:,28:29]/10)
dfff=dftracks_diffs_aux.sort_values(by=[29])
#sns_plot = sns.distplot(dfEval.iloc[:,[27]])
sns_plot = sns.distplot(dfff.iloc[:,-1:])
sns_plot.set(xlabel='Average Distance (cm)', ylabel='Frequency')
plt.savefig(outputfig)
#def create_diference_per_track(reconstructed_tracks, original_tracks, eval_file):
def create_diference_per_track(**kwargs):
if kwargs.get('reconstructed_tracks'):
reconstructed_tracks = kwargs.get('reconstructed_tracks')
if kwargs.get('original_tracks'):
original_tracks = kwargs.get('original_tracks')
if kwargs.get('eval_file'):
eval_file = kwargs.get('eval_file')
dfOriginal = pd.read_csv(original_tracks)
dfReconstructed = pd.read_csv(reconstructed_tracks)
lines = dfOriginal.shape[0]
columns = dfOriginal.shape[1]
dfEval = pd.DataFrame(index=range(lines),columns=range(29))
ind_dfEval=0
#for hit in range(1, 28):
for hit in range(1, 16):
print(hit)
print(dfOriginal.shape)
print(dfReconstructed.shape)
#original track
dataOR=dfOriginal.iloc[:, [ (hit*8)+2,(hit*8)+3,(hit*8)+4 ]]
#reconstructed track
dataRE=dfReconstructed.iloc[:, [ (hit*8)+2,(hit*8)+3,(hit*8)+4 ]]
dftemp = pd.DataFrame(index=range(lines),columns=range(7))
dftemp[0]=dataOR.iloc[:,[0]]
dftemp[1]=dataOR.iloc[:,[1]]
dftemp[2]=dataOR.iloc[:,[2]]
dftemp[3]=dataRE.iloc[:,[0]]
dftemp[4]=dataRE.iloc[:,[1]]
dftemp[5]=dataRE.iloc[:,[2]]
dftemp[6]= (((dftemp[0]-dftemp[3])**2)+((dftemp[1]-dftemp[4])**2)+((dftemp[2]-dftemp[5])**2)).pow(1./2)
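# per-hit Euclidean distance sqrt((x_o - x_r)**2 + (y_o - y_r)**2 + (z_o - z_r)**2);
# with numpy imported, an equivalent vectorized form would be
# np.linalg.norm(dataOR.values - dataRE.values, axis=1)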
# dfEval collects the per-hit geometric (Euclidean) distance, one column per hit
dfEval[ind_dfEval] = dftemp[6]
ind_dfEval=ind_dfEval+1
ind=27
col = dfEval.loc[: , 0:26]
dfEval[27] = col.mean(axis=1)
dfEval.to_csv(eval_file)
def create_input_data(**kwargs):
# this function selects a specific number of tracks with a specific amount of hits
maximunAmountofHitsinDB=20
columnsperhit=8
firstColumnAfterParticle=9
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
if kwargs.get('aux_am_per_hit'):
aux_am_per_hit = kwargs.get('aux_am_per_hit')
if kwargs.get('min'):
min = kwargs.get('min')
if kwargs.get('max'):
max = kwargs.get('max')
if kwargs.get('maximunAmountofHitsinDB'):
maximunAmountofHitsinDB = kwargs.get('maximunAmountofHitsinDB')
if kwargs.get('columnsperhit'):
columnsperhit = kwargs.get('columnsperhit')
if kwargs.get('firstColumnAfterParticle'):
firstColumnAfterParticle = kwargs.get('firstColumnAfterParticle')
nrowsvar=1500000 #1000 #500000
skip=0
totdf = pd.read_csv(event_prefix,skiprows=0,nrows=1)
totdf = totdf.iloc[0:0]
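# read a single row just to capture the column layout, then empty the frame so it can
# accumulate the selected tracks per hit-count below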
for z in range(1):
dfOriginal = pd.read_csv(event_prefix,skiprows=skip,nrows=nrowsvar)
#print(dfOriginal.shape)
#dfOriginal2=dfOriginal.iloc[:,7:]
dfOriginal2=dfOriginal.iloc[:,firstColumnAfterParticle:]
#print(dfOriginal2)
# count the zero-valued columns in each row (a missing hit contributes `columnsperhit` zeros)
dfOriginal['totalZeros'] = (dfOriginal2 == 0.0).sum(axis=1)
dfOriginal['totalHits'] = maximunAmountofHitsinDB-(dfOriginal['totalZeros']/columnsperhit)
dfOriginal["totalHits"] = dfOriginal["totalHits"].astype(int)
#print("min: " , dfOriginal.iloc[:,:-1].min())
#print("max: " , dfOriginal.iloc[:,:-1].max())
print(int(min))
print(int(max)+1)
#print(dfOriginal['totalZeros'])
#print(dfOriginal['totalHits'])
for i in range(int(min), (int(max)+1)):
#print("i: " , i)
auxDF = dfOriginal.loc[dfOriginal['totalHits'] == i]
auxDF2 = auxDF.iloc[0:aux_am_per_hit,:]
totdf = totdf.append(auxDF2, sort=False)
print("auxDF2.shape: ", i, auxDF2.shape)
dfOriginal = dfOriginal.iloc[0:0]
auxDF2 = auxDF2.iloc[0:0]
auxDF = auxDF.iloc[0:0]
skip=skip+nrowsvar
totdf.drop('totalHits', axis=1, inplace=True)
totdf.drop('totalZeros', axis=1, inplace=True)
totdf.to_csv(output_prefix, index = False)
def put_each_hit_in_a_single_line_train(**kwargs):
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
print("event_prefix ", event_prefix, " output_prefix ", output_prefix)
auxdf3 = pd.DataFrame()
totfinaldfaux = pd.DataFrame()
totdf = pd.read_csv(event_prefix) #,skiprows=0,nrows=1)
print("[Data] Particle information:")
print(totdf.iloc[:,0:10])
print("totdf hit 1")
print(totdf.iloc[:,9:18])
print("totdf hit 2")
print(totdf.iloc[:,18:26])
print("totdf hit 3")
print(totdf.iloc[:,26:34])
print("totdf hit 4")
print(totdf.iloc[:,34:42])
print("totdf hit 5")
print(totdf.iloc[:,42:50])
#print(totdf.shape)
#totdf.drop('Unnamed: 0', axis=1, inplace=True)
print(totdf.shape)
columnsPerHit=8
maximumHitPerTrack=20
positionB=1
#positionE=40 #17+8+8+8+8
positionE=49 #17+8+8+8+8
amount_of_hits = maximumHitPerTrack*columnsPerHit
#define interval of first hit as the third hit
#positionB=(((positionB+columnsPerHit)+columnsPerHit)+columnsPerHit)
#positionE=(((positionE+columnsPerHit)+columnsPerHit)+columnsPerHit)
#initialize dataframe that will receive each hit as a row
#totfinaldfaux = pd.DataFrame(columns=range(columnsPerHit))
#for from 3 to 20 -> 17 hits
#for k in range(3,int(maximumHitPerTrack)-3):
for k in range(3,4):
#for k in range(3,10):
print("K: ", k)
print("interval: ", positionB,positionE)
totdf3 = totdf.iloc[:,positionB:positionE]
print("totdf3.shape: ", totdf3.shape)
print("totdf3: ", totdf3)
# rename column names so the rows can be appended to the result dataframe
for i in range(totdf3.columns.shape[0]):
totdf3 = totdf3.rename(columns={totdf3.columns[i]: i})
'''
totdf3['totalZeros'] = (totdf3.iloc[:,:] == 0.0).sum(axis=1)
#print( totdf3['totalZeros'] )
print( "totdf3: " , totdf3.shape )
auxdf3 = totdf3[ totdf3['totalZeros'] == 32 ]
print( "auxdf3 :" , auxdf3.shape )
# Get names of indexes for which column Age has value 30
indexNames = totdf3[ totdf3['totalZeros'] == 32 ].index
# Delete these row indexes from dataFrame
totdf3.drop(indexNames , inplace=True)
print( "totdf3 after drop :" , totdf3.shape )
'''
#print("totalZeros!!")
#auxdf3['totalZeros'] = (totdf3 == 0.0).sum(axis=1)
#for auxIndex, auxRow in totdf3.iterrows():
#obj=(totdf3.iloc[auxIndex:auxIndex+1,:] == 0.0).sum(axis=1)
#obj=obj.flatten()
#print(obj)
#print(obj.shape)
#print(obj.iloc[0:10])
#print(obj[1])
#if (((totdf3.iloc[auxIndex:auxIndex+1,:] == 0.0).sum(axis=1)) > 32):
# print("dropRow")
#else:
# print("keepRow")
#df3d.drop('X', axis=1, inplace=True)
#totdf3 = totdf3.rename(columns={totdf3.columns[i]: i})
#print(auxdf3)
#append hit as row
totfinaldfaux = totfinaldfaux.append(totdf3)
positionB=positionB+columnsPerHit
positionE=positionE+columnsPerHit
print(totfinaldfaux.shape)
print(totfinaldfaux)
totfinaldfaux.to_csv(output_prefix)
#totfinaldfaux.to_csv(output_prefix)
'''
def put_each_hit_in_a_single_line_v2(**kwargs):
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
totdf = pd.read_csv(event_prefix) #,skiprows=0,nrows=1)
resorg= totdf.iloc[:, [ 26,27,28,29,30,31,32]]
totfinaldfaux.to_csv(output_prefix)
'''
def put_each_hit_in_a_single_line(**kwargs):
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
totdf = pd.read_csv(event_prefix) #,skiprows=0,nrows=1)
print(totdf)
print(totdf.shape)
columnsPerHit=8
maximumHitPerTrack=20
#positionB=10
#positionE=18
positionB=1
positionE=8
amount_of_hits = maximumHitPerTrack*columnsPerHit
#define interval of first hit as the third hit
positionB=(((positionB+columnsPerHit)+columnsPerHit)+columnsPerHit)
positionE=(((positionE+columnsPerHit)+columnsPerHit)+columnsPerHit)
#initialize dataframe that will receive each hit as a row
totfinaldfaux = pd.DataFrame(columns=range(columnsPerHit))
#for from 3 to 20 -> 17 hits
for k in range(3,int(maximumHitPerTrack)):
#for k in range(1,1):
print("interval: ", positionB,positionE)
totdf3 = totdf.iloc[:,positionB:positionE]
print(totdf3)
# rename column names so the rows can be appended to the result dataframe
for i in range(totdf3.columns.shape[0]):
totdf3 = totdf3.rename(columns={totdf3.columns[i]: i})
#append hit as row
totfinaldfaux = totfinaldfaux.append(totdf3)
positionB=positionB+columnsPerHit
positionE=positionE+columnsPerHit
print(totfinaldfaux.shape)
totfinaldfaux.to_csv(output_prefix)
def create_input_data_24(**kwargs):
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
hitB = 2
amount_of_hits = 24
totdf = pd.read_csv(event_prefix) #,skiprows=0,nrows=1)
totfinaldfaux = pd.DataFrame(columns=range(amount_of_hits))
totfinaldfaux2 = pd.DataFrame(columns=range(amount_of_hits))
for z in range(1,26):
hitB = hitB+6
hitEnd = hitB+amount_of_hits
totfinaldf=totdf.iloc[:,hitB:hitEnd]
totfinaldfaux2 = pd.DataFrame({i: totfinaldf.iloc[:, i] for i in range(amount_of_hits)})
totfinaldfaux = totfinaldfaux.append(totfinaldfaux2, sort=False, ignore_index=True)
totfinaldfaux.to_csv(output_prefix)
def create_input_data_6(**kwargs):
if kwargs.get('event_prefix'):
event_prefix = kwargs.get('event_prefix')
if kwargs.get('output_prefix'):
output_prefix = kwargs.get('output_prefix')
totdf = pd.read_csv(event_prefix) #,skiprows=0,nrows=1)
hitB = 2
amount_of_hits = 6
#dfEval = pd.DataFrame(index=range(lines),columns=range(29))
totfinaldfaux = pd.DataFrame(columns=range(amount_of_hits))
totfinaldfaux2 = pd.DataFrame(columns=range(amount_of_hits))
#print(totfinaldfaux)
##totfinaldfaux = pd.DataFrame(columns = [0,1,2,3,4,5])
#totfinaldfaux2 = pd.DataFrame(columns = [0,1,2,3,4,5])
#print(totfinaldfaux2)
#for z in range(1,26):
for z in range(1,3):
hitB = hitB+amount_of_hits
hitEnd = hitB+amount_of_hits
totfinaldf=totdf.iloc[:,hitB:hitEnd]
print(hitB,hitEnd)
        totfinaldfaux2 = pd.DataFrame({0: totfinaldf.iloc[:, 0], 1: totfinaldf.iloc[:, 1], 2: totfinaldf.iloc[:, 2], 3: totfinaldf.iloc[:, 3], 4: totfinaldf.iloc[:, 4], 5: totfinaldf.iloc[:, 5]})
        # accumulate the 6-column block as extra rows, as in create_input_data_24
        totfinaldfaux = totfinaldfaux.append(totfinaldfaux2, sort=False, ignore_index=True)
    totfinaldfaux.to_csv(output_prefix)
"""Build industry sector ratios."""
import pandas as pd
# GWh/ktoe OR MWh/toe
toe_to_MWh = 11.630
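# e.g. 100 ktoe of final energy corresponds to 100 * 11.630 = 1163 GWh
# (equivalently, 1 toe = 11.630 MWh)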
eu28 = [
"FR",
"DE",
"GB",
"IT",
"ES",
"PL",
"SE",
"NL",
"BE",
"FI",
"DK",
"PT",
"RO",
"AT",
"BG",
"EE",
"GR",
"LV",
"CZ",
"HU",
"IE",
"SK",
"LT",
"HR",
"LU",
"SI",
"CY",
"MT",
]
sheet_names = {
"Iron and steel": "ISI",
"Chemicals Industry": "CHI",
"Non-metallic mineral products": "NMM",
"Pulp, paper and printing": "PPA",
"Food, beverages and tobacco": "FBT",
"Non Ferrous Metals": "NFM",
"Transport Equipment": "TRE",
"Machinery Equipment": "MAE",
"Textiles and leather": "TEL",
"Wood and wood products": "WWP",
"Other Industrial Sectors": "OIS",
}
index = [
"elec",
"coal",
"coke",
"biomass",
"methane",
"hydrogen",
"heat",
"naphtha",
"process emission",
"process emission from feedstock",
]
def load_idees_data(sector, country="EU28"):
suffixes = {"out": "", "fec": "_fec", "ued": "_ued", "emi": "_emi"}
sheets = {k: sheet_names[sector] + v for k, v in suffixes.items()}
def usecols(x):
return isinstance(x, str) or x == year
idees = pd.read_excel(
f"{snakemake.input.idees}/JRC-IDEES-2015_Industry_{country}.xlsx",
sheet_name=list(sheets.values()),
index_col=0,
header=0,
squeeze=True,
usecols=usecols,
)
for k, v in sheets.items():
idees[k] = idees.pop(v)
return idees
def iron_and_steel():
# There are two different approaches to produce iron and steel:
# i.e., integrated steelworks and electric arc.
# Electric arc approach has higher efficiency and relies more on electricity.
# We assume that integrated steelworks will be replaced by electric arc entirely.
sector = "Iron and steel"
idees = load_idees_data(sector)
df = pd.DataFrame(index=index)
## Electric arc
sector = "Electric arc"
df[sector] = 0.0
s_fec = idees["fec"][51:57]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.at["elec", sector] += s_fec[sel].sum()
df.at["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Steel: Smelters"
s_fec = idees["fec"][61:67]
s_ued = idees["ued"][61:67]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
# efficiency changes due to transforming all the smelters into methane
key = "Natural gas (incl. biogas)"
eff_met = s_ued[key] / s_fec[key]
df.at["methane", sector] += s_ued[subsector] / eff_met
subsector = "Steel: Electric arc"
s_fec = idees["fec"][67:68]
assert s_fec.index[0] == subsector
df.at["elec", sector] += s_fec[subsector]
subsector = "Steel: Furnaces, Refining and Rolling"
s_fec = idees["fec"][68:75]
s_ued = idees["ued"][68:75]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Furnaces, Refining and Rolling - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified, other processes scaled by used energy
df.at["elec", sector] += s_ued[subsector] / eff
subsector = "Steel: Products finishing"
s_fec = idees["fec"][75:92]
s_ued = idees["ued"][75:92]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Products finishing - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.at["elec", sector] += s_ued[subsector] / eff
# Process emissions (per physical output)
s_emi = idees["emi"][51:93]
assert s_emi.index[0] == sector
s_out = idees["out"][7:8]
assert s_out.index[0] == sector
# tCO2/t material
df.loc["process emission", sector] += s_emi["Process emissions"] / s_out[sector]
# final energy consumption MWh/t material
sel = ["elec", "heat", "methane"]
df.loc[sel, sector] = df.loc[sel, sector] * toe_to_MWh / s_out[sector]
## DRI + Electric arc
# For primary route: DRI with H2 + EAF
sector = "DRI + Electric arc"
df[sector] = df["Electric arc"]
# add H2 consumption for DRI at 1.7 MWh H2 /ton steel
df.at["hydrogen", sector] = config["H2_DRI"]
# add electricity consumption in DRI shaft (0.322 MWh/tSl)
df.at["elec", sector] += config["elec_DRI"]
## Integrated steelworks
    # (could be used in combination with CCS)
# Assume existing fuels are kept, except for furnaces, refining, rolling, finishing
# Ignore 'derived gases' since these are top gases from furnaces
sector = "Integrated steelworks"
df[sector] = 0.0
s_fec = idees["fec"][3:9]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Steel: Sinter/Pellet making"
s_fec = idees["fec"][13:19]
s_ued = idees["ued"][13:19]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
df.loc["elec", sector] += s_fec["Electricity"]
sel = ["Natural gas (incl. biogas)", "Residual fuel oil"]
df.loc["methane", sector] += s_fec[sel].sum()
df.loc["coal", sector] += s_fec["Solids"]
subsector = "Steel: Blast /Basic oxygen furnace"
s_fec = idees["fec"][19:25]
s_ued = idees["ued"][19:25]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
sel = ["Natural gas (incl. biogas)", "Residual fuel oil"]
df.loc["methane", sector] += s_fec[sel].sum()
df.loc["coal", sector] += s_fec["Solids"]
df.loc["coke", sector] = s_fec["Coke"]
subsector = "Steel: Furnaces, Refining and Rolling"
s_fec = idees["fec"][25:32]
s_ued = idees["ued"][25:32]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Furnaces, Refining and Rolling - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified, other processes scaled by used energy
df.loc["elec", sector] += s_ued[subsector] / eff
subsector = "Steel: Products finishing"
s_fec = idees["fec"][32:49]
s_ued = idees["ued"][32:49]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Steel: Products finishing - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff
# Process emissions (per physical output)
s_emi = idees["emi"][3:50]
assert s_emi.index[0] == sector
s_out = idees["out"][6:7]
assert s_out.index[0] == sector
# tCO2/t material
df.loc["process emission", sector] = s_emi["Process emissions"] / s_out[sector]
# final energy consumption MWh/t material
sel = ["elec", "heat", "methane", "coke", "coal"]
df.loc[sel, sector] = df.loc[sel, sector] * toe_to_MWh / s_out[sector]
return df
def chemicals_industry():
sector = "Chemicals Industry"
idees = load_idees_data(sector)
df = pd.DataFrame(index=index)
# Basic chemicals
sector = "Basic chemicals"
df[sector] = 0.0
s_fec = idees["fec"][3:9]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Chemicals: Feedstock (energy used as raw material)"
# There are Solids, Refinery gas, LPG, Diesel oil, Residual fuel oil,
# Other liquids, Naphtha, Natural gas for feedstock.
    # Naphtha represents 47%, methane 17%. LPG (18%), solids, refinery gas,
    # diesel oil, residual fuel oil and other liquids are assimilated to naphtha
s_fec = idees["fec"][13:22]
assert s_fec.index[0] == subsector
df.loc["naphtha", sector] += s_fec["Naphtha"]
df.loc["methane", sector] += s_fec["Natural gas"]
# LPG and other feedstock materials are assimilated to naphtha
# since they will be produced through Fischer-Tropsh process
sel = [
"Solids",
"Refinery gas",
"LPG",
"Diesel oil",
"Residual fuel oil",
"Other liquids",
]
df.loc["naphtha", sector] += s_fec[sel].sum()
subsector = "Chemicals: Steam processing"
# All the final energy consumption in the steam processing is
# converted to methane, since we need >1000 C temperatures here.
# The current efficiency of methane is assumed in the conversion.
s_fec = idees["fec"][22:33]
s_ued = idees["ued"][22:33]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
# efficiency of natural gas
eff_ch4 = s_ued["Natural gas (incl. biogas)"] / s_fec["Natural gas (incl. biogas)"]
# replace all fec by methane
df.loc["methane", sector] += s_ued[subsector] / eff_ch4
subsector = "Chemicals: Furnaces"
s_fec = idees["fec"][33:41]
s_ued = idees["ued"][33:41]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
# efficiency of electrification
key = "Chemicals: Furnaces - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Process cooling"
s_fec = idees["fec"][41:55]
s_ued = idees["ued"][41:55]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Process cooling - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Generic electric process"
s_fec = idees["fec"][55:56]
assert s_fec.index[0] == subsector
df.loc["elec", sector] += s_fec[subsector]
# Process emissions
# Correct everything by subtracting 2015's ammonia demand and
# putting in ammonia demand for H2 and electricity separately
s_emi = idees["emi"][3:57]
assert s_emi.index[0] == sector
# convert from MtHVC/a to ktHVC/a
s_out = config["HVC_production_today"] * 1e3
# tCO2/t material
df.loc["process emission", sector] += (
s_emi["Process emissions"]
- config["petrochemical_process_emissions"] * 1e3
- config["NH3_process_emissions"] * 1e3
) / s_out
# emissions originating from feedstock, could be non-fossil origin
# tCO2/t material
df.loc["process emission from feedstock", sector] += (
config["petrochemical_process_emissions"] * 1e3
) / s_out
# convert from ktoe/a to GWh/a
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] *= toe_to_MWh
# subtract ammonia energy demand (in ktNH3/a)
ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0)
ammonia_total = ammonia.loc[ammonia.index.intersection(eu28), str(year)].sum()
df.loc["methane", sector] -= ammonia_total * config["MWh_CH4_per_tNH3_SMR"]
df.loc["elec", sector] -= ammonia_total * config["MWh_elec_per_tNH3_SMR"]
# subtract chlorine demand
chlorine_total = config["chlorine_production_today"]
df.loc["hydrogen", sector] -= chlorine_total * config["MWh_H2_per_tCl"]
df.loc["elec", sector] -= chlorine_total * config["MWh_elec_per_tCl"]
# subtract methanol demand
methanol_total = config["methanol_production_today"]
df.loc["methane", sector] -= methanol_total * config["MWh_CH4_per_tMeOH"]
df.loc["elec", sector] -= methanol_total * config["MWh_elec_per_tMeOH"]
# MWh/t material
df.loc[sources, sector] = df.loc[sources, sector] / s_out
df.rename(columns={sector: "HVC"}, inplace=True)
# HVC mechanical recycling
sector = "HVC (mechanical recycling)"
df[sector] = 0.0
df.loc["elec", sector] = config["MWh_elec_per_tHVC_mechanical_recycling"]
# HVC chemical recycling
sector = "HVC (chemical recycling)"
df[sector] = 0.0
df.loc["elec", sector] = config["MWh_elec_per_tHVC_chemical_recycling"]
# Ammonia
sector = "Ammonia"
df[sector] = 0.0
df.loc["hydrogen", sector] = config["MWh_H2_per_tNH3_electrolysis"]
df.loc["elec", sector] = config["MWh_elec_per_tNH3_electrolysis"]
# Chlorine
sector = "Chlorine"
df[sector] = 0.0
df.loc["hydrogen", sector] = config["MWh_H2_per_tCl"]
df.loc["elec", sector] = config["MWh_elec_per_tCl"]
# Methanol
sector = "Methanol"
df[sector] = 0.0
df.loc["methane", sector] = config["MWh_CH4_per_tMeOH"]
df.loc["elec", sector] = config["MWh_elec_per_tMeOH"]
# Other chemicals
sector = "Other chemicals"
df[sector] = 0.0
s_fec = idees["fec"][58:64]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Chemicals: High enthalpy heat processing"
s_fec = idees["fec"][68:81]
s_ued = idees["ued"][68:81]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "High enthalpy heat processing - Electric (microwave)"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Furnaces"
s_fec = idees["fec"][81:89]
s_ued = idees["ued"][81:89]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Furnaces - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Process cooling"
s_fec = idees["fec"][89:103]
s_ued = idees["ued"][89:103]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Process cooling - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff
subsector = "Chemicals: Generic electric process"
s_fec = idees["fec"][103:104]
assert s_fec.index[0] == subsector
df.loc["elec", sector] += s_fec[subsector]
# Process emissions
s_emi = idees["emi"][58:105]
s_out = idees["out"][9:10]
assert s_emi.index[0] == sector
assert sector in str(s_out.index)
# tCO2/t material
df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values
# MWh/t material
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values
# Pharmaceutical products
sector = "Pharmaceutical products etc."
df[sector] = 0.0
s_fec = idees["fec"][106:112]
assert s_fec.index[0] == sector
sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"]
df.loc["elec", sector] += s_fec[sel].sum()
df.loc["heat", sector] += s_fec["Low enthalpy heat"]
subsector = "Chemicals: High enthalpy heat processing"
s_fec = idees["fec"][116:129]
s_ued = idees["ued"][116:129]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "High enthalpy heat processing - Electric (microwave)"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Furnaces"
s_fec = idees["fec"][129:137]
s_ued = idees["ued"][129:137]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Furnaces - Electric"
eff = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff
subsector = "Chemicals: Process cooling"
s_fec = idees["fec"][137:151]
s_ued = idees["ued"][137:151]
assert s_fec.index[0] == subsector
assert s_ued.index[0] == subsector
key = "Chemicals: Process cooling - Electric"
eff_elec = s_ued[key] / s_fec[key]
# assume fully electrified
df.loc["elec", sector] += s_ued[subsector] / eff_elec
subsector = "Chemicals: Generic electric process"
s_fec = idees["fec"][151:152]
s_out = idees["out"][10:11]
assert s_fec.index[0] == subsector
assert sector in str(s_out.index)
df.loc["elec", sector] += s_fec[subsector]
# tCO2/t material
df.loc["process emission", sector] += 0.0
# MWh/t material
sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values
return df
def nonmetalic_mineral_products():
# This includes cement, ceramic and glass production.
# This includes process emissions related to the fabrication of clinker.
sector = "Non-metallic mineral products"
idees = load_idees_data(sector)
    df = pd.DataFrame(index=index)
# -*- coding: utf-8 -*-
import os
import redis
import pandas as pd
metals_dict = {
'XAU': 'Gold',
'XAG': 'Silver'
}
running_in_docker= os.environ.get('RUNNING_IN_DOCKER', False)
if running_in_docker:
r = redis.Redis(host='192.168.1.21')
else:
r = redis.Redis(host='127.0.0.1')
def redis_to_dataframe(key):
timeseries_data = r.hgetall(key)
timeseries_data = {
k.decode('utf-8'):float(v) for (k,v) in timeseries_data.items()
}
df = pd.DataFrame(timeseries_data.items(), columns=['Date', 'DateValue'])
    df['Date'] = pd.to_datetime(df['Date'])
return df
def get_mangled_dataframe(metal_currency_key):
"""
Parameter: The key in Redis.
E.g. XAU-USD
"""
currency = metal_currency_key.split('-')[1]
metal = metal_currency_key.split('-')[0]
df = redis_to_dataframe(metal_currency_key)
# convert usd pricing
if currency == 'USD':
df['DateValue'] = df.DateValue.apply(lambda x: 1/x)
# Add description columns
df.insert(1, 'Stock', metals_dict[metal], allow_duplicates=True)
df.insert(2, 'Currency', currency, allow_duplicates=True)
df.insert(3, 'Stock-Currency', metals_dict[metal]+'-'+currency, allow_duplicates=True)
# Add daily change column. As easy as.
df['Change'] = df['DateValue'].diff()
# Make Date column a datetime series
df['Date'] = pd.to_datetime(df['Date'])
# Make the Date column the index
df.set_index('Date', inplace=True)
return df
def concatenate_dataframes(args, ignore_index=False):
    return pd.concat(args, ignore_index=ignore_index)
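# A minimal usage sketch for the helpers above, using synthetic frames instead of a
# live Redis instance (the real frames would come from get_mangled_dataframe('XAU-USD')
# and get_mangled_dataframe('XAG-USD')). The default ignore_index=False keeps the
# Date index so the combined frame stays date-addressable.
def _example_concatenate_metals():
    gold = pd.DataFrame({'DateValue': [0.020, 0.021], 'Stock': 'Gold'},
                        index=pd.DatetimeIndex(['2021-01-01', '2021-01-02'], name='Date'))
    silver = pd.DataFrame({'DateValue': [1.50, 1.60], 'Stock': 'Silver'},
                          index=pd.DatetimeIndex(['2021-01-01', '2021-01-02'], name='Date'))
    return concatenate_dataframes([gold, silver])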
import pandas as pd
import numpy as np
from pathlib import Path
from data_parsing import extract_dataframe, open_xml
import gzip
from collections import defaultdict
# ########### 1. INTERMEDIARY FUNCTIONS ###########
def unzip_file(path: Path):
""" Unzips a file ending with .gz
Parameters
----------
path: pathlib.Path object or os.path object
Returns
-------
path: string
Path of the unzipped folder or file
"""
if Path(path).suffix == ".gz":
return gzip.open(path)
else:
return path
def parse_bus_fare_input(bus_fare_data_df, route_ids):
"""Processes the `MassTransitFares.csv` input file into a dataframe with rows = ages and columns = routes
Parameters
----------
bus_fare_data_df: pandas DataFrame
Bus fares extracted from the "submission-inputs/MassTransitFares.csv"
route_ids: list of strings
All routes ids where buses operate (from `routes.txt` file in the GTFS data)
Returns
-------
bus_fare_per_route_df: pandas DataFrame
Dataframe with rows = ages and columns = routes
"""
bus_fare_per_route_df = pd.DataFrame(np.zeros((120, len(route_ids))), columns=route_ids)
routes = bus_fare_data_df['routeId'].unique()
for r in routes:
        if np.isnan(r):
            # rows with no routeId are catch-all fares that apply to every route
            cols = route_ids
            r_fares = bus_fare_data_df.loc[np.isnan(bus_fare_data_df['routeId']), :]
        else:
            cols = int(r)
            # get all fare rows for this specific route
            r_fares = bus_fare_data_df.loc[bus_fare_data_df['routeId'] == r, :]
for i, row in r_fares.iterrows():
age_group = row['age']
left = age_group[0]
ages = age_group[1:-1].split(':')
right = age_group[-1]
if left == '(':
min_a = int(ages[0]) + 1
else:
min_a = int(ages[0])
if right == ')':
max_a = int(ages[1]) - 1
else:
max_a = int(ages[1])
bus_fare_per_route_df.loc[min_a:max_a, cols] = float(row['amount'])
return bus_fare_per_route_df
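# A minimal usage sketch with hypothetical fare rows and route ids: a single row with
# no routeId acts as a catch-all fare for every route, and the age string follows the
# interval notation parsed above ('(' / ')' exclusive, '[' / ']' inclusive).
def _example_parse_bus_fare_input():
    fares = pd.DataFrame({
        'routeId': [np.nan],
        'age': ['(6:60]'],
        'amount': [1.5],
    })
    route_ids = [1340, 1341]          # hypothetical GTFS route ids
    table = parse_bus_fare_input(fares, route_ids)
    assert table.loc[7, 1340] == 1.5  # ages 7..60 pay the catch-all fare on every route
    assert table.loc[6, 1341] == 0.0  # age 6 is excluded by the '(' bound
    return table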
def calc_fuel_costs(legs_df, fuel_cost_dict):
# legs_df: legs_dataframe
# fuel_cost_dict: {fuel_type: $/MJoule}
# returns: legs_df augmented with an additional column of estimated fuel costs
legs_df.loc[:, "FuelCost"] = np.zeros(legs_df.shape[0])
for f in fuel_cost_dict.keys():
legs_df.loc[legs_df["fuelType"] == f.capitalize(), "FuelCost"] = (pd.to_numeric(
legs_df.loc[legs_df["fuelType"] == f.capitalize(), "fuel"]) * float(fuel_cost_dict[f])) / 1000000
return legs_df
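# Worked example of the formula above: the 'fuel' column is in Joules and the cost
# dictionary is in $/MJ, hence the division by 1e6. The numbers are hypothetical.
def _example_fuel_cost():
    legs = pd.DataFrame({'fuelType': ['Gasoline'], 'fuel': [40_000_000]})  # 40 MJ in Joules
    priced = calc_fuel_costs(legs, {'gasoline': 0.03})                     # 0.03 $/MJ
    return priced['FuelCost'].iloc[0]                                      # 40 * 0.03 = 1.20 $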
def calc_transit_fares(row, bus_fare_dict, person_df, trip_to_route):
pid = row['PID']
age = person_df.loc[pid,'Age']
vehicle = row['Veh']
route = trip_to_route[vehicle.split(':')[1]]
fare = bus_fare_dict.loc[age,route]
return fare
def calc_fares(legs_df, ride_hail_fares, bus_fare_dict, person_df, trip_to_route):
# legs_df: legs_dataframe
    # ride_hail_fares: {'base': $, 'duration': $ per minute, 'distance': $ per mile}
    # (the code below converts seconds to minutes and km to miles before applying the rates)
# transit_fares isnt being used currently - would need to be updated to compute fare based on age
# returns: legs_df augmented with an additional column of estimated transit and on-demand ride fares
legs_df["Fare"] = np.zeros(legs_df.shape[0])
legs_df.loc[legs_df["Mode"] == 'bus', "Fare"] = legs_df.loc[legs_df["Mode"] == 'bus'].apply(
lambda row: calc_transit_fares(row, bus_fare_dict, person_df, trip_to_route), axis=1)
legs_df.loc[legs_df["Mode"] == 'OnDemand_ride', "Fare"] = ride_hail_fares['base'] + (
pd.to_timedelta(legs_df['Duration_sec']).dt.seconds / 60) * float(ride_hail_fares['duration']) + (
pd.to_numeric(
legs_df['Distance_m']) / 1000) * (
0.621371) * float(ride_hail_fares['distance'])
return legs_df
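# Worked example of the on-demand fare formula above, with hypothetical rates
# (base 2.00 $, 0.25 $ per minute, 1.00 $ per mile) for a 600 s, 3000 m ride:
#   fare = 2.00 + (600 / 60) * 0.25 + (3000 / 1000) * 0.621371 * 1.00 ≈ 6.36 $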
def one_path(path_trav, leg_id, pid, trip_id):
# extracts leg data from path traversal
# returns a leg_dataframe row
l_row = path_trav
leg_id_this = leg_id + l_row.name
leg_id_full = trip_id + "_l-" + str(leg_id)
veh_id = l_row["vehicle"]
leg_start_time = l_row['departureTime']
veh_type = l_row['vehicleType']
distance = l_row['length']
leg_end_time = l_row['arrivalTime']
leg_duration = int(leg_end_time) - int(leg_start_time)
leg_path = l_row['links']
leg_mode = l_row['mode']
leg_fuel = l_row['fuel']
leg_fuel_type = l_row['fuelType']
# return the leg record
return [pid, trip_id, leg_id_full, leg_mode, veh_id, veh_type, leg_start_time, leg_end_time, leg_duration, distance,
leg_path, leg_fuel, leg_fuel_type]
def parse_transit_trips(row, non_bus_path_traversal_events, bus_path_traversal_events, enter_veh_events):
# inputs:
# row: a row from transit_trips_df
# path_traversal_events: non-transit path traversal df
# bus_path_traversal_events: transit path traversal df
# enter_veh_events: enter vehicle events
leg_array = []
pid = row['PID']
trip_id = row['Trip_ID']
start_time = row['Start_time'].total_seconds()
duration = row['Duration_sec']
end_time = row['End_time']
mode = row['Mode']
# initiate the leg ID counter
leg_id = 0
# get all path traversals occuring within the time frame of this trip in which the driver is the person making this trip
path_trav = non_bus_path_traversal_events[
(non_bus_path_traversal_events['driver'] == pid) & (non_bus_path_traversal_events['arrivalTime'] <= end_time) & (
non_bus_path_traversal_events['departureTime'] >= start_time)]
path_trav = path_trav.reset_index(drop=True)
# get the vehicle entry events corresponding to this person during the time frame of this trip
veh_entries = enter_veh_events[
(enter_veh_events['person'] == pid) & (enter_veh_events['time'] >= start_time) & (
enter_veh_events['time'] <= end_time)]
# get bus entry events for this person & trip
bus_entries = veh_entries[veh_entries['vehicle'].str.startswith('siouxareametro-sd-us:', na=False)]
bus_entries = bus_entries.reset_index(drop=True)
if len(bus_entries) > 0:
prev_entry_time = start_time
for idx, bus_entry in bus_entries.iterrows():
if idx < len(bus_entries)-1:
next_entry = bus_entries.loc[idx+1]
next_entry_time = next_entry['time']
else:
next_entry_time = end_time
# get all path traversals occuring before the bus entry
prev_path_trav = path_trav[
(path_trav['arrivalTime'] <= bus_entry['time']) & (path_trav['arrivalTime'] >= prev_entry_time)]
# get all path traversals occuring after the bus entry
post_path_trav = path_trav[(path_trav['arrivalTime'] > bus_entry['time']) & (path_trav['arrivalTime'] <= next_entry_time)]
prev_path_trav = prev_path_trav.reset_index(drop=True)
post_path_trav = post_path_trav.reset_index(drop=True)
prev_entry_time = bus_entry['time']
# iterate through the path traversals prior to the bus entry
if len(prev_path_trav)>0:
these_legs = prev_path_trav.apply(lambda row1: one_path(row1, leg_id, pid, trip_id), axis=1)
leg_array.extend(these_legs)
# record transit leg
leg_id += 1
leg_id_full = trip_id + "_l-" + str(leg_id)
veh_id = bus_entry['vehicle']
leg_start_time = int(bus_entry['time'])
if len(post_path_trav)> 0:
leg_end_time = int(post_path_trav['departureTime'].values[0])
bus_path_trav = bus_path_traversal_events[(bus_path_traversal_events['vehicle'] == veh_id) & (
bus_path_traversal_events['arrivalTime'] <= leg_end_time) & (bus_path_traversal_events[
'departureTime']>=leg_start_time)]
else:
leg_end_time = next_entry_time
bus_path_trav = bus_path_traversal_events[(bus_path_traversal_events['vehicle'] == veh_id) & (
bus_path_traversal_events['arrivalTime'] < leg_end_time) & (bus_path_traversal_events[
'departureTime']>=leg_start_time)]
leg_duration = int(leg_end_time - bus_entry['time'])
# find the path traversals of the bus corresponding to the bus entry for this trip, occuring between the last prev_path_traversal and the first post_path_traversal
if len(bus_path_trav) > 0:
veh_type = bus_path_trav['vehicleType'].values[0]
distance = bus_path_trav['length'].sum()
leg_path = [path['links'] for p, path in bus_path_trav.iterrows()]
leg_mode = bus_path_trav['mode'].values[0]
leg_fuel = 0
leg_fuel_type = 'Diesel'
leg_array.append(
[pid, trip_id, leg_id_full, leg_mode, veh_id, veh_type, leg_start_time, leg_end_time,
leg_duration,
distance, leg_path, leg_fuel, leg_fuel_type])
# iterate through the path traversals after the bus entry
if len(post_path_trav) > 0:
these_legs = post_path_trav.apply(lambda row1: one_path(row1, leg_id, pid, trip_id), axis=1)
leg_array.extend(these_legs)
# if the agent underwent replanning, there will be no bus entry
else:
leg_array = parse_walk_car_trips(row, non_bus_path_traversal_events, enter_veh_events)
return leg_array
def parse_walk_car_trips(row, path_traversal_events, enter_veh_events):
# inputs:
# row: a row from transit_trips_df
# path_traversal_events: non-transit path traversal df
# person_costs: person cost events df
# enter_veh_events: enter vehicle events
leg_array = []
pid = row['PID']
trip_id = row['Trip_ID']
start_time = row['Start_time']
duration = row['Duration_sec']
end_time = row['End_time']
mode = row['Mode']
# initiate the leg ID counter
leg_id = 0
# get all path traversals occuring within the time frame of this trip in which the driver is the person making this trip
path_trav = path_traversal_events.loc[
(path_traversal_events['driver'] == pid) & (path_traversal_events['arrivalTime'] <= end_time) & (
path_traversal_events['departureTime'] >= start_time.total_seconds()),]
path_trav.reset_index(drop=True, inplace=True)
# iterate through the path traversals
    if len(path_trav) > 0:
these_legs = path_trav.apply(lambda row: one_path(row, leg_id, pid, trip_id), axis=1)
leg_array.extend(these_legs)
return leg_array
def parse_ridehail_trips(row, path_traversal_events, enter_veh_events):
# inputs:
# row: a row from transit_trips_df
# path_traversal_events: non-transit path traversal df
# person_costs: person cost events df
# enter_veh_events: enter vehicle events
leg_array = []
pid = row['PID']
trip_id = row['Trip_ID']
start_time = row['Start_time']
duration = row['Duration_sec']
end_time = row['End_time']
mode = row['Mode']
# initiate the leg ID counter
leg_id = 0
# get all vehicle entry events corresponding to this person during the time frame of this trip, not including those corresponding to a walking leg
veh_entry = enter_veh_events.loc[
(enter_veh_events['person'] == pid) & (enter_veh_events['time'] >= start_time.total_seconds()) & (
enter_veh_events['time'] <= end_time),]
veh_entry2 = veh_entry.loc[(veh_entry['vehicle'] != 'body-' + pid),]
try:
veh_id = veh_entry2['vehicle'].item()
leg_start_time = veh_entry2['time'].item()
# get the path traversal corresponding to this ridehail trip
path_trav = path_traversal_events.loc[(path_traversal_events['vehicle'] == veh_id) & (
path_traversal_events['departureTime'] == int(leg_start_time)) & (path_traversal_events['numPassengers'] > 0),]
except:
path_trav = []
print(row)
leg_id += 1
# create leg ID
leg_id_full = trip_id + "_l-" + str(leg_id)
if len(path_trav) > 0:
veh_type = path_trav['vehicleType'].values[0]
distance = path_trav['length'].item()
leg_end_time = path_trav['arrivalTime'].item()
leg_duration = int(leg_end_time) - int(leg_start_time)
leg_path = path_trav['links'].item()
leg_mode = 'OnDemand_ride'
leg_fuel = path_trav['fuel'].item()
leg_fuel_type = path_trav['fuelType'].item()
leg_array.append(
[pid, trip_id, leg_id_full, leg_mode, veh_id, veh_type, leg_start_time, leg_end_time, leg_duration,
distance, leg_path, leg_fuel, leg_fuel_type])
return leg_array
def label_trip_mode(modes):
if ('walk' in modes) and ('car' in modes) and ('bus' in modes):
return 'drive_transit'
elif ('car' in modes) and ('bus' in modes):
return 'drive_transit'
elif ('walk' in modes) and ('bus' in modes):
return 'walk_transit'
elif ('walk' in modes) and ('car' in modes):
return 'car'
elif ('car' == modes):
return 'car'
elif ('OnDemand_ride' in modes):
return 'OnDemand_ride'
elif ('walk' == modes):
return 'walk'
else:
print(modes)
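# Small sanity examples for label_trip_mode; the argument is the collection of leg
# modes produced by legs_grouped['Mode'].unique() in merge_legs_trips below.
def _example_label_trip_mode():
    assert label_trip_mode(['walk', 'bus']) == 'walk_transit'
    assert label_trip_mode(['walk', 'car', 'bus']) == 'drive_transit'
    assert label_trip_mode(['OnDemand_ride', 'walk']) == 'OnDemand_ride'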
def merge_legs_trips(legs_df, trips_df):
trips_df = trips_df[ ['PID', 'Trip_ID', 'Origin_Activity_ID', 'Destination_activity_ID', 'Trip_Purpose',
'Mode']]
trips_df.columns = ['PID', 'Trip_ID', 'Origin_Activity_ID', 'Destination_activity_ID', 'Trip_Purpose',
'plannedTripMode']
legs_grouped = legs_df.groupby("Trip_ID")
unique_modes = legs_grouped['Mode'].unique()
unique_modes_df = pd.DataFrame(unique_modes)
unique_modes_df.columns = ['legModes']
    merged_trips = trips_df.merge(legs_grouped[['Duration_sec', 'Distance_m', 'fuel', 'FuelCost', 'Fare']].sum(), on='Trip_ID')
merged_trips.set_index('Trip_ID',inplace=True)
legs_transit = legs_df.loc[legs_df['Mode']=='bus',]
legs_transit_grouped = legs_transit.groupby("Trip_ID")
count_modes = legs_transit_grouped['Mode'].count()
merged_trips.loc[count_modes.loc[count_modes.values >1].index.values,'Fare'] = merged_trips.loc[count_modes.loc[count_modes.values >1].index.values,'Fare']/count_modes.loc[count_modes.values >1].values
merged_trips = merged_trips.merge(unique_modes_df,on='Trip_ID')
legs_grouped_start_min = pd.DataFrame(legs_grouped['Start_time'].min())
legs_grouped_end_max = pd.DataFrame(legs_grouped['End_time'].max())
merged_trips= merged_trips.merge(legs_grouped_start_min,on='Trip_ID')
merged_trips= merged_trips.merge(legs_grouped_end_max,on='Trip_ID')
merged_trips['realizedTripMode'] = merged_trips['legModes'].apply(lambda row: label_trip_mode(row))
return merged_trips
# ########### 2. PARSING AND PROCESSING THE XML FILES INTO PANDAS DATA FRAMES ###############
def get_person_output_from_households_xml(households_xml, output_folder_path):
"""
- Parses the outputHouseholds file to create the households_dataframe gathering each person's household attributes
(person id, household id, number of vehicles in the household, overall income of the household)
- Saves the household dataframe to csv
Parameters
----------
households_xml: ElementTree object
Output of the open_xml() function for the `outputHouseholds.xml` file
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation
(format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
households_df: pandas Dataframe
Record of each person's household attributes
(person id, household id, number of vehicles in the household, overall income of the household)
"""
# get root of the `outputHouseholds.xml` file
households_root = households_xml.getroot()
hhd_array = []
for hhd in households_root.getchildren():
hhd_id = hhd.get('id').strip()
hhd_children = hhd.getchildren()
# check for vehicles; record household attributes
if len(hhd_children) == 3:
members = hhd_children[0]
vehicles = hhd_children[1]
income = hhd_children[2]
vehs = vehicles.getchildren()
hdd_num_veh = len(vehs)
else:
members = hhd_children[0]
vehicles = []
income = hhd_children[1]
hdd_num_veh = 0
hhd_income = income.text.strip()
# get list of persons in household and make a record of each person
list_members = members.getchildren()
for person in list_members:
pid = person.attrib['refId'].strip()
hhd_array.append([pid, hhd_id, hdd_num_veh, hhd_income])
# convert array to dataframe and save
households_df = pd.DataFrame(hhd_array, columns=['PID', 'Household_ID', 'Household_num_vehicles', 'Household_income [$]'])
households_df.to_csv(str(output_folder_path) + "/households_dataframe.csv")
return households_df
def get_person_output_from_output_plans_xml(output_plans_xml):
""" Parses the outputPlans file to create the person_dataframe gathering individual attributes of each person
(person id, age, sex, home location)
Parameters
----------
output_plans_xml: ElementTree object
Output of the open_xml() function for the `outputPlans.xml` file
Returns
-------
person_df: pandas DataFrame
Record of some of each person's individual attributes (person id, age, sex, home location)
"""
# get root of the `outputPlans.xml` file
output_plans_root = output_plans_xml.getroot()
person_array = []
for person in output_plans_root.findall('./person'):
pid = person.get('id')
attributes = person.findall('./attributes')[0]
        age = int(attributes.findall('./attribute[@name="age"]')[0].text)
        sex = attributes.findall('./attribute[@name="sex"]')[0].text
plan = person.findall('./plan')[0]
home = plan.findall('./activity')[0]
home_x = home.get('x')
home_y = home.get('y')
person_array.append([pid, age, sex, home_x, home_y])
# convert person array to dataframe
person_df = pd.DataFrame(person_array, columns=['PID', 'Age', 'Sex', 'Home_X', 'Home_Y'])
return person_df
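# A minimal sketch of the attribute lookup used above: ElementTree accepts the XPath
# predicate [@name="..."] to select a child element by attribute value. Toy XML only.
def _example_attribute_xpath():
    import xml.etree.ElementTree as ET
    person = ET.fromstring(
        '<person id="1"><attributes>'
        '<attribute name="age">34</attribute>'
        '<attribute name="sex">F</attribute>'
        '</attributes></person>'
    )
    attributes = person.findall('./attributes')[0]
    return int(attributes.findall('./attribute[@name="age"]')[0].text)  # 34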
def get_person_output_from_output_person_attributes_xml(persons_xml):
""" Parses outputPersonAttributes.xml file to create population_attributes_dataframe gathering individual attributes
of the population (person id, excluded modes (i.e. transportation modes that the peron is not allowed to use),
income, rank, value of time).
Parameters
----------
persons_xml: ElementTree object
Output of the open_xml() function for the `outputPersonAttributes.xml` file
Returns
-------
person_df_2: pandas DataFrame
Record of some of each person's individual attributes (person id, excluded modes (i.e. transportation modes
that the peron is not allowed to use), income, rank, value of time)
"""
# get root of the `outputPersonAttributes.xml` file
persons_root = persons_xml.getroot()
population_attributes = []
population = persons_root.getchildren()
for person in population:
pid = person.get('id')
attributes = person.findall("./attribute")
population_attributes_dict = {}
population_attributes_dict['PID'] = pid
for attribute in attributes:
population_attributes_dict[attribute.attrib['name']] = attribute.text
population_attributes.append(population_attributes_dict)
# convert attribute array to dataframe
person_df_2 = pd.DataFrame(population_attributes)
return person_df_2
import time
def get_persons_attributes_output(output_plans_xml, persons_xml, households_xml, output_folder_path):
"""Outputs the augmented persons dataframe, including all individual and household attributes for each person
Parameters
----------
output_plans_xml: ElementTree object
Output of the open_xml() function for the `outputPlans.xml` file
persons_xml: ElementTree object
Output of the open_xml() function for the `outputPersonAttributes.xml` file
households_xml: ElementTree object
Output of the open_xml() function for the `outputHouseholds.xml` file
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation (format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
persons_attributes_df: pandas DataFrame
Record of all individual and household attributes for each person
"""
# get the person attributes dataframes
households_df = get_person_output_from_households_xml(households_xml, output_folder_path)
person_df = get_person_output_from_output_plans_xml(output_plans_xml)
person_df_2 = get_person_output_from_output_person_attributes_xml(persons_xml)
# set the index of all dataframes to PID (person ID)
person_df.set_index('PID', inplace=True)
person_df_2.set_index('PID', inplace=True)
households_df.set_index('PID', inplace=True)
# join the three dataframes together
persons_attributes_df = person_df.join(person_df_2)
persons_attributes_df = persons_attributes_df.join(households_df)
return persons_attributes_df
def get_activities_output(experienced_plans_xml):
""" Parses the experiencedPlans.xml file to create the activities_dataframe, gathering each person's activities' attributes
(person id, activity id, activity type, activity start time, activity end time)
Parameters
----------
experienced_plans_xml: ElementTree object
Output of the open_xml() function for the `<num_iterations>.experiencedPlans.xml` file located in
the `/ITERS/it.<num_iterations> folder
Returns
-------
activities_df: pandas DataFrame
Record of each person's activities' attributes
trip_purposes: list of string
        purpose of each trip, e.g. "Work", "Home", etc.
"""
# get root of experiencedPlans xml file
plans_root = experienced_plans_xml.getroot()
acts_array = []
# iterate through persons, recording activities and trips for each person
for person in plans_root.findall('./person'):
# we use the person ID from the raw output
pid = person.get('id')
plan = person.getchildren()[0]
activities = plan.findall('./activity')
# initialize activity ID counters (we create activity IDs using these)
act_id = 0
trip_purposes = []
# iterate through activities and make record of each activity
for activity in activities:
act_id += 1
# create activity ID
activity_id = pid + "_a-" + str(act_id)
act_type = activity.get('type')
if activity.get('start_time') is None:
act_start_time = None
else:
act_start_time = activity.get('start_time')
if activity.get('end_time') is None:
act_end_time = None
else:
act_end_time = activity.get('end_time')
# record all activity types to determine trip trip_purposes
trip_purposes.append([act_type])
acts_array.append([pid, activity_id, act_type, act_start_time, act_end_time])
# convert the activity_array to a dataframe
activities_df = pd.DataFrame(acts_array, columns=['PID', 'Activity_ID', 'Activity_Type', 'Start_time', 'End_time'])
return activities_df, trip_purposes
def get_trips_output(experienced_plans_xml_path):
""" Parses the experiencedPlans.xml file to create the trips dataframe, gathering each person's trips' attributes
(person id, trip id, id of the origin activity of the trip, id of the destination activity of the trip, trip purpose,
mode used, start time of the trip, duration of the trip, distance of the trip, path of the trip)
Parameters
----------
experienced_plans_xml_path: str
Output of the open_xml() function for the `<num_iterations>.experiencedPlans.xml` file located in
the `/ITERS/it.<num_iterations> folder
Returns
-------
trips_df: pandas DataFrame
Record of each person's trips' attributes
"""
# get root of experiencedPlans xml file
experienced_plans_xml = open_xml(experienced_plans_xml_path)
plans_root = experienced_plans_xml.getroot()
trip_array = []
# Getting the activities dataframe
_, trip_purposes = get_activities_output(experienced_plans_xml)
# iterate through persons, recording activities and trips for each person
for person in plans_root.findall('./person'):
# we use the person ID from the raw output
pid = person.get('id')
plan = person.getchildren()[0]
legs = plan.findall('./leg')
# initialize trip ID counters (we create trip IDs using these)
trip_id = 0
# iterate through trips (called legs in the `experiencedPlans.xml` file) and make record of each trip
for trip in legs:
trip_id += 1
# create trip ID
trip_id_full = pid + "_t-" + str(trip_id)
# record activity IDs for origin and destination activities of the trip
o_act_id = pid + "_a-" + str(trip_id)
d_act_id = pid + "_a-" + str(trip_id + 1)
# identify the activity type of the trip destination to record as the trip trip_purpose
trip_purpose = trip_purposes[trip_id][0]
mode = trip.get('mode')
dep_time = trip.get('dep_time')
duration = trip.get('trav_time')
route = trip.find('./route')
distance = route.get('distance')
path = route.text
trip_array.append(
[pid, trip_id_full, o_act_id, d_act_id, trip_purpose, mode, dep_time, duration, distance, path])
# convert the trip_array to a dataframe
trips_df = pd.DataFrame(trip_array,
columns=['PID', 'Trip_ID', 'Origin_Activity_ID', 'Destination_activity_ID', 'Trip_Purpose',
'Mode', 'Start_time', 'Duration_sec', 'Distance_m', 'Path_linkIds'])
return trips_df
# def get_events_output(events):
# """ Parses the outputEvents.xml to gather the event types into a pandas DataFrame
#
# Parameters
# ----------
# events: xml.etree.ElementTree.ElementTree
# Element tree instance of the xml file of interest
#
# Returns
# -------
# :pandas Dataframe
#
# """
# event_data = {}
# root = events.getroot()
# for event in root.getchildren():
# add_event_type_data_to_library(event, event_data)
# return pd.DataFrame(event_data)
#
#
# def add_event_type_data_to_library(event, event_data):
# """For each child element in the tree, creates a dictionary with the "type" attribute.
#
# Parameters
# ----------
# event: xml.etree.ElementTree.Element
# Child of the element tree instance
#
# event_data: dictionary
# Dictionary where the "type" attribute of the child element will be stored
#
# """
# attrib = event.attrib
# event_type = attrib['type']
# if event_type not in event_data:
# dd = defaultdict(list)
# event_data[event_type] = dd
# else:
# dd = event_data[event_type]
# for k, v in attrib.items():
# dd[k].append(v)
def get_path_traversal_output(events_df):
""" Parses the experiencedPlans.xml file to create the trips dataframe, gathering each person's trips' attributes
(person id, trip id, id of the origin activity of the trip, id of the destination activity of the trip, trip purpose,
mode used, start time of the trip, duration of the trip, distance of the trip, path of the trip)
Parameters
----------
events_df: pandas DataFrame
DataFrame extracted from the outputEvents.xml` file: output of the extract_dataframe() function
trips_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_trips_output() function
Returns
-------
path_traversal_events_df: pandas DataFrame
"""
# creates a dataframe of trip legs using the events dataframe; creates and saves a pathTraversal dataframe to csv
# outputs the legs dataframe
# Selecting the columns of interest
events_df = events_df[['time', 'type', 'person', 'vehicle', 'driver', 'vehicleType', 'length',
'numPassengers', 'departureTime', 'arrivalTime', 'mode', 'links',
'fuelType', 'fuel']]
# get all path traversal events (all vehicle movements, and all person walk movements)
path_traversal_events_df = events_df[(events_df['type'] == 'PathTraversal') & (events_df['length'] > 0)]
path_traversal_events_df = path_traversal_events_df.reset_index(drop=True)
return path_traversal_events_df
def get_legs_output(events_df, trips_df):
""" Parses the outputEvents.xml and trips_df file to create the legs dataframe, gathering each person's trips' legs' attributes
(PID, Trip_ID, Leg_ID, Mode, Veh, Veh_type, Start_time, End_time,
Duration, Distance, Path, fuel, fuelType)
Parameters
----------
events_df: pandas DataFrame
DataFrame extracted from the outputEvents.xml` file: output of the extract_dataframe() function
trips_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_trips_output() function
Returns
-------
legs_df: pandas DataFrame
Records the legs attributes for each person's trip
"""
# convert trip times to timedelta; calculate end time of trips
trips_df['Start_time'] = pd.to_timedelta(trips_df['Start_time'])
trips_df['Duration_sec'] = pd.to_timedelta(trips_df['Duration_sec'])
trips_df['End_time'] = trips_df['Start_time'].dt.seconds + trips_df['Duration_sec'].dt.seconds + (
3600 * 24 * trips_df['Start_time'].dt.days)
path_traversal_events_full = get_path_traversal_output(events_df)
# get all relevant personEntersVehicle events (those occurring at time ==0 are all ridehail/bus drivers)
enter_veh_events = events_df[(events_df['type'] == 'PersonEntersVehicle') & (events_df['time'] > 0)]
# filter for bus path traversals only
bus_path_traversal_events = path_traversal_events_full[path_traversal_events_full['mode'] == "bus"]
# filter for car & body path traversals only
non_bus_path_traversal_events = path_traversal_events_full[path_traversal_events_full['mode'] != "bus"]
# get all PersonCost events (record the expenditures of persons during a trip)
# person_costs = events_df.loc[events_df['type']=='PersonCost',]
legs_array = []
# record all legs corresponding to OnDemand_ride trips
on_demand_ride_trips = trips_df.loc[((trips_df['Mode'] == 'OnDemand_ride') | (trips_df['Mode'] == 'ride_hail')),]
on_demand_ride_legs_array = on_demand_ride_trips.apply(
lambda row: parse_ridehail_trips(row, non_bus_path_traversal_events, enter_veh_events), axis=1)
for bit in on_demand_ride_legs_array.tolist():
legs_array.extend(tid for tid in bit)
# record all legs corresponding to transit trips
transit_trips_df = trips_df[(trips_df['Mode'] == 'drive_transit') | (trips_df['Mode'] == 'walk_transit')]
transit_legs_array = transit_trips_df.apply(
lambda row: parse_transit_trips(row, non_bus_path_traversal_events, bus_path_traversal_events, enter_veh_events),
axis=1)
for bit in transit_legs_array.tolist():
legs_array.extend(tid for tid in bit)
# record all legs corresponding to walk and car trips
walk_car_trips_df = trips_df.loc[(trips_df['Mode'] == 'car') | (trips_df['Mode'] == 'walk'),]
walk_car_legs_array = walk_car_trips_df.apply(
lambda row: parse_walk_car_trips(row, non_bus_path_traversal_events, enter_veh_events), axis=1)
for bit in walk_car_legs_array.tolist():
legs_array.extend(tid for tid in bit)
# convert the leg array to a dataframe
legs_df = pd.DataFrame(legs_array,
columns=['PID', 'Trip_ID', 'Leg_ID', 'Mode', 'Veh', 'Veh_type', 'Start_time', 'End_time', 'Duration_sec', 'Distance_m', 'Path', 'fuel', 'fuelType'])
return legs_df, path_traversal_events_full
# ############ 3. GENERATE THE CSV FILES ###########
def extract_person_dataframes(output_plans_path, persons_path, households_path, output_folder_path):
""" Create a csv file from the processed person dataframe
Parameters
----------
output_plans_path: pathlib.Path object
Absolute path of the the `outputPlans.xml` file
persons_path: pathlib.Path object
Absolute path of the the `outputPersonAttributes.xml` file
households_path: pathlib.Path object
Absolute path of the the `outputHouseholds.xml` file
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation (format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
persons_attributes_df: pandas DataFrame
Record of all individual and household attributes for each person
"""
# opens the xml files
output_plans_xml = open_xml(output_plans_path)
persons_xml = open_xml(persons_path)
households_xml = open_xml(households_path)
persons_attributes_df = get_persons_attributes_output(output_plans_xml, persons_xml, households_xml, output_folder_path)
persons_attributes_df.to_csv(str(output_folder_path) + "/persons_dataframe.csv")
print("person_dataframe.csv generated")
return persons_attributes_df
def extract_activities_dataframes(experienced_plans_path, output_folder):
""" Create a csv file from the processed activities dataframe
Parameters
----------
experienced_plans_path: pathlib.Path object
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation
(format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
activities_df
"""
# opens the experiencedPlans and passes the xml file to get_activity_trip_output
# returns the actitivities_dataframe and trips_dataframe
experienced_plans_xml = open_xml(experienced_plans_path)
activities_df, _ = get_activities_output(experienced_plans_xml)
# convert dataframes into csv files
activities_df.to_csv(str(output_folder) + "/activities_dataframe.csv")
print("activities_dataframe.csv generated")
return activities_df
def extract_legs_dataframes(events_path, trips_df, person_df, bus_fares_df, trip_to_route, fuel_costs, output_folder_path):
""" Create a csv file from the processes legs dataframe
Parameters
----------
events_path: pathlib.Path object
Absolute path of the `ITERS/<num_iterations>.events.csv.gz` file
trips_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_trips_output() function
person_df: pandas DataFrame
Record of each person's trips' attributes: output of the get_persons_attributes_output() function
bus_fares_df: pandas DataFrame
Dataframe with rows = ages and columns = routes: output of the parse_bus_fare_input() function
trip_to_route: dictionary
route_id / trip_id correspondence extracted from the `trips.csv` file in the
`/reference-data/sioux_faux/sioux_faux_bus_lines/gtfs_data` folder of the Starter Kit
fuel_costs: dictionary
fuel type / fuel price correspondence extracted from the `beamFuelTypes.csv` file in the
`/reference-data/sioux_faux/config/<SAMPLE_SIZE>` folder of the Starter Kit
output_folder_path: pathlib.Path object
Absolute path of the output folder of the simulation
(format of the output folder name: `<scenario_name>-<sample_size>__<date and time>`)
Returns
-------
legs_df: pandas DataFrame
Records the legs attributes for each person's trips
"""
# opens the outputevents and passes the xml file to get_legs_output
# augments the legs dataframe with estimates of the fuelcosts and fares for each leg
# extract a dataframe from the `outputEvents.xml` file
#all_events_df = extract_dataframe(str(events_path))
    all_events_df = pd.read_csv(events_path)
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_rolling_apply_consistency_sum_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_rolling_apply_consistency_sum_no_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
],
)
def test_rolling_functions_window_non_shrinkage_binary(f):
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr():
# #18804 all rolling skew for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr():
# #18804 all rolling kurt for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
@pytest.mark.parametrize(
"data_type",
[np.dtype(f"f{width}") for width in [4, 8]]
+ [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
)
def test_rolling_min_max_numeric_types(data_type):
# GH12373
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=0).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
|
tm.assert_frame_equal(df1_result, df1_expected)
|
pandas._testing.assert_frame_equal
|
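The record above exercises pandas rolling-window reducers, and its completion calls pandas._testing.assert_frame_equal. As a minimal, self-contained sketch of that comparison pattern (assuming pandas >= 1.0, where pandas._testing exposes the assert helpers; the frame values and window size below are illustrative, not taken from the record):

import numpy as np
import pandas as pd
import pandas._testing as tm

# With window=2 and the default min_periods, the first row has too few
# observations and stays NaN; later rows average adjacent pairs.
df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
result = df.rolling(window=2).mean()
expected = pd.DataFrame({"a": [np.nan, 1.5, 2.5, 3.5]})
tm.assert_frame_equal(result, expected)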
__all__ = [
'ROVER',
]
from copy import deepcopy
from typing import List, Callable, Dict, Optional
from enum import Enum, unique
import attr
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from ..annotations import TEXT_DATA, TASKS_TEXTS, manage_docstring, Annotation
from ..base import BaseTextsAggregator
@unique
class AlignmentAction(Enum):
DELETION = 'DELETION'
SUBSTITUTION = 'SUBSTITUTION'
INSERTION = 'INSERTION'
CORRECT = 'CORRECT'
@attr.s
class AlignmentEdge:
value: str = attr.ib()
sources_count: Optional[int] = attr.ib()
@attr.s
@manage_docstring
class ROVER(BaseTextsAggregator):
"""Recognizer Output Voting Error Reduction (ROVER)
J. G. Fiscus,
"A post-processing system to yield reduced word error rates: Recognizer Output Voting Error Reduction (ROVER),"
1997 IEEE Workshop on Automatic Speech Recognition and Understanding Proceedings, 1997, pp. 347-354.
https://doi.org/10.1109/ASRU.1997.659110
"""
tokenizer: Callable[[str], List[str]] = attr.ib()
detokenizer: Callable[[List[str]], str] = attr.ib()
silent: bool = attr.ib(default=True)
# Available after fit
# texts_
@manage_docstring
def fit(self, data: TEXT_DATA) -> Annotation(type='ROVER', title='self'):
result = {}
grouped_tasks = data.groupby('task') if self.silent else tqdm(data.groupby('task'))
for task, df in grouped_tasks:
hypotheses = [self.tokenizer(text) for i, text in enumerate(df['text'])]
edges = self._build_word_transition_network(hypotheses)
rover_result = self._get_result(edges)
text = self.detokenizer([value for value in rover_result if value != ''])
result[task] = text
result =
|
pd.Series(result, name='text')
|
pandas.Series
|
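The ROVER record above stops at result =, and its completion is pd.Series(result, name='text'): the per-task dictionary of aggregated texts is packed into a named Series. A small sketch of that final step, with hypothetical task ids and texts:

import pandas as pd

# Hypothetical aggregation output: one reconstructed text per task id.
result = {"task_1": "hello world", "task_2": "good morning"}

# Dict keys become the index (task ids); name='text' labels the values.
texts = pd.Series(result, name="text")
texts.index.name = "task"
print(texts)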
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
|
tm.assert_frame_equal(r, e)
|
pandas.util.testing.assert_frame_equal
|
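The last record's completion targets pandas.util.testing.assert_frame_equal, the legacy testing namespace (deprecated in pandas 1.x and removed in 2.0). A minimal sketch of the include/exclude select_dtypes pattern it closes, with illustrative columns:

import pandas as pd
import pandas.util.testing as tm  # legacy alias; assumes a pandas version that still ships it

df = pd.DataFrame(
    {
        "a": list("abc"),                           # object
        "b": list(range(1, 4)),                     # int64
        "e": [True, False, True],                   # bool
        "f": pd.date_range("20130101", periods=3),  # datetime64[ns]
    }
)

# Keep bool and int64 columns while excluding datetimes; column order
# follows the original frame, so the expected frame is ["b", "e"].
r = df.select_dtypes(include=["bool", "int64"], exclude=["datetime"])
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)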