| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19-1.03M | stringlengths 4-2.12k | stringlengths 8-90 |
# -*- coding: utf-8 -*-
"""
This module contains the classes for testing the model module of mpcpy.
"""
import unittest
from mpcpy import models
from mpcpy import exodata
from mpcpy import utility
from mpcpy import systems
from mpcpy import units
from mpcpy import variables
from testing import TestCaseMPCPy
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import pickle
import os
#%%
class SimpleRC(TestCaseMPCPy):
'''Test simple model simulate and estimate.
'''
def setUp(self):
self.start_time = '1/1/2017';
self.final_time = '1/2/2017';
# Set measurements
self.measurements = {};
self.measurements['T_db'] = {'Sample' : variables.Static('T_db_sample', 1800, units.s)};
def tearDown(self):
del self.start_time
del self.final_time
del self.measurements
def test_simulate(self):
'''Test simulation of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
df_test = model.get_base_measurements('Simulated');
self.check_df(df_test, 'simulate_base.csv');
def test_simulate_with_save_parameter_input_data(self):
'''Test simulation of a model with saving of parameter and input data.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data,
save_parameter_input_data=True);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
df_test = model.get_base_measurements('Simulated');
self.check_df(df_test, 'simulate_base.csv');
def test_estimate_one_par(self):
'''Test the estimation of one parameter of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate system
system = systems.EmulationFromFMU(self.measurements, \
moinfo = (mopath, modelpath, {}));
system.collect_measurements(self.start_time, self.final_time);
# Define parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Estimate models
model.estimate(self.start_time, self.final_time, ['T_db'])
# Check references
data = [model.parameter_data['heatCapacitor.C']['Value'].display_data()]
index = ['heatCapacitor.C']
df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
self.check_df(df_test, 'estimate_one_par.csv', timeseries=False)
def test_estimate_two_par(self):
'''Test the estimation of two parameters of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate system
system = systems.EmulationFromFMU(self.measurements, \
moinfo = (mopath, modelpath, {}));
system.collect_measurements(self.start_time, self.final_time);
# Define parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
parameter_data['thermalResistor.R'] = {};
parameter_data['thermalResistor.R']['Value'] = variables.Static('R_Value', 0.02, units.K_W);
parameter_data['thermalResistor.R']['Minimum'] = variables.Static('R_Min', 0.001, units.K_W);
parameter_data['thermalResistor.R']['Maximum'] = variables.Static('R_Max', 0.1, units.K_W);
parameter_data['thermalResistor.R']['Free'] = variables.Static('R_Free', True, units.boolean);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Estimate models
model.estimate(self.start_time, self.final_time, ['T_db'])
# Check references
data = [model.parameter_data['heatCapacitor.C']['Value'].display_data(),
model.parameter_data['thermalResistor.R']['Value'].display_data(),]
index = ['heatCapacitor.C', 'thermalResistor.R']
df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
self.check_df(df_test, 'estimate_two_par.csv', timeseries=False)
def test_simulate_continue(self):
'''Test simulation of a model in steps.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
# Simulate model in 8-hour chunks
sim_steps = pd.date_range(self.start_time, self.final_time, freq=str('8H'))
for i in range(len(sim_steps)-1):
if i == 0:
model.simulate(sim_steps[i], sim_steps[i+1]);
else:
model.simulate('continue', sim_steps[i+1]);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_step{0}.csv'.format(i));
def test_simulate_noinputs(self):
'''Test simulation of a model with no external inputs.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}));
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_noinputs.csv');
def test_estimate_error_nofreeparameters(self):
'''Test error raised if no free parameters passed.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate model
model_no_params = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}));
# Check error raised with no parameters
with self.assertRaises(ValueError):
model_no_params.estimate(self.start_time, self.final_time, []);
# Set parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', False, units.boolean);
# Instantiate model
model_no_free = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Check error raised with no free parameters
with self.assertRaises(ValueError):
model_no_free.estimate(self.start_time, self.final_time, []);
def test_estimate_error_nomeasurements(self):
'''Test error raised if measurement_variable_list not in measurements dictionary.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Set parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
# Instantiate model
model_no_meas = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Check error raised when a measurement variable is not in the measurements dictionary
with self.assertRaises(ValueError):
model_no_meas.estimate(self.start_time, self.final_time, ['wrong_meas']);
def test_instantiate_error_incompatible_estimation(self):
'''Test error raised if estimation method is incompatible with model.'''
# Set model path
fmupath = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v1.fmu');
with self.assertRaises(ValueError):
model = models.Modelica(models.JModelica, models.RMSE, {}, fmupath=fmupath);
#%%
class EstimateFromJModelicaRealCSV(TestCaseMPCPy):
'''Test parameter estimation of a model using JModelica from real csv data.
'''
def setUp(self):
## Setup building fmu emulation
self.building_source_file_path_est = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_est.csv');
self.building_source_file_path_val = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val.csv');
self.building_source_file_path_val_missing = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val_missing.csv');
self.zone_names = ['wes', 'hal', 'eas'];
self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
'intCon_wes' : ('wes', 'intCon', units.W_m2), \
'intLat_wes' : ('wes', 'intLat', units.W_m2), \
'intRad_hal' : ('hal', 'intRad', units.W_m2), \
'intCon_hal' : ('hal', 'intCon', units.W_m2), \
'intLat_hal' : ('hal', 'intLat', units.W_m2), \
'intRad_eas' : ('eas', 'intRad', units.W_m2), \
'intCon_eas' : ('eas', 'intCon', units.W_m2), \
'intLat_eas' : ('eas', 'intLat', units.W_m2)};
self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
'conHeat_hal' : ('conHeat_hal', units.unit1), \
'conHeat_eas' : ('conHeat_eas', units.unit1)};
# Measurements
self.measurements = {};
self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurement_variable_map = {'wesTdb_mea' : ('wesTdb', units.K),
'halTdb_mea' : ('halTdb', units.K),
'easTdb_mea' : ('easTdb', units.K),
'wesPhvac_mea' : ('wesPhvac', units.W),
'halPhvac_mea' : ('halPhvac', units.W),
'easPhvac_mea' : ('easPhvac', units.W),
'Ptot_mea' : ('Ptot', units.W)}
## Setup model
self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
self.modelpath = 'LBNL71T_MPC.MPC';
self.libraries = os.environ.get('MODELICAPATH');
self.estimate_method = models.JModelica;
self.validation_method = models.RMSE;
# Instantiate exo data sources
self.weather = exodata.WeatherFromEPW(self.weather_path);
self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
# Parameters
self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
self.parameters.collect_data();
self.parameters.data['lat'] = {};
self.parameters.data['lat']['Value'] = self.weather.lat;
# Instantiate test building
self.building_est = systems.RealFromCSV(self.building_source_file_path_est,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Exogenous collection time
self.start_time_exodata = '1/1/2015';
self.final_time_exodata = '1/30/2015';
# Estimation time
self.start_time_estimation = '1/1/2015';
self.final_time_estimation = '1/4/2015';
# Validation time
self.start_time_validation = '1/4/2015';
self.final_time_validation = '1/5/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
# Collect measurement data
self.building_est.collect_measurements(self.start_time_estimation, self.final_time_estimation);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building_est.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name);
# Simulate model with initial guess
self.model.simulate(self.start_time_estimation, self.final_time_estimation)
def tearDown(self):
del self.model
del self.building_est
del self.weather
del self.internal
del self.control
del self.parameters
del self.measurements
def test_estimate_and_validate(self):
'''Test the estimation of a model's coefficients based on measured data.'''
plt.close('all');
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Finish test
self._finish_estimate_validate('')
def test_estimate_and_validate_missing_measurements(self):
'''Test the estimation of a model's coefficients based on measured data.
Some of the validation measurement data is missing.
'''
plt.close('all');
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE.csv', timeseries=False);
# Instantiate validate building
building_val = systems.RealFromCSV(self.building_source_file_path_val_missing,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE_missing.csv', timeseries=False);
def test_estimate_and_validate_global_start_init(self):
'''Test the estimation of a model's coefficients based on measured data using global start and user-defined initial value.'''
plt.close('all');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
# Finish test
self._finish_estimate_validate('_global_start_winit')
def test_estimate_and_validate_global_start_woinit(self):
'''Test the estimation of a model's coefficients based on measured data using global start and no user-defined initial value.'''
plt.close('all');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=False);
# Finish test
self._finish_estimate_validate('_global_start_woinit')
def test_estimate_and_validate_global_start_maxexceeded(self):
'''Test the estimation of a model's coefficients based on measured data using global start and maximum cpu time and iterations.'''
plt.close('all');
# Set maximum cpu time for JModelica
opt_options = self.model._estimate_method.opt_problem.get_optimization_options();
opt_options['IPOPT_options']['max_cpu_time'] = 60;
opt_options['IPOPT_options']['max_iter'] = 100;
self.model._estimate_method.opt_problem.set_optimization_options(opt_options);
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
# Finish test
self._finish_estimate_validate('_global_start_maxexceeded')
def _finish_estimate_validate(self,tag):
'''Internal method for finishing the estimate and validate tests.'''
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE{0}.csv'.format(tag), timeseries=False);
# All estimates if global estimate
try:
glo_est_data_test = self.model.get_global_estimate_data()
self.check_json(glo_est_data_test, 'estimate_gloest{0}.txt'.format(tag));
except:
pass
# Instantiate validate building
self.building_val = systems.RealFromCSV(self.building_source_file_path_val,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
self.building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = self.building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE{0}.csv'.format(tag), timeseries=False);
class EstimateFromJModelicaEmulationFMU(TestCaseMPCPy):
'''Test emulation-based parameter estimation of a model using JModelica.
'''
def setUp(self):
## Setup building fmu emulation
self.building_source_file_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v2.fmu');
self.zone_names = ['wes', 'hal', 'eas'];
self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
'intCon_wes' : ('wes', 'intCon', units.W_m2), \
'intLat_wes' : ('wes', 'intLat', units.W_m2), \
'intRad_hal' : ('hal', 'intRad', units.W_m2), \
'intCon_hal' : ('hal', 'intCon', units.W_m2), \
'intLat_hal' : ('hal', 'intLat', units.W_m2), \
'intRad_eas' : ('eas', 'intRad', units.W_m2), \
'intCon_eas' : ('eas', 'intCon', units.W_m2), \
'intLat_eas' : ('eas', 'intLat', units.W_m2)};
self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
'conHeat_hal' : ('conHeat_hal', units.unit1), \
'conHeat_eas' : ('conHeat_eas', units.unit1)};
# Measurements
self.measurements = {};
self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
## Setup model
self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
self.modelpath = 'LBNL71T_MPC.MPC';
self.libraries = os.environ.get('MODELICAPATH');
self.estimate_method = models.JModelica;
self.validation_method = models.RMSE;
# Instantiate exo data sources
self.weather = exodata.WeatherFromEPW(self.weather_path);
self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
# Parameters
self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
self.parameters.collect_data();
self.parameters.data['lat'] = {};
self.parameters.data['lat']['Value'] = self.weather.lat;
# Instantiate building
building_parameters_data = {};
building_parameters_data['lat'] = {};
building_parameters_data['lat']['Value'] = self.weather.lat;
self.building = systems.EmulationFromFMU(self.measurements, \
fmupath = self.building_source_file_path, \
zone_names = self.zone_names, \
parameter_data = building_parameters_data);
def tearDown(self):
del self.building
del self.weather
del self.internal
del self.control
del self.parameters
del self.measurements
def test_estimate_and_validate(self):
'''Test the estimation of a model's coefficients based on measured data.'''
plt.close('all');
# Exogenous collection time
self.start_time_exodata = '1/1/2015';
self.final_time_exodata = '1/30/2015';
# Estimation time
self.start_time_estimation = '1/1/2015';
self.final_time_estimation = '1/4/2015';
# Validation time
self.start_time_validation = '1/4/2015';
self.final_time_validation = '1/5/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
# Set exodata to building emulation
self.building.weather_data = self.weather.data;
self.building.internal_data = self.internal.data;
self.building.control_data = self.control.data;
self.building.tz_name = self.weather.tz_name;
# Collect measurement data
self.building.collect_measurements(self.start_time_estimation, self.final_time_estimation);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name,
save_parameter_input_data=True);
# Simulate model with initial guess
self.model.simulate(self.start_time_estimation, self.final_time_estimation)
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Check parameter and input data were saved
df_test = pd.read_csv('mpcpy_simulation_inputs_model.csv', index_col='Time');
df_test.index = pd.to_datetime(df_test.index).tz_localize('UTC')
self.check_df(df_test, 'mpcpy_simulation_inputs_model.csv');
df_test = pd.read_csv('mpcpy_simulation_parameters_model.csv', index_col='parameter');
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 19:28:36 2019
@author: github.com/sahandv
"""
import sys
import gc
from tqdm import tqdm
import pandas as pd
import numpy as np
import re
from sciosci.assets import text_assets as kw
from sciosci.assets import keyword_dictionaries as kd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
tqdm.pandas()
nltk.download('wordnet')
nltk.download('punkt')
# =============================================================================
# Read data and Initialize
# =============================================================================
year_from = 0
year_to = 2021
MAKE_SENTENCE_CORPUS = False
MAKE_SENTENCE_CORPUS_ADVANCED_KW = False
MAKE_SENTENCE_CORPUS_ADVANCED = False
MAKE_REGULAR_CORPUS = True
GET_WORD_FREQ_IN_SENTENCE = False
PROCESS_KEYWORDS = False
stops = ['a','an','we','result','however','yet','since','previously','although','propose','proposed','this','...']
nltk.download('stopwords')
stop_words = list(set(stopwords.words("english")))+stops
data_path_rel = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/Corpus/KPRIS/kpris_data.csv'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/AI 4k/scopus_4k.csv'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/AI ALL 1900-2019 - reformat'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/AI 300/merged - scopus_v2_relevant wos_v1_relevant - duplicate doi removed - abstract corrected - 05 Aug 2019.csv'
data_full_relevant = pd.read_csv(data_path_rel)
# data_full_relevant = data_full_relevant[['dc:title','authkeywords','abstract','year']]
# data_full_relevant.columns = ['TI','DE','AB','PY']
sample = data_full_relevant.sample(4)
root_dir = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/Corpus/KPRIS/'
subdir = 'clean/' # no_lemmatization_no_stopwords
gc.collect()
data_full_relevant['PY'] = 2018
data_full_relevant['AB'] = data_full_relevant['abstract']
data_full_relevant['TI'] = ''
data_full_relevant['DE'] = np.nan
data_full_relevant['ID'] = ''
data_full_relevant['SO'] = ''
#
data_wrong = data_full_relevant[data_full_relevant['AB'].str.contains("abstract available")].index
data_wrong = list(data_wrong)
data_full_relevant = data_full_relevant.drop(data_wrong,axis=0)
# =============================================================================
# Initial Pre-Processing :
# Following tags requires WoS format. Change them otherwise.
# =============================================================================
data_filtered = data_full_relevant.copy()
data_filtered = data_filtered[pd.notnull(data_filtered['PY'])]
data_filtered = data_filtered[data_filtered['PY'].astype('int')>year_from-1]
data_filtered = data_filtered[data_filtered['PY'].astype('int')<year_to]
# Remove columns without keywords/abstract list
data_with_keywords = data_filtered[pd.notnull(data_filtered['DE'])]
data_with_abstract = data_filtered[pd.notnull(data_filtered['AB'])]
# Remove special chars and strings from abstracts
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_c(x) if pd.notnull(x) else np.nan).str.lower()
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'et al.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'eg.') if pd.notnull(x) else np.nan)
import pandas as pd
from my_lambdata.my_df_utils import MyUtil
# df = pd.DataFrame({"a": [1,2,3], "b": [4,5,6]})
# print(df.head())
df = pd.DataFrame({"a": [1, None, 3], "b": [4, 5, None], "c": [6, 7, 8], "d": [9, 10, 11]})
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
# Load data
def load_data(azdias_filepath, customers_filepath, attributes_filepath, attributes_desc_filepath):
"""
Method for loading dataset from CSV & Excel
Since the dataset is more than 1GB, the C engine is used instead of the Python engine to load data faster.
In addition, there is mixed data in columns 18 & 19; more specifically, some NaN values are represented by
X & XX, so these values are set as na_values.
Args:
azdias_filepath (str): Azdias filepath (Udacity_AZDIAS_052018)
customers_filepath (str): Customers filepath (Udacity_CUSTOMERS_052018)
attributes_filepath (str): Attributes filepath (DIAS Attributes - Values 2017.xlsx)
attributes_desc_filepath (str): Attributes description (DIAS Information Levels - Attributes 2017.xlsx)
Output:
azdias: Pandas Dataframe
customers: Pandas Dataframe
attributes: Pandas Dataframe
attributes_desc: Pandas Dataframe
"""
# Load "azdias" dataset
azdias = pd.read_csv(azdias_filepath, na_values=["X", "XX"], engine="c")
# Set LNR as index
#azdias = azdias.set_index("LNR")
# Load "customers" dataset
customers = pd.read_csv(customers_filepath, na_values=["X", "XX"], engine="c")
# Set LNT as index
#customers = customers.set_index("LNR")
# Load "attributes" dataset
attributes = pd.read_excel(attributes_filepath, header=1).loc[:, ["Attribute", "Value", "Meaning"]].fillna(method='ffill')
# Load "attributes_desc"
attributes_desc = pd.read_excel(attributes_desc_filepath, header=1).loc[:, ["Information level", "Attribute", "Description",
"Additional notes"]].fillna(method='ffill')
return azdias, customers, attributes, attributes_desc
# Create a dataframe that describes the information about each features
def build_feat_info(df):
"""
Method for finding statistics for each features in a dataset
Args:
df (Pandas Dataframe): Dataframe that needs to be described
Output:
Pandas Dataframe
"""
return pd.DataFrame({
"value_count" : [df[x].count() for x in df.columns],
"value_distinct" : [df[x].unique().shape[0] for x in df.columns],
"num_nans" : [df[x].isnull().sum() for x in df.columns],
"percent_nans" : [round(df[x].isnull().sum()/df[x].shape[0], 3) for x in df.columns],
}, index=df.columns)
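# Illustrative usage sketch (added for demonstration; not part of the original
# project code). The tiny frame and the helper name _demo_build_feat_info are
# invented here purely to show what build_feat_info reports per feature.
def _demo_build_feat_info():
    demo_df = pd.DataFrame({"a": [1, 2, None, 4], "b": [None, None, 3, 4]})
    # Prints value_count, value_distinct, num_nans and percent_nans per column
    print(build_feat_info(demo_df))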
# Plot bar plots
def bar_plt_nan(series1, series2):
"""
Method for plotting NaN counts for each feature
Args:
series1: Series of NaN counts per feature (azdias dataset)
series2: Series of NaN counts per feature (customers dataset)
Output:
None (the figures are drawn with matplotlib)
"""
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_axes([0, 0.85, 1, 0.5])
ax1.set_title("azdias: Distribution of NaN Count")
ax1.set_xlabel("Number of NaN Count")
ax1.set_ylabel("Number of Features")
ax1.hist(series1, bins=100)
ax2 = fig.add_axes([0, 0.15, 1, 0.5])
ax2.hist(series2, bins=100)
ax2.set_title("customers: Distribution of NaN Count")
ax2.set_xlabel("Number of NaN Count")
ax2.set_ylabel("Number of Features")
return None
# Replace unknown values with NaN
def replace_nan(df1, df2):
"""
Method for substituting unknown values with NaN
There are unknowns mapped to values in df1. These values are needed to be encoded to NaN in df2
Args:
df1 (Pandas Dataframe): Features dataframe with mapped values
df2 (Pandas Dataframe): azdias or customers dataframe
Output:
df2 (Pandas Dataframe): Dataframe with unknown values replaced by NaN
"""
# Create a subset of "attributes" with each feature and the associated unknown values
df1 = df1[(df1["Meaning"].str.contains("unknown") | df1["Meaning"].str.contains("no "))]
# Create a list of unknown value for each feature
unknown_val = []
for attribute in df1["Attribute"].unique():
val = df1.loc[df1["Attribute"] == attribute, "Value"].astype("str").str.cat(sep=",").split(",")
val = list(map(int, val)) # Convert the list to "int"
unknown_val.append(val)
# Create a dataframe of features with the list unknown value
df1 = pd.concat([pd.Series(df1["Attribute"].unique()), pd.Series(unknown_val)], axis=1)
# Rename the columns
df1.columns = ["attribute", "unknown"]
# Append the row to attributes_unknown_val
df1 = df1.append({"attribute" : "ARBEIT", "unknown" : [-1, 9]}, ignore_index=True)
print("Please wait: replacing unknown with NaN")
for row in df1.itertuples(index=False):
if row.attribute in df2.columns.values.tolist():
nan_val = df1.loc[df1['attribute'] == row.attribute, 'unknown'].iloc[0]
nan_idx = df2.loc[:, row.attribute].isin(nan_val)
df2.loc[nan_idx, row.attribute] = np.NaN
else:
continue
return df2
# Remove rows and features with high NaN count
def clean_data(df1, df2, df3, feat_list, drop_feat):
"""
Method for cleaning up features and rows with high NaN count
Args:
df1 (Pandas Dataframe): azdias dataset
df2 (Pandas Dataframe): customers dataset
df3 (Pandas Dataframe): attributes dataset
feat_list (list): List of features whose descriptions are available
drop_feat (list): List of features that are needed to be drop
Output:
df1 (Pandas Dataframe): Cleaned azdias dataset
df2 (Pandas Dataframe): Cleaned customers dataset
"""
# Select the features whose information are available
df1 = df1.loc[:, feat_list]
df2 = df2.loc[:, feat_list]
# Replace unknown values with NaN
df1 = replace_nan(df3, df1)
df2 = replace_nan(df3, df2)
# Replace zeros in GEBURTSJAHR with NaN
df1["GEBURTSJAHR"].replace(0, np.nan, inplace=True)
df2["GEBURTSJAHR"].replace(0, np.nan, inplace=True)
# NaN count for each feature
feature_nan_count = pd.DataFrame({
"azdias_nan_count": [round(df1[x].isnull().sum()/df1[x].shape[0], 3) for x in df1.columns],
"customers_nan_count": [round(df2[x].isnull().sum()/df2[x].shape[0], 3) for x in df2.columns]
}, index=df1.columns)
# Drop features where NaN count is higher than 20% in both df1 and df2
feature_nan_count = feature_nan_count.loc[((feature_nan_count.azdias_nan_count <= 0.2) |
(feature_nan_count.customers_nan_count <= 0.2))]
feature_nan_count = feature_nan_count.rename_axis("features").reset_index()
# Select Features from feature_nan_count
df1 = df1.loc[:, feature_nan_count.features.tolist()]
df2 = df2.loc[:, feature_nan_count.features.tolist()]
# Drop the features listed on drop_feat
df1.drop(columns=drop_feat, inplace=True)
df2.drop(columns=drop_feat, inplace=True)
# Drop the rows in azdias
count_nan_row = df1.shape[1] - df1.count(axis=1)
drop_row = df1.index[count_nan_row > 50]
df1.drop(drop_row, axis=0, inplace=True)
# Drop the rows in customers
count_nan_row = df2.shape[1] - df2.count(axis=1)
drop_row = df2.index[count_nan_row > 50]
df2.drop(drop_row, axis=0, inplace=True)
return df1, df2
# Plot comparison of value distribution in two dataframe
def plot_comparison(column, df1, df2):
"""
Method for plotting comparison of value distribution among two dataframes
Args:
column (series): Series from a feature
df1 (Pandas Dataframe): azdias dataset
df2 (Pandas Dataframe): customers dataset
Output:
None
"""
print(column)
sns.set(style="darkgrid")
fig, (ax1, ax2) = plt.subplots(figsize=(12,4), ncols=2)
sns.countplot(x = column, data=df1, ax=ax1, palette="Set3")
ax1.set_xlabel('Value')
ax1.set_title('Distribution of feature in AZDIAS dataset')
sns.countplot(x = column, data=df2, ax=ax2, palette="Set3")
ax2.set_xlabel('Value')
ax2.set_title('Distribution of feature in CUSTOMERS dataset')
fig.tight_layout()
plt.show()
# Feature Engineering
def feat_eng(df1, df2):
"""
Method for feature engineering & encoding & scaling the data
Args:
df1 (Pandas Dataframe): azdias dataset
df2 (Pandas Dataframe): customers dataset
Output:
df1: Scaled azdias dataframe
df2: Scaled customers dataframe
"""
# OST_WEST_KZ needs to be encoded since the data are strings
df1["OST_WEST_KZ"].replace(["W", "O"], [0, 1], inplace=True)
df2["OST_WEST_KZ"].replace(["W", "O"], [0, 1], inplace=True)
# Impute numeric columns
imputer = SimpleImputer(missing_values=np.nan, strategy='median')
df1 = pd.DataFrame(imputer.fit_transform(df1), columns = df1.columns)
df2 = pd.DataFrame(imputer.fit_transform(df2), columns = df2.columns)
# Standardize the data
scaler = StandardScaler()
df1_scaled = scaler.fit_transform(df1)
df2_scaled = scaler.transform(df2)
# Create dataframe from scaled dataset
df1 = pd.DataFrame(data=df1_scaled, index=df1.index, columns=df1.columns)
from datetime import *
import datetime
from numpy import *
import statsmodels.api as sm
import statsmodels.tsa.stattools as ts
import scipy.io as sio
import pandas as pd
def normcdf(X):
(a1,a2,a3,a4,a5) = (0.31938153, -0.356563782, 1.781477937, -1.821255978, 1.330274429)
L = abs(X)
K = 1.0 / (1.0 + 0.2316419 * L)
w = 1.0 - 1.0 / sqrt(2*pi)*exp(-L*L/2.) * (a1*K + a2*K*K + a3*pow(K,3) + a4*pow(K,4) + a5*pow(K,5))
if X < 0:
w = 1.0-w
return w
def vratio(a, lag = 2, cor = 'hom'):
""" the implementation found in the blog Leinenbock
http://www.leinenbock.com/variance-ratio-test/
"""
#t = (std((a[lag:]) - (a[1:-lag+1])))**2;
#b = (std((a[2:]) - (a[1:-1]) ))**2;
n = len(a)
mu = sum(a[1:n]-a[:n-1])/n;
m=(n-lag+1)*(1-lag/n);
#print( mu, m, lag)
b=sum(square(a[1:n]-a[:n-1]-mu))/(n-1)
t=sum(square(a[lag:n]-a[:n-lag]-lag*mu))/m
vratio = t/(lag*b);
la = float(lag)
if cor == 'hom':
varvrt=2*(2*la-1)*(la-1)/(3*la*n)
elif cor == 'het':
varvrt=0;
sum2=sum(square(a[1:n]-a[:n-1]-mu));
for j in range(lag-1):
sum1a=square(a[j+1:n]-a[j:n-1]-mu);
sum1b=square(a[1:n-j]-a[0:n-j-1]-mu)
sum1=dot(sum1a,sum1b);
delta=sum1/(sum2**2);
varvrt=varvrt+((2*(la-j)/la)**2)*delta
zscore = (vratio - 1) / sqrt(float(varvrt))
pval = normcdf(zscore);
return vratio, zscore, pval
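# Illustrative usage sketch (added for demonstration; not part of the original
# module). For a pure random walk the variance ratio should be close to 1, so
# the test should not reject the random-walk hypothesis. The helper name
# _demo_vratio and the simulated series are assumptions made for this example.
def _demo_vratio():
    import numpy as _np
    walk = _np.cumsum(_np.random.normal(0.0, 1.0, 1000))
    vr, zscore, pval = vratio(walk, lag=2)
    print("variance ratio: %.3f, z-score: %.3f, p-value: %.3f" % (vr, zscore, pval))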
def hurst2(ts):
""" the implementation found in the blog Leinenbock
http://www.leinenbock.com/calculation-of-the-hurst-exponent-to-test-for-trend-and-mean-reversion/
"""
tau = []; lagvec = []
# Step through the different lags
for lag in range(2,100):
# produce price difference with lag
pp = subtract(ts[lag:],ts[:-lag])
# Write the different lags into a vector
lagvec.append(lag)
# Calculate the variance of the difference vector
tau.append(sqrt(std(pp)))
# linear fit to double-log graph (gives power)
m = polyfit(log10(lagvec),log10(tau),1)
# calculate hurst
hurst = m[0]*2.0
# plot lag vs variance
#plt.plot(lagvec,tau,'o')
#plt.show()
return hurst
def hurst(ts):
""" the implewmentation on the blog http://www.quantstart.com
http://www.quantstart.com/articles/Basics-of-Statistical-Mean-Reversion-Testing
Returns the Hurst Exponent of the time series vector ts"""
# Create the range of lag values
lags = range(2, 100)
# Calculate the array of the variances of the lagged differences
tau = [sqrt(std(subtract(ts[lag:], ts[:-lag]))) for lag in lags]
# Use a linear fit to estimate the Hurst Exponent
poly = polyfit(log(lags), log(tau), 1)
# Return the Hurst exponent from the polyfit output
return poly[0]*2.0
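# Illustrative usage sketch (added for demonstration; not part of the original
# module). A random walk should give a Hurst exponent near 0.5, while stationary
# white noise (a strongly mean-reverting series) should give a value near 0.
# The helper name _demo_hurst and the simulated data are assumptions.
def _demo_hurst():
    import numpy as _np
    walk = _np.cumsum(_np.random.normal(0.0, 1.0, 2000))
    noise = _np.random.normal(0.0, 1.0, 2000)
    print("hurst(random walk), expected near 0.5:", hurst(walk))
    print("hurst(white noise), expected near 0.0:", hurst(noise))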
def half_life(ts):
""" this function calculate the half life of mean reversion
"""
# calculate the delta for each observation.
# delta = p(t) - p(t-1)
delta_ts = diff(ts)
# calculate the vector of lagged prices. lag = 1
# stack up a vector of ones and transpose
lag_ts = vstack([ts[1:], ones(len(ts[1:]))]).T
# calculate the slope (beta) of the deltas vs the lagged values
beta = linalg.lstsq(lag_ts, delta_ts)
# compute half life
half_life = log(2) / beta[0]
return half_life[0]
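# Illustrative usage sketch (added for demonstration; not part of the original
# module). An AR(1) series with coefficient 0.9 has a theoretical half life of
# about -ln(2)/ln(0.9), roughly 6.6 samples; the estimate below should land in
# the same ballpark. The helper name _demo_half_life and the coefficient are
# assumptions chosen for this example.
def _demo_half_life():
    import numpy as _np
    x = _np.zeros(1000)
    for i in range(1, 1000):
        x[i] = 0.9 * x[i-1] + _np.random.normal(0.0, 1.0)
    print("estimated half life:", half_life(x))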
def random_walk(seed=1000, mu = 0.0, sigma = 1, length=1000):
""" this function creates a series of independent, identically distributed values
with the form of a random walk. Where the best prediction of the next value is the present
value plus some random variable with mean and variance finite
We distinguish two types of random walks: (1) random walk without drift (i.e., no constant
or intercept term) and (2) random walk with drift (i.e., a constant term is present).
The random walk model is an example of what is known in the literature as a unit root process.
RWM without drift: Yt = Yt−1 + ut
RWM with drift: Yt = δ + Yt−1 + ut
"""
ts = []
for i in range(length):
if i == 0:
ts.append(seed)
else:
ts.append(mu + ts[i-1] + random.normal(0, sigma))  # numpy.random.normal; numpy's random module has no gauss()
return ts
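# Illustrative usage sketch (added for demonstration; not part of the original
# module): one walk without drift and one with a positive drift term, matching
# the two model forms described in the docstring above. The helper name
# _demo_random_walk is an assumption.
def _demo_random_walk():
    no_drift = random_walk(seed=100.0, mu=0.0, sigma=1, length=500)
    with_drift = random_walk(seed=100.0, mu=0.5, sigma=1, length=500)
    print("last value without drift:", no_drift[-1])
    print("last value with drift   :", with_drift[-1])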
def subset_dataframe(data, start_date, end_date):
start = data.index.searchsorted(start_date)
end = data.index.searchsorted(end_date)
return data.ix[start:end]
def cointegration_test(y, x):
ols_result = sm.OLS(y, x).fit()
return ts.adfuller(ols_result.resid, maxlag=1)
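# Illustrative usage sketch (added for demonstration; not part of the original
# module). Two artificially cointegrated series are built (y tracks x plus
# stationary noise), so the ADF test on the OLS residuals should return a small
# p-value (second element of the returned tuple). The helper name
# _demo_cointegration and the coefficients are assumptions.
def _demo_cointegration():
    import numpy as _np
    x = _np.cumsum(_np.random.normal(0.0, 1.0, 1000)) + 50.0
    y = 0.8 * x + _np.random.normal(0.0, 1.0, 1000)
    adf_stat, pvalue = cointegration_test(y, x)[:2]
    print("ADF statistic: %.3f, p-value: %.4f" % (adf_stat, pvalue))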
def get_data_from_matlab(file_url, index, columns, data):
"""Description:*
This function takes a Matlab file .mat and extract some
information to a pandas data frame. The structure of the mat
file must be known, as the loadmat function used returns a
dictionary of arrays and they must be called by the key name
Args:
file_url: the ubication of the .mat file
index: the key for the array of string date-like to be used as index
for the dataframe
columns: the key for the array of data to be used as columns in
the dataframe
data: the key for the array to be used as data in the dataframe
Returns:
Pandas dataframe
"""
import scipy.io as sio
import datetime as dt
# load mat file to dictionary
mat = sio.loadmat(file_url)
# define data to import, columns names and index
cl = mat[data]
stocks = mat[columns]
dates = mat[index]
# extract the ticket to be used as columns name in dataframe
# to-do: list compression here
columns = []
for each_item in stocks:
for inside_item in each_item:
for ticket in inside_item:
columns.append(ticket)
# extract string ins date array and convert to datetimeindex
# to-do list compression here
df_dates =[]
for each_item in dates:
for inside_item in each_item:
df_dates.append(inside_item)
df_dates = pd.Series([pd.to_datetime(date, format= '%Y%m%d') for date in df_dates], name='date')
# construct the final dataframe
data = pd.DataFrame(cl, columns=columns, index=df_dates)
from datetime import datetime, timedelta
import sys
import fnmatch
import os
import numpy as np
from scipy import io as sio
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import zarr
from numcodecs import Blosc
from mat_files import masc_mat_file_to_dict,masc_mat_triplet_to_dict,triplet_images_reshape
from mat_files import digits_dictionary
from weather_data import blowingsnow
sys.path.insert(1,'/home/grazioli/CODES/python/py-masc-gan-3Deval')
from gan3d_lib import gan3d
def files_in_dir_recursive(top, pattern="*", include_dir=True):
for (root, dirs, files) in os.walk(top):
match_files = (fn for fn in files if
fnmatch.fnmatchcase(fn, pattern))
if include_dir:
match_files = (os.path.join(root,fn) for fn in match_files)
for fn in match_files:
yield fn
def find_matched(data_dir, min_files=3):
files = {}
for fn_full in files_in_dir_recursive(
data_dir, pattern="*_flake_*_cam_?.mat"):
fn = fn_full.split("/")[-1]
fn = ".".join(fn.split(".")[:-1])
fn_parts = fn.split("_")
cam = int(fn_parts[-1])
flake_id = int(fn_parts[-3])
timestamp = "_".join(fn.split("_")[:2])
time = datetime.strptime(timestamp, "%Y.%m.%d_%H.%M.%S")
key = (time,flake_id)
if key not in files:
files[key] = {}
files[key][cam] = fn_full
print(len(files))
files = {k: files[k] for k in files if len(files[k])>=min_files}
print(len(files))
delete_keys = []
for (i,k) in enumerate(files):
if i%1000==0:
print("{}/{}, {} deleted".format(i,len(files),len(delete_keys)))
if any(not valid_file(files[k][c]) for c in files[k]):
delete_keys.append(k)
for k in delete_keys:
del files[k]
print(len(files))
return files
def valid_file(fn, xhi_min=7,
max_intens_min=0.03,
min_size=8,
max_size=2048):
m = sio.loadmat(fn)
xhi = m["roi"]["xhi"][0,0][0,0]
if xhi < xhi_min:
return False
max_intens = m["roi"]["max_intens"][0,0][0,0]
if max_intens < max_intens_min:
return False
shape = m["roi"]["data"][0,0].shape
size = np.max(shape)
if not (min_size <= size <= max_size):
return False
# Check if any nan in riming
if np.isnan(m["roi"][0,0]['riming_probs'][0]).any():
return False
return True
def valid_triplet(triplet_files,
min_size=12,
max_ysize_var=1.4,
max_ysize_delta=60, # Pixels
xhi_low=8,
xhi_high=8.5):
mat = [sio.loadmat(triplet_files[i]) for i in range(3)]
def get_size(m):
shape = m["roi"]["data"][0,0].shape
return shape[0]
def get_xhi(m):
return m["roi"]["xhi"][0,0][0,0]
sizes = [get_size(m) for m in mat]
largest = max(sizes)
smallest = min(sizes)
xhis = [get_xhi(m) for m in mat]
xhi_min = min(xhis)
xhi_max = max(xhis)
return (largest>=min_size) and (largest-smallest <= max_ysize_delta) and (largest/smallest<=max_ysize_var) and ((xhi_min >= xhi_low) or (xhi_max >= xhi_high))
def filter_triplets(files):
return {k: files[k] for k in files if valid_triplet(files[k])}
def filter_condensation(df0,
threshold_var=2.5,
Dmax_min=0.001):
"""
Filter condensation based on excessive stability of adjacent descriptors
Input:
df0: pandas dataframe of one camera
threshold_var : minimum allowed variation [%] of some descriptors
Dmax_min : minimum Dmax to consider for this filter
"""
df0_s = df0.copy() #Save
ind=[]
df0 = df0[['area','perim','Dmax','roi_centroid_X','roi_centroid_Y']]
# Shift forward
diff1 = 100*np.abs(df0.diff()/df0)
diff1['mean'] = diff1.mean(axis=1)
diff1 = diff1[['mean']]
# Shift backward
diff2 = 100*np.abs(df0.diff(periods=-1)/df0)
diff2['mean'] = diff2.mean(axis=1)
diff2 = diff2[['mean']]
df0_1= df0_s[diff1['mean'] < threshold_var]
df0_1 = df0_1[df0_1.Dmax > Dmax_min]
ind.extend(np.asarray(df0_1.index))
df0_2= df0_s[diff2['mean'] < threshold_var]
df0_2 = df0_2[df0_2.Dmax > Dmax_min]
ind.extend(np.asarray(df0_2.index))
return ind
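# Illustrative usage sketch (added for demonstration; not part of the original
# pipeline). The tiny synthetic frame below contains two adjacent rows that are
# nearly identical (a typical condensation signature), so their indices should
# be returned by the filter. All column values are invented for this example.
def _demo_filter_condensation():
    df_demo = pd.DataFrame({
        'area': [0.9, 2.0, 2.001, 5.0],
        'perim': [3.0, 6.0, 6.001, 12.0],
        'Dmax': [0.0009, 0.002, 0.002, 0.004],
        'roi_centroid_X': [10.0, 200.0, 200.1, 400.0],
        'roi_centroid_Y': [10.0, 300.0, 300.1, 150.0]})
    print(filter_condensation(df_demo, threshold_var=2.5, Dmax_min=0.001))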
def create_triplet_dataframes(triplet_files, out_dir,campaign_name='EPFL'):
"""
Put in a dataframe the descriptors of the images for each cam
"""
c0=[]
c1=[]
c2=[]
tri=[]
for (i,k) in enumerate(sorted(triplet_files.keys())):
if i%10000 == 0:
print("{}/{}".format(i,len(triplet_files)))
triplet = triplet_files[k]
# Create and increment the data frames
c0.append(masc_mat_file_to_dict(triplet[0]))
c1.append(masc_mat_file_to_dict(triplet[1]))
c2.append(masc_mat_file_to_dict(triplet[2]))
tri.append(masc_mat_triplet_to_dict(triplet,campaign=campaign_name))
c0 = pd.DataFrame.from_dict(c0)
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Quota for Affinity."""
import logging
import pandas as pd
from sklearn import ensemble
class AffinityModelBase(object):
"""Base class for affinity regression."""
def __init__(self, affinity_file, affinity_value):
self.affinity_report = pd.read_csv(affinity_file)
self.standard = affinity_value
self.ml_model = ensemble.RandomForestClassifier(n_estimators=20)
def build_model(self):
"""Build regression model."""
desc = self.affinity_report['desc']
inputs = self.generate_input_space(desc)
labels = self.generate_label()
self.ml_model.fit(inputs, labels)
def predict(self, input):
"""Predict output from input."""
return self.ml_model.predict(input[:1])[0]
def generate_input_space(self, desc):
"""Generate input space from desc."""
if desc is None:
return None
space_list = []
for idx in range(len(desc)):
desc_item = eval(desc.iloc[idx])
space_dict = {}
self.init_space_dict(space_dict)
for key, value in desc_item.items():
self.get_space_dict(key, value, space_dict)
# space_dict[_metric_key] = eval(pfms.iloc[idx])[_metric_key]
if space_dict:
space_list.append(space_dict)
return pd.DataFrame(space_list)
def generate_label(self):
"""Generate label from affinity report."""
_pfms = self.affinity_report['performance']
_metric_key = eval(self.affinity_report['_objective_keys'][0])[0]
label_list = []
for pfm in _pfms:
value = eval(pfm)[_metric_key]
clc = 1 if value > self.standard else 0
label_list.append({_metric_key: clc})
return pd.DataFrame(label_list)
def init_space_dict(self, space_dict):
"""Initialize space dict."""
pass
def get_space_dict(self, *args):
"""Get space dict from desc."""
raise NotImplementedError
class AffinityModelSrea(AffinityModelBase):
"""Affinity Regression for SR-EA."""
def __init__(self, affinity_file, affinity_value):
super(AffinityModelSrea, self).__init__(affinity_file, affinity_value)
def init_space_dict(self, space_dict):
"""Initialize space dict."""
for i in range(80):
space_dict['code_{}'.format(i)] = False
def get_space_dict(self, key, value, space_dict):
"""Get space dict from desc."""
key = key.split('.')[-1]
if isinstance(value, dict):
for sub_key, sub_value in value.items():
self.get_space_dict(sub_key, sub_value, space_dict)
elif key == 'code' and isinstance(value, str):
for i, element in enumerate(value):
if element == '0':
space_dict[key + '_{}'.format(i)] = 1
elif element == '1':
space_dict[key + '_{}'.format(i)] = 2
else:
space_dict[key + '_{}'.format(i)] = 3
class QuotaAffinity(object):
"""Generate affinity model of search space, filter bad sample."""
def __init__(self, affinity_cfg):
affinity_class = eval(self.get_affinity_model(affinity_cfg.type))
self.affinity_model = affinity_class(affinity_cfg.affinity_file, affinity_cfg.affinity_value)
self.affinity_model.build_model()
def get_affinity_model(self, affinity_type):
"""Get specific affinity model name."""
affinity_model_dict = {
'sr_ea': 'AffinityModelSrea'
}
return affinity_model_dict[affinity_type]
def is_affinity(self, desc):
"""Judge the desc is affinity or not."""
desc_dict = {'desc': str(desc)}
input = pd.DataFrame([desc_dict])
import pandas as pd
import numpy
class DataSplitter:
@classmethod
def split_to_x_and_y(self, data, timesteps):
x, y = [], []
for i in range(len(data) - timesteps):
x.append(data.iloc[i:(i + timesteps)].drop('date', axis=1).as_matrix())
y.append([data.iloc[i + timesteps]['nikkei']])
return numpy.array(x), numpy.array(y)
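# Usage note (added for illustration; not part of the original class): for a
# frame with a 'date' column, a 'nikkei' column and N rows, split_to_x_and_y
# with a window of `timesteps` returns x with shape
# (N - timesteps, timesteps, n_features), where n_features excludes 'date',
# and y with shape (N - timesteps, 1) holding the 'nikkei' value that
# immediately follows each window.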
@classmethod
def split_to_train_val_test(self, data):
train_start = pd.to_datetime('2003-01-22')
validation_start = pd.to_datetime('2016-01-01')
test_start = pd.to_datetime('2017-01-01')
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
def test_col():
from anaphora import Col
# attribute access
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
actual = Col('y').values(df)
expected = df['y'].values
assert isinstance(actual, np.ndarray)
npt.assert_array_equal(actual, expected)
# attribute chaining (!!)
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
actual = Col('y').values(df).dtype
expected = df['y'].values.dtype
npt.assert_array_equal(actual, expected)
# method chaining
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
actual = Col('y').map({1: '1', 2: '2'}).astype('category')(df)
expected = df['y'].map({1: '1', 2: '2'}).astype('category')
pdt.assert_series_equal(actual, expected)
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Ngram statistics """
from typing import Optional, Tuple
from absl import app
from absl import flags
from absl import logging
import pandas as pd
from probing._src.configurable import configurable
from probing._src.constants import COLORS
from probing._src.metrics import compute_metrics
from probing.dataset.dataset import create_dataset
@configurable('ngrams')
def ngram_stats(
ngram_gbc_path: Optional[str] = None,
metadata_path: Optional[str] = None,
ngram_metrics_path: Optional[str] = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
""" Collect Statistics for the Google Books Ngram Corpus
Args:
ngram_gbc_path: Filtered Ngram counts from the GBC.
metadata_path: File containing dataset metadata.
ngram_metrics_path: File to write ngram metrics to.
Returns:
gbc_ngram_metrics: ngram metrics for the GBC
"""
if metadata_path is None:
logging.info('did not receive metadata, regenerating the dataset.')
_, meta = create_dataset()
else:
meta = pd.read_csv(metadata_path)
# load per-object ngram counts from the google books corpus
gbc_ngram_counts = pd.read_csv(ngram_gbc_path)
from glob import glob
from os import path,mkdir,remove
import numpy as np
import pandas as pd
from skyfield.api import Topos,load
from astropy.time import TimeDelta,Time
from astropy import units as u
from ..utils import data_prepare
def t_list(t_start,t_end,t_step=60):
"""
Generate a list of time moments based on start time, end time, and time step.
Usage:
t = t_list(t_start,t_end,t_step)
Inputs:
t_start -> [object of class Astropy Time] Start time, such as Time('2020-06-01 00:00:00')
t_end -> [object of class Astropy Time] End time, such as Time('2020-06-30 00:00:00')
Parameters:
t_step -> [int, optional, default = 60] time step[seconds]
Outputs:
t -> [list of object of class Skyfield Time] list of time moments
"""
ts = data_prepare.ts
dt = np.around((t_end - t_start).to(u.second).value)
t_astropy = t_start + TimeDelta(np.append(np.arange(0,dt,t_step),dt), format='sec')
t = ts.from_astropy(t_astropy)
return t
def next_pass(t_start,t_end,sat,site,cutoff = 10):
"""
Generate the space target passes in a time period.
Usage:
passes = next_pass(t_start,t_end,sat,site,cutoff)
Inputs:
t_start -> [object of class Astropy Time] Start time, such as Time('2020-06-01 00:00:00')
t_end -> [object of class Astropy Time] End time, such as Time('2020-06-30 00:00:00')
sat -> [object of class Skyfield satellite] Space target
site -> [object of class Skyfield Topos] Station
Parameters:
cutoff -> [float, optional, default=10] Cutoff altitude angle
Outputs:
t -> [list of object of class Skyfield Time] List of time moments
"""
ts = data_prepare.ts
passes = []
# Approximately calculate the orbital period[minutes] of a target relative to the rotating Earth
P = 2*np.pi/(sat.model.no - np.pi/(12*60))
# Determine the time step[seconds]
t_step = int(np.round(60*np.sqrt(P/120)))
t = t_list(t_start,t_end,t_step)
# Calculate the target position relative to the station in ICRS
sat_site = (sat - site).at(t)
alt_sat, az_sat, distance_sat = sat_site.altaz()
sat_above_horizon = alt_sat.degrees > cutoff
boundaries, = np.diff(sat_above_horizon).nonzero()
if len(boundaries) == 0: return passes
if sat_above_horizon[boundaries[0]]:
boundaries = np.append(0,boundaries)
if len(boundaries)%2 != 0:
boundaries = np.append(boundaries,len(sat_above_horizon)-1)
boundaries = np.array(t[boundaries].utc_iso()).reshape(len(boundaries) // 2,2)
seconds = TimeDelta(np.arange(t_step+1), format='sec')
for rises,sets in boundaries:
t_rise = ts.from_astropy(Time(rises)+seconds)
sat_site = (sat - site).at(t_rise)
alt, az, distance = sat_site.altaz()
sat_above_horizon = alt.degrees > cutoff
pass_rise = t_rise[sat_above_horizon][0]
t_set = ts.from_astropy(Time(sets)+seconds)
sat_site = (sat - site).at(t_set)
alt, az, distance = sat_site.altaz()
sat_above_horizon = alt.degrees > cutoff
if sat_above_horizon[-1]:
pass_set = t_set[sat_above_horizon][0]
else:
pass_set = t_set[sat_above_horizon][-1]
passes.append([pass_rise.utc_iso(),pass_set.utc_iso()])
return passes
def visible_pass(start_time,end_time,site,timezone=0,cutoff=10,twilight='nautical',visible_duration=120):
"""
Generate the visible passes of space targets in a time period.
Usage:
visible_pass(start_time,end_time,site,timezone=8)
Inputs:
start_time -> [str] start time, such as '2020-06-01 00:00:00'
end_time -> [str] end time, such as '2020-07-01 00:00:00'
site -> [list of str] geodetic coordinates of a station, such as ['21.03 N','157.80 W','1987.05']
Parameters:
timezone -> [int, optional, default=0] time zone, such as -10; if 0, then UTC is used.
cutoff -> [float, optional, default=10] Satellite Cutoff Altitude Angle
twilight -> [str, or float, optional, default='nautical'] Dark sign or solar cutoff angle; if 'dark', then the solar cutoff angle is less than -18 degrees;
if 'astronomical', then less than -12 degrees; if 'nautical', then less than -6 degrees; if 'civil', then less than -0.8333 degrees;
alternatively, it also can be set to a specific number, for example, 4.0 degrees.
visible_duration -> [int, optional, default=120] Duration[seconds] of a visible pass
Outputs:
VisiblePasses_bysat.csv -> csv-format files that record visible passes in sort of target
VisiblePasses_bydate.csv -> csv-format files that record visible passes in sort of date
xxxx.txt -> one-day prediction files for targets
"""
planets = data_prepare.planets
print('\nCalculating one-day predictions and multiple-day visible passes for targets', end=' ... \n')
if twilight == 'dark':
sun_alt_cutoff = -18
elif twilight == 'astronomical':
sun_alt_cutoff = -12
elif twilight == 'nautical':
sun_alt_cutoff = -6
elif twilight == 'civil':
sun_alt_cutoff = -0.8333
elif type(twilight) is int or type(twilight) is float:
sun_alt_cutoff = twilight
else:
raise Exception("twilight must be one of 'dark','astronomical','nautical','civil', or a number.")
dir_TLE = 'TLE/'
dir_prediction = 'prediction/'
sun,earth = planets['sun'],planets['earth']
sats = load.tle_file(dir_TLE+'satcat_tle.txt')
lon,lat,ele = site
observer = Topos(lon, lat, elevation_m = float(ele))
# local start time
t_start = Time(start_time) - timezone*u.hour
# local end time
t_end = Time(end_time) - timezone*u.hour
fileList_prediction = glob(dir_prediction+'*')
if path.exists(dir_prediction):
for file in fileList_prediction:
remove(file)
else:
mkdir(dir_prediction)
filename0 = 'VisiblePasses_bysat.csv'
filename1 = 'VisiblePasses_bydate.csv'
outfile0 = open(dir_prediction+filename0,'w')
header = ['Start Time[UTC+' + str(timezone) +']','End Time[UTC+' + str(timezone) +']','NORADID', 'Duration[seconds]']
outfile0.write('{},{},{},{}\n'.format(header[0],header[1],header[2],header[3]))
print('Generate one-day prediction for targets:')
for sat in sats:
visible_flag = False
noradid = sat.model.satnum
passes = next_pass(t_start,t_end,sat,observer,cutoff)
if not passes:
continue
else:
outfile = open(dir_prediction+ str(noradid) + '.txt','w')
outfile.write('# {:18s} {:8s} {:8s} {:8s} {:8s} {:10s} {:14s} {:7s} \n'.format('Date and Time(UTC)','Alt[deg]','Az[deg]','Ra[h]','Dec[deg]','Range[km]','Solar Alt[deg]','Visible'))
for pass_start,pass_end in passes:
t = t_list(Time(pass_start),Time(pass_end),1)
sat_observer = (sat - observer).at(t)
sat_alt, sat_az, sat_distance = sat_observer.altaz()
sat_ra, sat_dec, sat_distance = sat_observer.radec()
sun_observer = (earth+observer).at(t).observe(sun).apparent()
sun_alt, sun_az, sun_distance = sun_observer.altaz()
sun_beneath = sun_alt.degrees < sun_alt_cutoff
sunlight = sat.at(t).is_sunlit(planets)
# Given that the satellite is above the cutoff altitude, visibility additionally requires a dark site (Sun below the twilight cutoff) and a sunlit satellite (outside the Earth's shadow).
visible = sun_beneath & sunlight
if visible.any():
visible_flag = True
t_visible = np.array(t.utc_iso())[visible]
t_visible_0 = (Time(t_visible[0])+timezone*u.hour).iso
t_visible_1 = (Time(t_visible[-1])+timezone*u.hour).iso
during = round((Time(t_visible[-1]) - Time(t_visible[0])).sec)
if during >= visible_duration:
outfile0.write('{:s},{:s},{:d},{:d}\n'.format(t_visible_0,t_visible_1,noradid,int(during)))
# Generate a one-day prediction file
if Time(pass_end) < t_start + 1:
for i in range(len(t)):
outfile.write('{:20s} {:>8.4f} {:>8.4f} {:>8.5f} {:>8.4f} {:>10.4f} {:>10.4f} {:>7d} \n'.format(t.utc_strftime('%Y-%m-%d %H:%M:%S')[i],sat_alt.degrees[i],sat_az.degrees[i],sat_ra.hours[i],sat_dec.degrees[i],sat_distance.km[i],sun_alt.degrees[i],visible[i]))
outfile.write('\n')
if not visible_flag:
outfile0.write('{:s},{:s},{:d}\n\n'.format('','',noradid))
else:
outfile0.write('\n')
outfile.close()
print(noradid)
outfile0.close()
print('Generate multiple-day visible passes for all targets.')
# Sort by the start time of the visible passes
dates,temp = [],[]
VisiblePasses = pd.read_csv(dir_prediction+filename0, dtype=object)
from collections import OrderedDict
import george
from george import kernels
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from astropy.cosmology import FlatLambdaCDM
from scipy.optimize import minimize
from sklearn.model_selection import StratifiedKFold
from scipy.signal import find_peaks
from scipy.special import erf
import tqdm
from settings import settings
# Parameters of the dataset
num_passbands = 6
pad = 100
start_mjd = 59580 - pad
end_mjd = 60675 + pad
# Define class labels, galactic vs extragalactic label and weights
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99]
class_weights = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1,
67: 1, 88: 1, 90: 1, 92: 1, 95: 1, 99: 2}
class_galactic = {6: True, 15: False, 16: True, 42: False, 52: False, 53: True,
62: False, 64: False, 65: True, 67: False, 88: False, 90:
False, 92: True, 95: False}
# Reverse engineered cosmology used in sims
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
def find_time_to_fractions(fluxes, fractions, forward=True):
"""Find the time for a lightcurve to decline to a specific fraction of
maximum light.
fractions should be a decreasing list of the fractions of maximum light
that will be found (eg: [0.8, 0.5, 0.2]).
"""
max_time = np.argmax(fluxes)
max_flux = fluxes[max_time]
result = np.ones(len(fractions)) * 99999
frac_idx = 0
# Start at maximum light, and move along the light curve. Whenever we cross
# one threshold, we add it to the list and keep going. If we hit the end of
# the array without crossing the threshold, we return a large number for
# that time.
offset = 0
while True:
offset += 1
if forward:
new_time = max_time + offset
if new_time >= len(fluxes):
break
else:
new_time = max_time - offset
if new_time < 0:
break
test_flux = fluxes[new_time]
while test_flux < max_flux * fractions[frac_idx]:
result[frac_idx] = offset
frac_idx += 1
if frac_idx == len(fractions):
break
if frac_idx == len(fractions):
break
return result
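# Illustrative example (added, not from the source): for fluxes = np.array([1, 2, 5, 4, 3, 2, 1])
# and fractions = [0.8, 0.5, 0.2] with forward=True, maximum light is at index 2 (flux 5);
# the 0.8 and 0.5 thresholds are crossed 2 and 3 steps after maximum, while the 0.2 threshold
# is never reached, so the function returns [2, 3, 99999].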
def multi_weighted_logloss(y_true, y_preds):
"""
@author olivier https://www.kaggle.com/ogrellier
multi logloss for PLAsTiCC challenge
"""
if y_preds.shape[1] != len(classes):
# No prediction for 99, pretend that it doesn't exist.
use_classes = classes[:-1]
else:
use_classes = classes
y_p = y_preds
# Transform y_true into one-hot dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weights[i] for i in use_classes])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return loss
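# In formula form (added note): with one-hot labels y, predictions p clipped to [1e-15, 1 - 1e-15]
# and per-class weights w_c, the value computed above is
# loss = -sum_c( w_c * sum_i(y_ic * log p_ic) / N_c ) / sum_c(w_c),
# i.e. a class-weighted average of the mean log-probability assigned to the true class.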
def lgb_multi_weighted_logloss(y_true, y_preds):
"""Wrapper around multi_weighted_logloss that works with lgbm"""
y_p = y_preds.reshape(y_true.shape[0], len(classes) - 1, order='F')
loss = multi_weighted_logloss(y_true, y_p)
return 'wloss', loss, False
def do_predictions_flatprob(object_ids, features, classifiers):
pred = 0
for classifier in classifiers:
pred += (
classifier.predict_proba(
features, num_iteration=classifier.best_iteration_)
) / len(classifiers)
# Add in flat prediction for class 99. This prediction depends on whether
# the object is galactic or extragalactic.
gal_frac_99 = 0.04
# Weights without 99 included.
weight_gal = sum([class_weights[class_id] for class_id, is_gal in
class_galactic.items() if is_gal])
weight_extgal = sum([class_weights[class_id] for class_id, is_gal in
class_galactic.items() if not is_gal])
guess_99_gal = gal_frac_99 * class_weights[99] / weight_gal
guess_99_extgal = (1 - gal_frac_99) * class_weights[99] / weight_extgal
is_gals = features['hostgal_photoz'] == 0.
pred_99 = np.array([guess_99_gal if is_gal else guess_99_extgal for is_gal
in is_gals])
stack_pred = np.hstack([pred, pred_99[:, None]])
# Normalize
stack_pred = stack_pred / np.sum(stack_pred, axis=1)[:, None]
# Build a pandas dataframe with the result
df = pd.DataFrame(index=object_ids, data=stack_pred,
columns=['class_%d' % i for i in classes])
return df
def do_predictions(object_ids, features, classifiers, gal_outlier_score=0.25,
extgal_outlier_score=1.4):
print("OLD!!! DON'T USE!")
is_gal = features['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(features), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
pred = 0
for classifier in classifiers:
# Get base scores
raw_scores = classifier.predict_proba(
features, raw_score=True, num_iteration=classifier.best_iteration_
)
max_scores = np.max(raw_scores, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None,
max_scores)
# Add in class 99 scores.
scores = np.hstack([raw_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(scores) / np.sum(np.exp(scores), axis=1)[:, None]
pred += iter_pred / len(classifiers)
# Build a pandas dataframe with the result
df = pd.DataFrame(index=object_ids, data=pred,
columns=['class_%d' % i for i in classes])
return df
def do_scores(object_ids, features, classifiers):
scores = []
for classifier in classifiers:
scores.append(classifier.predict_proba(
features, raw_score=True,
num_iteration=classifier.best_iteration_))
scores = np.array(scores)
return scores
def convert_scores(meta, scores, gal_outlier_score=0.4,
extgal_outlier_score=1.4):
is_gal = meta['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(meta), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
pred = 0
for iter_scores in scores:
# Iterate over each classifier's scores if there were more than one.
# Get base scores
# max_scores = np.max(iter_scores, axis=1)[:, None]
max_scores = np.percentile(iter_scores, 100 * 12.5/13, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None, max_scores)
# Add in class 99 scores.
iter_full_scores = np.hstack([iter_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(iter_full_scores) / np.sum(np.exp(iter_full_scores),
axis=1)[:, None]
pred += iter_pred / len(scores)
print("Mean gal 99: %.5f" % np.mean(pred[is_gal, -1]))
print("Mean ext 99: %.5f" % np.mean(pred[~is_gal, -1]))
# Build a pandas dataframe with the result
df = pd.DataFrame(index=meta['object_id'], data=pred,
columns=['class_%d' % i for i in classes])
return df
def convert_scores_2(meta, scores, s2n, gal_outlier_score=-2.,
extgal_outlier_score=-0.8):
is_gal = meta['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(meta), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
base_class_99_scores[:, 0] += 1.5*np.log10(s2n)
pred = 0
for iter_scores in scores:
# Iterate over each classifier's scores if there were more than one.
# Get base scores
# max_scores = np.max(iter_scores, axis=1)[:, None]
max_scores = np.percentile(iter_scores, 100 * 12.5/13, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None, max_scores)
# Add in class 99 scores.
iter_full_scores = np.hstack([iter_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(iter_full_scores) / np.sum(np.exp(iter_full_scores),
axis=1)[:, None]
pred += iter_pred / len(scores)
print("Mean gal 99: %.5f" % np.mean(pred[is_gal, -1]))
print("Mean ext 99: %.5f" % np.mean(pred[~is_gal, -1]))
# Build a pandas dataframe with the result
df = pd.DataFrame(index=meta['object_id'], data=pred,
columns=['class_%d' % i for i in classes])
return df
def fit_classifier(train_x, train_y, train_weights, eval_x=None, eval_y=None,
eval_weights=None, **kwargs):
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.05,
# 'bagging_fraction': .75,
# 'bagging_freq': 5,
'colsample_bytree': .5,
'reg_alpha': 0.,
'reg_lambda': 0.,
'min_split_gain': 10.,
'min_child_weight': 2000.,
'n_estimators': 5000,
'silent': -1,
'verbose': -1,
'max_depth': 7,
'num_leaves': 50,
}
lgb_params.update(kwargs)
fit_params = {
'verbose': 100,
'sample_weight': train_weights,
}
if eval_x is not None:
fit_params['eval_set'] = [(eval_x, eval_y)]
fit_params['eval_metric'] = lgb_multi_weighted_logloss
fit_params['early_stopping_rounds'] = 50
fit_params['eval_sample_weight'] = [eval_weights]
classifier = lgb.LGBMClassifier(**lgb_params)
classifier.fit(train_x, train_y, **fit_params)
return classifier
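# Hypothetical usage sketch (variable names and the overridden parameter are assumptions, not from the source):
#
# clf = fit_classifier(train_x, train_y, train_weights,
# eval_x=val_x, eval_y=val_y, eval_weights=val_weights,
# learning_rate=0.02)
#
# Any keyword argument overrides the corresponding default in lgb_params before the
# LGBMClassifier is constructed and fit with early stopping on the evaluation set.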
class Dataset(object):
def __init__(self):
"""Class to represent part of the PLAsTiCC dataset.
This class can load either the training or validation data, can produce
features and then can create outputs. The features can also be loaded
from a file to avoid having to recalculate them every time. Not
everything has to be loaded at once, but some functions might not work
if that is the case. I haven't put in the effort to make everything
safe with regards to random calls, so if something breaks you probably
just need to load the data that it needs.
"""
self.flux_data = None
self.meta_data = None
self.features = None
self.dataset_name = None
# Update this whenever the feature calculation code is updated.
self._features_version = settings['FEATURES_VERSION']
# Update this whenever the augmentation code is updated.
self._augment_version = settings['AUGMENT_VERSION']
def load_training_data(self):
"""Load the training dataset."""
self.flux_data = pd.read_csv(settings['RAW_TRAINING_PATH'])
self.meta_data = pd.read_csv(settings["RAW_TRAINING_METADATA_PATH"])
# Label folds
y = self.meta_data['target']
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
kfold_indices = -1*np.ones(len(y))
for idx, (fold_train, fold_val) in enumerate(folds.split(y, y)):
kfold_indices[fold_val] = idx
self.meta_data['fold'] = kfold_indices
self.dataset_name = 'train'
def load_test_data(self):
"""Load the test metadata."""
self.meta_data = pd.read_csv(settings["RAW_TEST_METADATA_PATH"])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 3 11:10:59 2021
@author: zmg
"""
import copy
import logging
from collections import namedtuple
from dataclasses import dataclass, field
from operator import itemgetter
from pathlib import Path
from typing import Dict, List
import numpy as np
import pandas as pd
# from .. import __package_name__
from raman_fitting.indexing.filedata_parser import SpectrumReader
from raman_fitting.processing.cleaner import (
BaselineSubtractorNormalizer,
Despiker,
SpectrumMethods,
)
from raman_fitting.processing.spectrum_template import SpecTemplate, SpectrumWindows
# from .parser import Parser
logger = logging.getLogger(__name__)
@dataclass(order=True, frozen=False)
class SpectrumDataLoader:
"""
Raman Spectrum Loader Dataclass, reads in the file and constructs a clean spectrum from the data.
A sequence of steps is performed on the raw data from SpectrumReader.
The steps/methods are: smoothening filter, despiking and baseline correction.
"""
# TODO Fix this class, simplify
_fields = ("ramanshift", "intensity")
# _spectrum_grp_cols = ['PAR_file','Segment #',EvRHE, 'RPM_DAC']
file: Path = field(default=Path(Path.cwd().joinpath("empty.txt")))
# sample_position: int = 0
# ramanshift: np.array = field(default=np.array([]), init=False)
# intensity: np.array = field(default=np.array([]), init=False)
spectrum_length: int = field(default=0, init=False)
info: Dict = field(default_factory=dict, repr=False)
ovv: type(pd.DataFrame) = field(default=pd.DataFrame(), repr=False)
run_kwargs: Dict = field(default_factory=dict, repr=False)
def __post_init__(self):
self._qcnm = self.__class__.__qualname__
self.register = {} # this stores the data of each method as they are performed
self.filtered_intensity = None
self._despike = None
self._baseline_corrected = None
self.clean_data = None
self.clean_df = {} # dict of DataFrames
self.register_df = pd.DataFrame()
self._read_succes = False
self.load_data_delegator()
def register_spectrum(self, ramanshift, intensity, label):
_spec = SpecTemplate(ramanshift, copy.deepcopy(intensity), label)
self.register.update({label: _spec})
def __getattr__(self, attr):
"""checks if attr is in instance dicts before raising error"""
if attr in self.run_kwargs.keys():
# FIXME CARE getting attributes from kwargs
return self.run_kwargs.get(attr, None)
elif attr in self.info.keys():
return self.info.get(attr, None)
else:
raise AttributeError(
f'Attribute "{attr}" is not in {self.run_kwargs.keys()} or {self.info.keys()}.'
)
def load_data_delegator(self):
"""calls the SpectrumReader class"""
if self.info:
FP_from_info = self.info.get("FilePath", None)
if FP_from_info:
if not Path(FP_from_info) == self.file:
raise ValueError(
f"Mismatch in value for FilePath:\{self.file} != {FP_from_info}"
)
else:
self.info = {"FilePath": self.file}
raw_spectrum = SpectrumReader(self.file)
# print("======== ", raw_spectrum)
self.register_spectrum(raw_spectrum.ramanshift, raw_spectrum.intensity, "raw")
if raw_spectrum.spectrum_length > 0:
self.spectrum_length = raw_spectrum.spectrum_length
self.spectrum_methods_delegator()
else:
logger.warning(f"{self._qcnm} load data fail for:\n\t {self.file}")
self.info = {**self.info, **self.run_kwargs}
def spectrum_methods_delegator(self):
self.filter_data(on_lbl="raw", out_lbl="filtered")
self.despike(on_lbl="filtered", out_lbl="despiked")
self.baseline_correction(on_lbl="despiked", out_lbl="clean_data")
self.set_clean_data_df()
self.set_df_from_register()
def filter_data(self, on_lbl="raw", out_lbl="filtered"):
_r, _int, _lbl = self.register.get(on_lbl)
logger.debug(f"{self.file} try to filter len int({len(_int)}),({type(_int)})")
filtered_intensity = SpectrumMethods.filtered_int(intensity=_int)
self.filtered_intensity = filtered_intensity
self.register_spectrum(_r, filtered_intensity, out_lbl)
def despike(self, on_lbl="filtered", out_lbl="despiked"):
_r, _int, _lbl = self.register.get(on_lbl)
_despike = Despiker(_int) # TODO check for nan in array
self._despike = _despike
self.register_spectrum(_r, _despike.despiked_intensity, out_lbl)
def baseline_correction(self, on_lbl="despiked", out_lbl="clean_data"):
_r, _int, _lbl = self.register.get(on_lbl)
_baseline_corrected = BaselineSubtractorNormalizer(_r, _int, label="despiked")
self._baseline_corrected = _baseline_corrected
_fullspec = _baseline_corrected.norm_data["full"]
self.register_spectrum(_fullspec.ramanshift, _fullspec.intensity, out_lbl)
self.clean_data = _baseline_corrected.norm_data
def set_clean_data_df(self):
self.clean_df = {
k: pd.DataFrame(
{"ramanshift": val.ramanshift, f"int_{self.SamplePos}": val.intensity}
)
for k, val in self.clean_data.items()
}
def set_df_from_register(self):
_regdf = pd.DataFrame()
for k, val in self.register.items():
_spec = pd.DataFrame(
{
"ramanshift": val.ramanshift,
f"{k}_int_{self.SampleID}_{self.SamplePos}": val.intensity,
}
)
if _regdf.empty:
_regdf = _spec
else:
_regdf = pd.merge_asof(_regdf, _spec, on="ramanshift")
self.register_df = _regdf
logger.debug(
f"{self._qcnm} set_df_from_register len int({len(_regdf)}),({type(_regdf)})"
)
def plot_raw(self):
_raw_lbls = [
i
for i in self.register_df.columns
if not any(a in i for a in ["ramanshift", "clean_data"])
]
self.register_df.plot(x="ramanshift", y=_raw_lbls)
def split_data(self, on_lbl="filtered"):
_r, _int, _lbl = self.register.get(on_lbl) # unpacking data from register
for windowname, limits in SpectrumWindows().items():
ind = (_r >= np.min(limits)) & (_r <= np.max(limits))
_intslice = _int[ind]
label = f"{_lbl}_window_{windowname}"
self.register_spectrum(_r, _intslice, label)
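# Hypothetical usage sketch for SpectrumDataLoader (file path and kwargs below are assumptions, not from the source):
#
# spec = SpectrumDataLoader(file=Path("sample_pos1.txt"),
# run_kwargs={"SampleID": "sample", "SamplePos": 1})
# spec.clean_df # dict of windowed, cleaned DataFrames
# spec.plot_raw() # overlay of the raw/filtered/despiked traces
#
# Instantiation runs the full chain from __post_init__: read -> filter -> despike -> baseline correction.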
class SpectrumDataCollection:
"""
This class takes in a collection of SpectrumDataLoader instances.
It checks each member of the list and this enables the option
to take the mean of several spectra from the same SampleID.
"""
MeanSpecTemplate = namedtuple(
"MeanSpectras", "windowname sID_rawcols sIDmean_col mean_info mean_spec"
)
def __init__(self, spectra: List = [SpectrumDataLoader]):
self._qcnm = self.__class__.__qualname__
self._spectra = spectra
Validators.check_members(
self._spectra
) # only raises warning when errors are found
self.spectra = Validators.check_spectra_lengths(self._spectra)
self.info = self.get_mean_spectra_info(self.spectra)
self.info_df = pd.DataFrame(self.info, index=[0])
self.prep_clean_data = self.get_mean_spectra_prep_data(self.spectra)
self.calc_mean()
@staticmethod
def get_mean_spectra_info(spectra: List[SpectrumDataLoader]) -> Dict:
"""retrieves the info dict from spec instances and merges dict in keys that have 1 common value"""
try:
_all_spec_info = [spec.info for spec in spectra if hasattr(spec, "info")]
_all_spec_info_merged = {
k: val for i in _all_spec_info for k, val in i.items()
}
_all_spec_info_sets = [
(k, set([i.get(k, None) for i in _all_spec_info]))
for k in _all_spec_info_merged
]
mean_spec_info = {
k: list(val)[0] for k, val in _all_spec_info_sets if len(val) == 1
}
except Exception as exc:
logger.warning(f"get_mean_spectra_info failed for spectra {spectra}")
mean_spec_info = {}
mean_spec_info.update({"mean_spectrum": True})
return mean_spec_info
@staticmethod
def get_mean_spectra_prep_data(spectra: List[SpectrumDataLoader]) -> Dict:
"""retrieves the clean data from spec instances and makes lists of tuples"""
# and merges dict in keys that have 1 common value'''
try:
_all_spec = [
spec
for spec in spectra
if hasattr(spec, "clean_data") and hasattr(spec, "SamplePos")
]
_all_spec_clean_data_keys = {
k for i in _all_spec for k in i.clean_data.keys()
}
clean_prep_data = {
k: [(i.SamplePos, i.clean_data.get(k, None)) for i in _all_spec]
for k in _all_spec_clean_data_keys
}
except Exception as exc:
logger.warning(f"get_mean_spectra_prep_data failed for spectra {spectra}")
clean_prep_data = {}
return clean_prep_data
def calc_mean(self):
"""Core function of the merging of spectra of different sample positions"""
_merged_window_specs = {}
_speclst = []
_posmean_lbl_base = f'int_{self.info.get("SampleID")}_mean'
for wndwnm, data in self.prep_clean_data.items():
_merge_df = pd.DataFrame()
_pos_lbl_lst = []
for _pos, _sp in data:
_pos_lbl = f"int_{_pos}"
_dfspec = pd.DataFrame(
{"ramanshift": _sp.ramanshift, _pos_lbl: _sp.intensity}
)
if _merge_df.empty:
_merge_df = _dfspec
else:
_merge_df = pd.merge_asof(_merge_df, _dfspec, on="ramanshift")
#!/usr/bin/python3
# Compute the distance between traces captured on the same channel. Large differences signal probable changes in channel usage, e.g. channel sharing by different emitters
# Import standard libraries
import pandas as pd
import numpy as np
import math
import h5py
from matrixprofile import *
from matrixprofile.discords import discords
import matplotlib.pyplot as plt
# Import specific libraries used by the cortex system
import h5_spectrum as H5
import cortex_names as cn
import cortex_lib as cl
# TODO: This could be improved by testing similarity before merging, which would allow separating co-channel emissions
def _main():
# open file and get a list of the channel spectrogram groups. Uses H5py for better efficiency
data_store_file = h5py.File(cn.FOLDER_TO_STORE_FILES+'/'+cn.DATA_FILENAME)
channel_spectrogram_list = list(data_store_file[H5.CHANNEL_DATA_GROUP].keys())
data_store_file.close()
# create array with bin edges to be used on the histogram
profile_histogram_bins = np.arange(1, 8, 0.05)
numpy_histogram_bins = np.r_[-np.inf, profile_histogram_bins, np.inf]
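# np.r_ pads the 0.05-wide bin edges with -inf and +inf, so a histogram built from these
# edges also captures profile values falling below 1 or above 8 (underflow/overflow bins).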
# create empty dataframe to store results
channel_distances = pd.DataFrame()
for spectrogram_group_name in channel_spectrogram_list:
# reopen the file with pandas HDF
data_store_file = pd.HDFStore(cn.FOLDER_TO_STORE_FILES+'/'+cn.DATA_FILENAME)
# Test if dataset is of the spectrogram type
if H5.EM_SPECTRUM_DATA_GROUP in spectrogram_group_name:
# get the channel ID
channel_id = spectrogram_group_name.split(H5.EM_SPECTRUM_DATA_GROUP)[1]
# get the dataframe
channel_traces = data_store_file[H5.CHANNEL_DATA_GROUP+'/'+spectrogram_group_name]
frequency_at_peak = channel_traces.idxmax(axis=1).mean()
number_of_time_samples = channel_traces.shape[0]
if number_of_time_samples > cn.MINIMUM_NUMBER_SAMPLES_FOR_INNER_ANALYSIS:
# reduce the number of traces to make computation viable
if number_of_time_samples*channel_traces.shape[1] > cn.MAXIMUM_NUMBER_DATAPOINTS_FOR_INNER_ANALYSIS:
number_of_time_samples = int(round(cn.MAXIMUM_NUMBER_DATAPOINTS_FOR_INNER_ANALYSIS/channel_traces.shape[1],0))
channel_traces = channel_traces.iloc[0:number_of_time_samples, :]
figure_name = "Spectrogram channel {}".format(channel_id)
plt.figure(figure_name)
plt.subplot(121)
plt.title("Spectrogram for channel {}".format(channel_id))
xy_array = channel_traces.to_numpy(dtype='float32')
frequency_axis = channel_traces.columns.to_numpy(dtype='float64')
x_axis = (frequency_axis-frequency_at_peak)/1000.0
y_axis = channel_traces.index.to_numpy(dtype='float64')
y_axis = y_axis-y_axis.min()
plt.pcolormesh(x_axis, np.arange(0, xy_array.shape[0]), xy_array, cmap='CMRmap_r')
plt.xlabel("Frequency[kHz]")
plt.ylabel("Index")
# flatten the dataframe to a series
flatten_trace = channel_traces.to_numpy(dtype='float32').flatten()
trace_bin_width = channel_traces.shape[1]
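# Added note: the matrix profile subsequence length equals the number of frequency bins in one
# trace, so high profile values flag sweeps whose spectral shape has no close match elsewhere;
# flattened indices are mapped back to trace numbers below via index/trace_bin_width.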
profile, profile_index = matrixProfile.stomp(flatten_trace, trace_bin_width)
plt.subplot(122)
plt.title("Matrix Profile".format(channel_id))
plt.ylim((0, profile.size))
plt.yticks(ticks=np.arange(0, profile.size, profile.size/number_of_time_samples), labels='')
plt.grid(which='major', axis='y')
plt.plot(profile, np.arange(0, profile.size))
combined_dataframe = pd.Series(flatten_trace).to_frame()
combined_dataframe['profile'] = np.append(profile, np.zeros(trace_bin_width-1)+np.nan)
# no exclusion zone
ex_zone = trace_bin_width
# TODO: instead of using a fixed number, use a measure relative to the noise floor
number_of_modes = math.ceil(number_of_time_samples*cn.PEAK_SAMPLE_RATIO)
# get a maximum of one anomaly for each trace
profile_peak = discords(combined_dataframe['profile'], ex_zone, k=number_of_modes+1)
# get the peaks into a dataframe with corresponding matrix profile values
profile_peak_df = combined_dataframe.loc[profile_peak, 'profile']
# select peaks that have large profile values considering defined thershold
# profile_peak_df=profile_peak_df[profile_peak_df.loc[:]>cn.MPROFILE_NOISE_THRESHOLD]
profile_peak_df = profile_peak_df.reset_index()
# compute the corresponding trace index based on the flatten index
profile_peak_df['trace_index'] = round(profile_peak_df['index']/trace_bin_width, 0)
order = 1
for one_peak_index in profile_peak_df['trace_index']:
plt.subplot(121)
plot_name = "{}".format(order)
x_pos = x_axis.max()
y_pos = int(one_peak_index)
arrow_end_pos = x_pos+((x_pos-x_axis.min())/25)
plt.annotate(plot_name,
xy=(x_pos, y_pos), xycoords="data",
xytext=(arrow_end_pos, y_pos), textcoords='data',
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
order += 1
figure_file_name = "./Images/"+figure_name+".png"
plt.savefig(figure_file_name)
plt.close(figure_name)
# TODO: Use profile_peak_df to split spectrogram into two subchannels. Compute new Profile Density
# store the distance information as reference to the channel
channel_distance_descriptor = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
import numpy as np
import pandas as pd
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df3 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
s = pd.Series(np.random.rand(10), name='x')
# %% concat
df = pd.concat([df1, df2, df3], ignore_index=True)
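# %% additional concat variations (added illustration; not part of the original lesson)
# Concatenate side by side along the columns axis, and label the source frames with keys
# to obtain a hierarchical row index.
df_wide = pd.concat([df1, df2, df3], axis=1)
df_keyed = pd.concat([df1, df2, df3], keys=['a', 'b', 'c'])
df_with_series = pd.concat([df1, s], axis=1)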
"""
.. _sitemaps:
Download, Parse, and Analyze XML Sitemaps
=========================================
One of the fastest and easiest ways to get insights on a website's content is
to simply download its XML sitemap(s).
Sitemaps are also important SEO tools as they reveal a lot of information about
the website, and help search engines in indexing those pages. You might want to
run an SEO audit and check if the URLs in the sitemap properly correspond to
the actual URLs of the site, so this would be an easy way to get them.
Sitemaps basically contain a log of publishing activity, and if they have rich
URLs then you can do some good analysis on their content over time as well.
The :func:`sitemap_to_df` function is very simple to use, and only requires the
URL of a sitemap, a sitemap index, or even a robots.txt file. It goes through
the sitemap(s) and returns a DataFrame containing all the tags and their
information.
* `loc`: The location of the URLs of the sitemaps.
* `lastmod`: The datetime of the date when each URL was last modified, if
available.
* `sitemap`: The URL of the sitemap from which the URL on this row was
retrieved.
* `etag`: The entity tag of the response header, if provided.
* `sitemap_last_modified`: The datetime when the sitemap file was last
modified, if provided.
* `sitemap_size_mb`: The size of the sitemap in megabytes
(1MB = 1,024 x 1,024 bytes)
* `download_date`: The datetime when the sitemap was downloaded.
Sitemap Index
-------------
Large websites typically have a sitemap index file, which contains links to all
other regular sitemaps that belong to the site. The :func:`sitemap_to_df`
function retrieves all sub-sitemaps recursively by default.
In some cases, especially with very large sites, it might be better to first
get the sitemap index, explore its structure, and then decide which sitemaps
you want to get, or if you want them all. Even with smaller websites, it still
might be interesting to get the index only and see how it is structured.
This behavior can be modified by the ``recursive`` parameter, which is set to
`True` by default. Set it to `False` if you want only the index file.
Another interesting thing you might want to do is to provide a robots.txt URL,
and set `recursive=False` to get all available sitemap index files.
>>> sitemap_to_df('https://example.com/robots.txt', recursive=False)
Let's now go through a quick example of what can be done with sitemaps. We can
start by getting one of the BBC's sitemaps.
Regular XML Sitemaps
--------------------
>>> bbc_sitemap = sitemap_to_df('https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml')
>>> bbc_sitemap.head(10)
loc lastmod sitemap etag sitemap_last_modified sitemap_size_mb download_date
0 https://www.bbc.com/arabic/middleeast/2009/06/090620_as_iraq_explosion_tc2 2009-06-20 14:10:48+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
1 https://www.bbc.com/arabic/middleeast/2009/06/090620_iraq_blast_tc2 2009-06-20 21:07:43+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
2 https://www.bbc.com/arabic/business/2009/06/090622_me_worldbank_tc2 2009-06-22 12:41:48+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
3 https://www.bbc.com/arabic/multimedia/2009/06/090624_me_inpictures_brazil_tc2 2009-06-24 15:27:24+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
4 https://www.bbc.com/arabic/business/2009/06/090618_tomtest 2009-06-18 15:32:54+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
5 https://www.bbc.com/arabic/multimedia/2009/06/090625_sf_tamim_verdict_tc2 2009-06-25 09:46:39+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
6 https://www.bbc.com/arabic/middleeast/2009/06/090623_iz_cairo_russia_tc2 2009-06-23 13:10:56+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
7 https://www.bbc.com/arabic/sports/2009/06/090622_me_egypt_us_tc2 2009-06-22 15:37:07+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
8 https://www.bbc.com/arabic/sports/2009/06/090624_mz_wimbledon_tc2 2009-06-24 13:57:18+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
9 https://www.bbc.com/arabic/worldnews/2009/06/090623_mz_leaders_lifespan_tc2 2009-06-23 13:24:23+00:00 https://www.bbc.com/sitemaps/https-sitemap-com-archive-1.xml 5f78c818962d9c3656960a852a1fd9a5 2020-05-27 14:38:31+00:00 7.6312408447265625 2021-01-16 20:16:34.403337+00:00
>>> bbc_sitemap.shape
(49999, 7)
>>> bbc_sitemap.dtypes
loc object
lastmod datetime64[ns, UTC]
sitemap object
etag object
sitemap_last_modified datetime64[ns, UTC]
sitemap_size_mb float64
download_date datetime64[ns, UTC]
dtype: object
Since ``lastmod`` is a ``datetime`` object, we can easily use it for various
time-related operations.
Here we look at how many articles have been published (last modified) per year.
>>> bbc_sitemap.set_index('lastmod').resample('A')['loc'].count()
lastmod
2008-12-31 00:00:00+00:00 2261
2009-12-31 00:00:00+00:00 47225
2010-12-31 00:00:00+00:00 0
2011-12-31 00:00:00+00:00 0
2012-12-31 00:00:00+00:00 0
2013-12-31 00:00:00+00:00 0
2014-12-31 00:00:00+00:00 0
2015-12-31 00:00:00+00:00 0
2016-12-31 00:00:00+00:00 0
2017-12-31 00:00:00+00:00 0
2018-12-31 00:00:00+00:00 0
2019-12-31 00:00:00+00:00 481
2020-12-31 00:00:00+00:00 32
Freq: A-DEC, Name: loc, dtype: int64
As the majority are in 2009 with a few in other years, it seems these were
later updated, but we would have to check to verify (in this special case BBC's
URLs contain date information, which can be compared to ``lastmod`` to check if
there is a difference between them).
We can take a look at a sample of the URLs to get the URL template that they
use.
>>> bbc_sitemap['loc'].sample(10).tolist()
['https://www.bbc.com/russian/rolling_news/2009/06/090628_rn_pakistani_soldiries_ambush',
'https://www.bbc.com/urdu/pakistan/2009/04/090421_mqm_speaks_rza',
'https://www.bbc.com/arabic/middleeast/2009/07/090723_ae_silwan_tc2',
'https://www.bbc.com/portuguese/noticias/2009/07/090729_iraquerefenbritsfn',
'https://www.bbc.com/portuguese/noticias/2009/06/090623_egitomilitaresfn',
'https://www.bbc.com/portuguese/noticias/2009/03/090302_gazaconferenciaml',
'https://www.bbc.com/portuguese/noticias/2009/07/090715_hillary_iran_cq',
'https://www.bbc.com/vietnamese/culture/2009/04/090409_machienhuu_revisiting',
'https://www.bbc.com/portuguese/noticias/2009/05/090524_paquistaoupdateg',
'https://www.bbc.com/arabic/worldnews/2009/06/090629_om_pakistan_report_tc2']
It seems the pattern is
**https://www.bbc.com/{language}/{topic}/{YYYY}/{MM}/{YYMMDD_article_title}**
This is quite a rich structure, full of useful information. We can easily count
how many articles they have by language, by splitting by "/" and getting the
elements at index three, and counting them.
>>> bbc_sitemap['loc'].str.split('/').str[3].value_counts()
russian 14022
persian 10968
portuguese 5403
urdu 5068
mundo 5065
vietnamese 3561
arabic 2984
hindi 1677
turkce 706
ukchina 545
Name: loc, dtype: int64
We can also get a subset of articles written in a certain language, and see how
many articles they publish per month, week, year, etc.
>>> (bbc_sitemap[bbc_sitemap['loc']
... .str.contains('/russian/')]
... .set_index('lastmod')
... .resample('M')['loc'].count())
lastmod
2009-04-30 00:00:00+00:00 1506
2009-05-31 00:00:00+00:00 2910
2009-06-30 00:00:00+00:00 3021
2009-07-31 00:00:00+00:00 3250
2009-08-31 00:00:00+00:00 2769
...
2019-09-30 00:00:00+00:00 8
2019-10-31 00:00:00+00:00 17
2019-11-30 00:00:00+00:00 11
2019-12-31 00:00:00+00:00 24
2020-01-31 00:00:00+00:00 6
Freq: M, Name: loc, Length: 130, dtype: int64
The fifth element after splitting URLs is the topic or category of the article.
We can do the same and count the values.
>>> bbc_sitemap['loc'].str.split('/').str[4].value_counts()[:30]
rolling_news 9044
world 5050
noticias 4224
iran 3682
pakistan 2103
afghanistan 1959
multimedia 1657
internacional 1555
sport 1350
international 1293
india 1285
america_latina 1274
business 1204
cultura_sociedad 913
middleeast 874
worldnews 872
russia 841
radio 769
science 755
football 674
arts 664
ciencia_tecnologia 627
entertainment 621
simp 545
vietnam 539
economia 484
haberler 424
interactivity 411
help 354
ciencia 308
Name: loc, dtype: int64
Finally, we can take the last element after splitting, which contains the slugs
of the articles, replace underscores with spaces, split, concatenate all, put
in a ``pd.Series`` and count the values. This way we see how many times each
word occurred in an article.
>>> (pd.Series(
... bbc_sitemap['loc']
... .str.split('/')
... .str[-1]
... .str.replace('_', ' ')
... .str.cat(sep=' ')
... .split()
... )
... .value_counts()[:15])
rn 8808
tc2 3153
iran 1534
video 973
obama 882
us 862
china 815
ir88 727
russia 683
si 640
np 638
afghan 632
ka 565
an 556
iraq 554
dtype: int64
This was a quick overview and data preparation for a sample sitemap. Once you
are familiar with the sitemap's structure, you can more easily start analyzing
the content.
.. note::
There is a bug currently with tags that contain multiple values in
sitemaps. If an image column in a news sitemap contains multiple images,
only the last one is retrieved. The same applies for any other sitemap that
has a tag with multiple values.
News Sitemaps
-------------
>>> nyt_news = sitemap_to_df('https://www.nytimes.com/sitemaps/new/news.xml.gz')
>>> nyt_news
loc lastmod publication_name publication_language news_publication_date news_title news_keywords image image_loc sitemap etag sitemap_last_modified sitemap_size_mb download_date
0 https://www.nytimes.com/live/2021/01/16/us/inauguration-day-biden 2021-01-16 20:22:56+00:00 The New York Times en 2021-01-16T13:58:07Z Biden Inauguration, Trump Approval Rating and Protests: Live Weekend Updates nan nan nan https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
1 https://www.nytimes.com/live/2021/01/16/science/nasa-space-launch-rocket-fire-test 2021-01-16 20:18:17+00:00 The New York Times en 2021-01-16T20:15:56Z Live: NASA’s Space Launch System Hot-Fire Test nan nan nan https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
2 https://www.nytimes.com/interactive/2020/obituaries/people-died-coronavirus-obituaries.html 2021-01-16 20:17:36+00:00 The New York Times en-US 2020-04-16T22:28:14Z Those We’ve Lost Deaths (Obituaries), Coronavirus (2019-nCoV) https://static01.nyt.com/images/2020/12/01/obituaries/25Houser/merlin_180391827_78fe8f74-0a8e-43c9-bc96-51859d84c2a5-articleLarge.jpg https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
3 https://www.nytimes.com/2021/01/16/opinion/coronavirus-biden-vaccine-covid.html 2021-01-16 20:13:19+00:00 The New York Times en-US 2021-01-16T19:30:07Z Joe Biden Actually Has a Covid Plan Coronavirus (2019-nCoV), Contact Tracing (Public Health), Vaccination and Immunization, United States Politics and Government, Biden, Joseph R Jr, United States https://static01.nyt.com/images/2021/01/17/opinion/16pandemic1-print/merlin_173210889_b98256be-c87b-4a48-b3ab-14c0c064e33f-articleLarge.jpg https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
4 https://www.nytimes.com/2021/01/13/opinion/capitol-attack-war.html 2021-01-16 20:06:43+00:00 The New York Times en 2021-01-13T10:06:54Z Why the Capitol Riot Reminded Me of War Storming of the US Capitol (Jan, 2021), Video Recordings, Downloads and Streaming, Iraq War (2003-11) https://static01.nyt.com/images/2021/01/18/opinion/sunday/18Ackermann/13Ackermann-articleLarge.jpg https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
5 https://www.nytimes.com/interactive/2020/us/wyoming-coronavirus-cases.html 2021-01-16 20:01:26+00:00 The New York Times en-US 2020-04-01T15:47:57Z Wyoming Coronavirus Map and Case Count Coronavirus (2019-nCoV), Wyoming, Disease Rates https://static01.nyt.com/images/2020/03/29/us/wyoming-coronavirus-cases-promo-1585539595289/wyoming-coronavirus-cases-promo-1585539595289-articleLarge-v117.png https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
6 https://www.nytimes.com/interactive/2020/world/coronavirus-maps.html 2021-01-16 20:01:21+00:00 The New York Times en-US 2020-01-28T22:57:20Z Coronavirus World Map: Tracking the Global Outbreak Coronavirus (2019-nCoV), Epidemics, Centers for Disease Control and Prevention, Johns Hopkins University, Wuhan (China), China, United States, Australia, Singapore, Disease Rates, Deaths (Fatalities) https://static01.nyt.com/images/2020/09/29/us/china-wuhan-coronavirus-maps-promo-1601396059552/china-wuhan-coronavirus-maps-promo-1601396059552-articleLarge-v354.png https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
7 https://www.nytimes.com/interactive/2020/us/wisconsin-coronavirus-cases.html 2021-01-16 20:01:16+00:00 The New York Times en-US 2020-04-01T15:47:54Z Wisconsin Coronavirus Map and Case Count Coronavirus (2019-nCoV), Wisconsin, Disease Rates https://static01.nyt.com/images/2020/03/29/us/wisconsin-coronavirus-cases-promo-1585539580772/wisconsin-coronavirus-cases-promo-1585539580772-articleLarge-v118.png https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
8 https://www.nytimes.com/interactive/2020/us/west-virginia-coronavirus-cases.html 2021-01-16 20:01:12+00:00 The New York Times en-US 2020-04-01T15:47:51Z West Virginia Coronavirus Map and Case Count Coronavirus (2019-nCoV), West Virginia, Disease Rates https://static01.nyt.com/images/2020/03/29/us/west-virginia-coronavirus-cases-promo-1585539566313/west-virginia-coronavirus-cases-promo-1585539566313-articleLarge-v118.png https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
9 https://www.nytimes.com/interactive/2020/us/washington-coronavirus-cases.html 2021-01-16 20:01:07+00:00 The New York Times en-US 2020-04-01T15:47:47Z Washington Coronavirus Map and Case Count Coronavirus (2019-nCoV), Washington (State), Disease Rates https://static01.nyt.com/images/2020/03/29/us/washington-coronavirus-cases-promo-1585539550650/washington-coronavirus-cases-promo-1585539550650-articleLarge-v116.png https://www.nytimes.com/sitemaps/new/news.xml.gz 5bfc0575bbabef04ced9f8e33e05fdcd 2021-01-16 20:23:13+00:00 0.6700353622436523 2021-01-16 20:23:59.469793+00:00
[741 rows x 13 columns]
Video Sitemaps
--------------
>>> wired_video = sitemap_to_df('https://www.wired.com/video/sitemap.xml')
>>> wired_video
loc video_thumbnail_loc video_title video_description video_content_loc video_duration video_publication_date sitemap etag sitemap_size_mb download_date
0 https://www.wired.com/video/watch/behind-the-scenes-with-jj-abrams http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1389040164/wired_behind-the-scenes-with-jj-abrams.jpg Behind the Scenes with <NAME> Wired magazine teams up with <NAME> for the May issue. Look in on the creative process with J.J. and the edit and design teams. http://dp8hsntg6do36.cloudfront.net/5171b42ac2b4c00dd0c1ff9e/low.mp4 205 2009-04-20T00:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
1 https://www.wired.com/video/watch/trip-hop-pioneer-tricky-sweet-and-naive http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1389040238/wired_trip-hop-pioneer-tricky-sweet-and-naive.jpg Trip-Hop Pioneer Tricky: Sweet and Naive Tricky, of Massive Attack fame, shows Wired.com the ropes on becoming a musician and producer. http://dp8hsntg6do36.cloudfront.net/5171b424c2b4c00dd0c1fe4e/low.mp4 267 2009-04-18T00:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
2 https://www.wired.com/video/watch/trash-foils-diamond-heist http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1464291637/wired_trash-foils-diamond-heist.jpg Trash Foils Diamond Heist Trash Foils Diamond Heist http://dp8hsntg6do36.cloudfront.net/5171b424c2b4c00dd0c1fe3c/low.mp4 278 2009-03-12T04:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
3 https://www.wired.com/video/watch/the-toxic-cloud-emitting-portable-dry-ice-mak http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1389040172/wired_the-toxic-cloud-emitting-portable-dry-ice-mak.jpg The Toxic Cloud-Emitting Portable Dry Ice Maker The Toxic Cloud-Emitting Portable Dry Ice Maker in action. http://dp8hsntg6do36.cloudfront.net/5171b424c2b4c00dd0c1fe42/low.mp4 31 2009-02-11T00:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
4 https://www.wired.com/video/watch/chef-ferran-adria-of-el-bulli http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1368475899/wired_chef-ferran-adria-of-el-bulli.jpg Chef Ferran Adria of El Bulli Ferran Adria on why the knife is the most essential tool in your kitchen. http://dp8hsntg6do36.cloudfront.net/5171b42ec2b4c00dd0c20064/low.mp4 72 2008-11-25T00:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
5 https://www.wired.com/video/watch/how-to-make-wired-origami http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1389040170/wired_how-to-make-wired-origami.jpg How To Make Wired Origami <NAME> explains how to fold the Wired issue 16.07 origami splash page. http://dp8hsntg6do36.cloudfront.net/5171b3cbc2b4c00dd0c1e969/low.mp4 150 2008-09-23T00:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
6 https://www.wired.com/video/watch/clover-coffee-machine http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1389040152/wired_clover-coffee-machine.jpg Clover Coffee Machine Wired.com takes a look at the 'Clover', an $11,000 coffee machine hand-built by Stanford engineers. http://dp8hsntg6do36.cloudfront.net/5171b42ec2b4c00dd0c2005b/low.mp4 147 2008-09-23T00:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
7 https://www.wired.com/video/watch/original-wargames-trailer http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1464291813/wired_original-wargames-trailer.jpg Original WarGames Trailer Original WarGames Trailer http://dp8hsntg6do36.cloudfront.net/5171b427c2b4c00dd0c1fee7/low.mp4 140 2008-07-21T04:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
8 https://www.wired.com/video/watch/rock-band-trailer http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1464292286/wired_rock-band-trailer.jpg Rock Band Trailer Rock Band Trailer http://dp8hsntg6do36.cloudfront.net/5171b431c2b4c00dd0c20100/low.mp4 70 2007-09-14T04:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
9 https://www.wired.com/video/watch/arrival-full-trailer http://dwgyu36up6iuz.cloudfront.net/heru80fdn/image/upload/c_fill,d_placeholder_thescene.jpg,fl_progressive,g_face,h_180,q_80,w_320/v1471366897/wired_arrival-full-trailer.jpg ‘Arrival’ — Full Trailer Louise Banks (<NAME>) must learn to communicate with aliens to save humanity in the new film from ‘Sicario’ director Denis Villeneuve. http://dp8hsntg6do36.cloudfront.net/57b344f4fd2e614f99000014/1a74100f-bc1b-4279-b677-5efc301785d9low.mp4 145 2003-10-22T04:00:00+00:00 https://www.wired.com/video/sitemap.xml W/4eecc23d353856e29d6dae1ce42b43ba 2.2617597579956055 2021-01-16 20:43:37.992796+00:00
[2343 rows x 11 columns]
"""
from gzip import GzipFile
import logging
from concurrent import futures
from xml.etree import ElementTree
from urllib.request import urlopen, Request
from advertools import __version__ as version
import pandas as pd
logging.basicConfig(level=logging.INFO)
headers = {'User-Agent': 'advertools-' + version}
def _sitemaps_from_robotstxt(robots_url):
sitemaps = []
robots_page = urlopen(Request(robots_url, headers=headers))
for line in robots_page.readlines():
line_split = [s.strip() for s in line.decode().split(':', maxsplit=1)]
if line_split[0].lower() == 'sitemap':
sitemaps.append(line_split[1])
return sitemaps
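# Illustrative note (added): given a robots.txt containing a line such as
# "Sitemap: https://example.com/sitemap_index.xml"
# the helper above returns ['https://example.com/sitemap_index.xml']; matching is
# case-insensitive on the "Sitemap" field name and splits only on the first colon.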
def _parse_sitemap(root):
d = dict()
for node in root:
for n in node:
if 'loc' in n.tag:
d[n.text] = {}
def parse_xml_node(node, node_url, prefix=''):
nonlocal d
keys = []
for element in node:
if element.text:
tag = element.tag.split('}')[-1]
d[node_url][prefix + tag] = element.text
keys.append(tag)
prefix = prefix if tag in keys else ''
if list(element):
parse_xml_node(element, node_url, prefix=element.tag.split('}')[-1] + '_')
for node in root:
node_url = [n.text for n in node if 'loc' in n.tag][0]
parse_xml_node(node, node_url=node_url)
return pd.DataFrame(d.values())
def sitemap_to_df(sitemap_url, max_workers=8, recursive=True):
"""
Retrieve all URLs and other available tags of a sitemap(s) and put them in
a DataFrame.
You can also pass the URL of a sitemap index, or a link to a robots.txt
file.
:param url sitemap_url: The URL of a sitemap, either a regular sitemap, a
sitemap index, or a link to a robots.txt file.
In the case of a sitemap index or robots.txt, the
function will go through all the sub sitemaps and
retrieve all the included URLs in one DataFrame.
:param int max_workers: The maximum number of workers to use for threading.
The higher the faster, but with high numbers you
risk being blocked and/or missing some data as you
might appear like an attacker.
:param bool recursive: Whether or not to follow and import all sub-sitemaps
(in case you have a sitemap index), or to only
import the given sitemap. This might be useful in
case you want to explore what sitemaps are available
after which you can decide which ones you are
interested in.
:return sitemap_df: A pandas DataFrame containing all URLs, as well as
other tags if available (``lastmod``, ``changefreq``,
``priority``, or others found in news, video, or image
sitemaps).
"""
if sitemap_url.endswith('robots.txt'):
return pd.concat([sitemap_to_df(sitemap, recursive=recursive)
for sitemap in _sitemaps_from_robotstxt(sitemap_url)],
ignore_index=True)
if sitemap_url.endswith('xml.gz'):
xml_text = urlopen(Request(sitemap_url,
headers={'Accept-Encoding': 'gzip',
'User-Agent': 'advertools-' +
version}))
resp_headers = xml_text.getheaders()
xml_text = GzipFile(fileobj=xml_text)
else:
xml_text = urlopen(Request(sitemap_url, headers=headers))
resp_headers = xml_text.getheaders()
xml_string = xml_text.read()
root = ElementTree.fromstring(xml_string)
sitemap_df = pd.DataFrame()
if (root.tag.split('}')[-1] == 'sitemapindex') and recursive:
multi_sitemap_df = pd.DataFrame()
sitemap_url_list = []
for elem in root:
for el in elem:
if 'loc' in el.tag:
if el.text == sitemap_url:
error_df = pd.DataFrame({
'sitemap': [sitemap_url],
'errors': ['WARNING: Sitemap contains a link to itself']
})
multi_sitemap_df = multi_sitemap_df.append(error_df,
ignore_index=True)
else:
sitemap_url_list.append(el.text)
with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
to_do = []
for sitemap in sitemap_url_list:
future = executor.submit(sitemap_to_df, sitemap)
to_do.append(future)
done_iter = futures.as_completed(to_do)
for future in done_iter:
try:
multi_sitemap_df = multi_sitemap_df.append(future.result(),
ignore_index=True)
except Exception as e:
error_df = pd.DataFrame(dict(errors=str(e)),
index=range(1))
future_str = hex(id(future))
hexes = [hex(id(f)) for f in to_do]
index = hexes.index(future_str)
error_df['sitemap'] = sitemap_url_list[index]
logging.warning(msg=str(e) + ' ' + sitemap_url_list[index])
multi_sitemap_df = multi_sitemap_df.append(error_df,
ignore_index=True)
return multi_sitemap_df
else:
logging.info(msg='Getting ' + sitemap_url)
elem_df = _parse_sitemap(root)
sitemap_df = sitemap_df.append(elem_df, ignore_index=True)
sitemap_df['sitemap'] = [sitemap_url] if sitemap_df.empty else sitemap_url
if 'lastmod' in sitemap_df:
try:
sitemap_df['lastmod'] = pd.to_datetime(sitemap_df['lastmod'], utc=True)
# --------------
##Data loading and statistics
#Import Packages
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code Starts here
#The path for the data file has been stored in the variable path
#Load dataset using pandas read_csv api in variable dataset.
dataset=pd.read_csv(path)
#Display first 5 rows of dataframe dataset.
print(dataset.head())
dataset.drop(columns='Id',inplace=True)
#Check if there's any column which is not useful and remove it.
#Print out the statistical description of the above train dataset.
dataset.describe()
# Code ends here
# --------------
##Exploratory Data Analysis
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
#Import Packages
import seaborn as sns
from matplotlib import pyplot as plt
#Code Starts here
#Store all the column names in the variable called as cols
cols=dataset.columns
#Store the number of features (excluding the target variable) in the variable size.
size=len(cols)-1
print(cols,size)
#Create variable x containing the target column.
x=dataset['Cover_Type']
#Create variable y containing all the columns excluding the target.
y=dataset.drop(columns='Cover_Type')
for i in range(size):
#Plot the violin plot for all attributes.
sns.violinplot(x=x,y=dataset[cols[i]])
plt.show()
#Code ends here
# --------------
##Closer look towards relationship between different variables
#Import Packages
import numpy as np
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
#We can observe that the first 10 columns contain all continuous features
#Select the first 10 features and store them in the variable subset_train
subset_train=dataset.iloc[:,:10]
print(subset_train.head())
#Calculate the Pearson correlation between these 10 features and store it in variable data_corr
data_corr=subset_train.corr()
print(data_corr)
#Plot a heatmap of data_corr using seaborn
sns.heatmap(data_corr)
plt.show()
#List the correlation pairs from data_corr using .unstack() and .sort_values(kind='quicksort')
#& store it in variable correlation
correlation=data_corr.unstack().sort_values(kind='quicksort')
print(correlation)
#From correlation, using slicing, select the values above upper_threshold and
#below lower_threshold but not equal to 1 and save it as corr_var_list.
#We neglect correlation value equal to 1 since this indicates correlation of a feature with itself.
corr_var_list=correlation[((correlation>upper_threshold) | (correlation<lower_threshold)) & (correlation!=1)]
print(corr_var_list)
# Code ends here
# --------------
##Data Cleaning , Data preparation
#Import libraries
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
#Code Starts here
#Split the dataset in features and target and save the same in variables X and Y respectively
X=dataset.drop(columns='Cover_Type')
Y=dataset['Cover_Type']
#Split X and Y into train and test sets using train_test_split with test_size=0.2 and store them in X_train, X_test, Y_train, Y_test,
#setting random_state to 0.
X_train, X_test, Y_train, Y_test=train_test_split(X,Y,test_size=0.2,random_state=0)
#Instantiate StandardScaler() to a variable called scaler. Perform the standard scaling on the continuous data on X_train and X_test
#and store it in X_train_temp and X_test_temp.
scaler=StandardScaler()
numeric_cols=['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology',
'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points']
cat_cols=['Wilderness_Area1',
'Wilderness_Area2', 'Wilderness_Area3', 'Wilderness_Area4',
'Soil_Type1', 'Soil_Type2', 'Soil_Type3', 'Soil_Type4', 'Soil_Type5',
'Soil_Type6', 'Soil_Type8', 'Soil_Type9', 'Soil_Type10', 'Soil_Type11',
'Soil_Type12', 'Soil_Type13', 'Soil_Type14', 'Soil_Type16',
'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',
'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24',
'Soil_Type25', 'Soil_Type26', 'Soil_Type27', 'Soil_Type28',
'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32',
'Soil_Type33', 'Soil_Type34', 'Soil_Type35', 'Soil_Type36',
'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40']
X_train_temp=scaler.fit_transform(X_train.iloc[:,:10])
X_test_temp=scaler.transform(X_test.iloc[:,:10])
# Scales are not the same for all variables; hence, rescaling and standardization may be necessary before some algorithms are applied.
#Concatenate the scaled continuous data to categorical data above (X_train_temp and X_train) and store it in X_train1
#and similarily concatenate ( X_test_temp and X_test) and store it in X_test1.
X_train1=np.concatenate((X_train_temp,X_train[cat_cols]),axis=1)
X_test1=np.concatenate((X_test_temp,X_test[cat_cols]),axis=1)
#Create a dataframe of rescaled data X_train1 which consists of columns and indexes set as of unscaled X_train
#and store in the variable named as scaled_features_train_df
scaled_features_train_df= | pd.DataFrame(X_train1,index=X_train.index,columns=X_train.columns) | pandas.DataFrame |
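# Hedged sketch (assumption, not stated in the exercise text above): the test split is
# presumably wrapped into a DataFrame the same way; the name scaled_features_test_df is illustrative.
scaled_features_test_df = pd.DataFrame(X_test1, index=X_test.index, columns=X_test.columns)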
import argparse
import numpy as np
import csv
import pandas as pd
import json
import scipy.sparse as sp
from sparsebm import (
SBM,
LBM,
ModelSelection,
generate_LBM_dataset,
generate_SBM_dataset,
)
from sparsebm.utils import reorder_rows, ARI
import logging
logger = logging.getLogger(__name__)
try:
import cupy
_DEFAULT_USE_GPU = True
except ImportError:
_DEFAULT_USE_GPU = False
def define_parsers():
main = argparse.ArgumentParser(prog="sparsebm")
subparsers = main.add_subparsers(
help="algorithm to use", dest="subparser_name"
)
sbm_parser = subparsers.add_parser(
"sbm", help="use the stochastic block model"
)
lbm_parser = subparsers.add_parser(
"lbm", help="use the latent block model"
)
ms_parser = subparsers.add_parser(
"modelselection", help="use the model selection with LBM or SBM"
)
input_grp = ms_parser.add_argument_group("mandatory arguments")
input_grp.add_argument(
"ADJACENCY_MATRIX", help="List of edges in CSV format"
)
input_grp.add_argument(
"-t",
"--type",
help="model to use. Either 'lbm' or 'sbm'",
required=True,
)
input_grp = ms_parser.add_argument_group("optional arguments")
input_grp.add_argument(
"-sep",
"--sep",
default=",",
help="CSV delimiter to use. Default is ',' ",
)
input_grp.add_argument(
"-gpu",
"--use_gpu",
help="specify if a GPU should be used.",
default=_DEFAULT_USE_GPU,
type=bool,
)
input_grp.add_argument(
"-idgpu",
"--gpu_index",
help="specify the gpu index if needed.",
default=None,
type=bool,
)
input_grp.add_argument(
"-s",
"--symmetric",
help="specify if the adajacency matrix is symmetric. For sbm only",
default=False,
)
input_grp.add_argument(
"-p", "--plot", help="display model exploration plot", default=True
)
output_grp = ms_parser.add_argument_group("output")
output_grp.add_argument(
"-o",
"--output",
help="File path for the json results.",
default="results.json",
)
generate_sbm_parser = subparsers.add_parser(
"generate", help="use sparsebm to generate a data matrix"
)
subparsers_generate = generate_sbm_parser.add_subparsers(
help="model to generate data with", dest="subparsers_generate_name"
)
sbm_generation_parser = subparsers_generate.add_parser(
"sbm", help="use the stochastic block model to generate data"
)
lbm_generation_parser = subparsers_generate.add_parser(
"lbm", help="use the latent block model to generate data"
)
help_example_base = """A json configuration file that specify the parameters
of the data to generate. If no file is given a random graph is generated."""
help_sbm_gen = """\n Example of json configuration file for SBM: \n{\n
"type": "sbm",\n "number_of_nodes": 1000,\n "number_of_clusters": 4,\n
"symmetric": true,\n "connection_probabilities": [\n [\n 0.1,\n
0.036,\n 0.012,\n 0.0614\n ],\n [\n 0.036,\n
0.074,\n 0,\n 0\n ],\n [\n 0.012,\n 0,\n
0.11,\n 0.024\n ],\n [\n 0.0614,\n 0,\n
0.024,\n 0.086\n ]\n ],\n "cluster_proportions": [\n 0.25
,\n 0.25,\n 0.25,\n 0.25\n ]\n}"""
sbm_generation_parser.add_argument(
"-f",
"--file",
default=None,
help=help_example_base + help_sbm_gen,
required=False,
)
lbm_generation_parser.add_argument(
"-f", "--file", default=None, help=help_example_base, required=False
)
for parser in [sbm_parser, lbm_parser]:
input_grp = parser.add_argument_group("mandatory arguments")
input_grp.add_argument(
"ADJACENCY_MATRIX", help="List of edges in CSV format"
)
if parser == lbm_parser:
input_grp.add_argument(
"-k1",
"--n_row_clusters",
help="number of row clusters",
default=4,
type=int,
required=True,
)
input_grp.add_argument(
"-k2",
"--n_column_clusters",
help="number of row clusters",
default=4,
type=int,
required=True,
)
if parser == sbm_parser:
input_grp.add_argument(
"-k",
"--n_clusters",
help="number of clusters",
default=4,
type=int,
required=True,
)
output_grp = parser.add_argument_group("output")
output_grp.add_argument(
"-o",
"--output",
help="File path for the json results.",
default="results.json",
)
param_grp = parser.add_argument_group("optional arguments")
param_grp.add_argument(
"-sep",
"--sep",
default=",",
help="CSV delimiter to use. Default is ',' ",
)
if parser == sbm_parser:
param_grp.add_argument(
"-s",
"--symmetric",
help="Specify if the adajacency matrix is symmetric",
default=False,
# type=bool,
)
param_grp.add_argument(
"-niter",
"--max_iter",
help="Maximum number of EM step",
default=10000,
type=int,
)
param_grp.add_argument(
"-ninit",
"--n_init",
help="Number of initializations that will be run",
default=100,
type=int,
)
param_grp.add_argument(
"-early",
"--n_iter_early_stop",
help="Number of EM steps to perform for each initialization.",
default=10,
type=int,
)
param_grp.add_argument(
"-ninitt",
"--n_init_total_run",
help="Number of the best initializations that will be run\
until convergence.",
default=2,
type=int,
)
param_grp.add_argument(
"-t",
"--tol",
help="Tolerance of likelihood to declare convergence.",
default=1e-4,
type=float,
)
param_grp.add_argument(
"-v",
"--verbosity",
help="Degree of verbosity. Scale from 0 (no message displayed)\
to 3.",
default=1,
type=int,
)
param_grp.add_argument(
"-gpu",
"--use_gpu",
help="Specify if a GPU should be used.",
default=_DEFAULT_USE_GPU,
type=bool,
)
param_grp.add_argument(
"-idgpu",
"--gpu_index",
help="Specify the gpu index if needed.",
default=None,
type=bool,
)
return main
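# Usage sketch (illustrative): how the parser defined above might be exercised; the
# file name is a placeholder, and parse_args only needs the argument strings here.
# main_parser = define_parsers()
# args = main_parser.parse_args(["lbm", "graph.csv", "-k1", "3", "-k2", "4"])
# print(args.subparser_name, args.n_row_clusters, args.n_column_clusters)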
def graph_from_csv(file, type, sep=","):
try:
pda = | pd.read_csv(file, sep=sep, header=None) | pandas.read_csv |
'''
Module : create_salmon_ec_count_matrix
Description : Create equivalence class matrix from salmon.
Copyright : (c) <NAME>, Dec 2018
License : MIT
Maintainer : <EMAIL>
Portability : POSIX
Match salmon equivalence class output files from multiple
samples and create a matrix with EC counts and transcript IDs
'''
import pandas as pd
import numpy as np
import sys
from argparse import ArgumentParser
def parse_args():
'''
Parse command line arguments.
Returns Options object with command line argument values as attributes.
Will exit the program on a command line error.
'''
description = 'Create EC counts matrix from Salmon input'
usage = '''
python create_ec_count_matrix.py <eq_classes> <samples> <outfile>
For example:
python create_ec_count_matrix.py \\
sample1/aux_info/eq_classes.txt \\
sample2/aux_info/eq_classes.txt \\
sample1,sample2 ec_matrix.txt'''
parser = ArgumentParser(description=description, usage=usage)
parser.add_argument('inputs', nargs='*')
args = parser.parse_args()
if len(args.inputs) < 3:
print("Invalid arguments\n\nusage:")
print(usage)
sys.exit()
return args
def load_ecs(ec_file):
'''
Read Salmon equivalence class output file
and return dictionary with all transcripts,
equivalence class transcript IDs and counts
'''
ec_df = | pd.read_csv(ec_file, header=None) | pandas.read_csv |
# Necessary import
import pandas as pd
import matplotlib.pyplot as plt
import requests
import json
## Part 1. Functions related to historical Coronavirus data of last 30 days
def obtain_historical_data():
"""
This is a function to obtain historical data using NovelCOVID API.
API documentation: https://documenter.getpostman.com/view/11144369/Szf6Z9B3?version=latest#513b0f63-b3ae-4427-9e4d-d3f6937789e9
Official documentation: https://github.com/disease-sh/API
    Authentication: This API does not require a key and it is open source.
Parameters (Inputs)
----------
Returns (Output)
-------
list
        A list of dictionaries (parsed JSON) containing cases, deaths, and recovered data for the last 30 days for all available countries
Examples
--------
>>> history = obtain_historical_data()
>>> type(history)
list
"""
r = requests.get("https://corona.lmao.ninja/v2/historical?lastdays=30")
if r.status_code != 200:
print('API status !=200, failed')
elif r.status_code == 200:
        print('API status = 200, successful')
historical_data = r.json() # convert the results to json format
print(json.dumps(historical_data, indent=2, sort_keys=True))
return(historical_data)
# 1.1 Historical data of cases in each country
def organized_historical_cases(historical_data):
"""
    This is a function that selects the case-related data from the previously generated historical_data.
    It converts the data to a DataFrame and organizes it.
    The reason for including groupby and sum() is that some of the raw data is listed per province within a country.
In order to generate the data with whole country, groupby and sum() is in need.
Parameters (Inputs)
----------
historical_data : list
The list of data generated by the first function
Returns (Output)
-------
DataFrame
A DataFrame containing data of cases for last 30 days for all countries available.
Examples
--------
>>> history = obtain_historical_data()
>>> cases = organized_historical_cases(history)
>>> type(cases)
pandas.core.frame.DataFrame
"""
df = pd.DataFrame(historical_data)
list_timeline = df['timeline'].values.tolist()
timeline = pd.DataFrame(list_timeline)
list_cases = timeline['cases'].values.tolist()
cases = | pd.DataFrame(list_cases) | pandas.DataFrame |
import pandas as pd
import pickle
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
import numpy as np
import datetime as dt
from LDA import remove_stopwords, lemmatization, make_bigrams, sent_to_words
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# LOAD CLUSTERING MODEL
with open("data/cluster_model.pkl", "rb") as f:
cluster_model = pickle.load(f)
# LOAD LDA MODEL
lda_model = gensim.models.LdaModel.load('data/LDA/lda.model')
id2word = corpora.Dictionary.load('data/LDA/lda.model.id2word')
def get_interests():
"""
Load the raw interest csv file.
:return: The full interest.csv file in pandas dataframe
"""
interest = pd.read_csv('data/interest.csv')
return(interest)
def get_posts():
"""
Load the raw posts csv file.
:return: The full posts.csv file in pandas dataframe
"""
posts = pd.read_csv('data/posts.csv')
return(posts)
def get_users():
"""
Load the raw users csv file.
:return: The full users.csv file in pandas dataframe
"""
users = pd.read_csv('data/users.csv')
return(users)
def filter_posts(uid,date):
"""
Returns posts that have been filtered to be before a given date and aren't owned by the user
:param uid (str): user-id to filter by
:param date (str): date value to filter by
:return: pandas dataframe filtered of any posts greater than date and not owned by user
"""
posts = get_posts()
posts = posts[posts['uid'] != uid]
posts = posts[posts['post_time'] < date]
return posts
def get_user_data(uid):
"""
Returns the selected user account information
:param uid (str): user-id
:return: single-row pandas dataframe of user account information
"""
users = get_users()
user = users[users['uid'] == uid].reset_index(drop=True)
return user
def get_user_interest(uid):
"""
Returns the selected user interest information
:param uid (str): user-id
:return: single-row pandas dataframe of user interest information
"""
interests = get_interests()
interest = interests[interests['uid'] == uid].reset_index(drop=True)
return interest
def cluster_user(uid):
"""
Returns categorised ID of the selected user from the clustering model
:param uid (str): user-id
:return: single integer value of ID category
"""
# Load needed data for user
users = get_user_data(uid)
interests = get_user_interest(uid)
# Create Age Buckets for clustering
users['date'] = pd.to_datetime(users['dob'], format='%d/%m/%Y', errors='coerce')
users['age'] = dt.datetime.now() - users['date']
users['age'] = (users['age']).dt.days
users['age'] = users['age']/365
users['age_cat'] = np.where(users['age']<20,1,
np.where((users['age']>=20) & (users['age']<25),2,
np.where((users['age']>=25) & (users['age']<30),3,
np.where((users['age']>=30) & (users['age']<35),4,
np.where((users['age']>=35) & (users['age']<40),5,
np.where((users['age']>=40) & (users['age']<45),6,
np.where((users['age']>=45) & (users['age']<50),7,
np.where((users['age']>=50) & (users['age']<55),8,
np.where((users['age']>=55) & (users['age']<60),9,
np.where((users['age']>=60) & (users['age']<65),10,11))))))))))
user_age = users[['uid', 'age_cat']]
user = pd.merge(users,interests, left_on='uid', right_on='uid', how='left')
# Load the categories in order used to cluster
cats = pd.read_csv('data/interest_cats.csv')
user = pd.merge(user,cats, left_on='interest', right_on='categories')
rows=len(users['uid'])
    cols=len(cats['id']) # one column per interest category; uid and the age bucket are added via the concat below
matrix = pd.DataFrame(np.zeros((rows,cols)))
data = pd.concat([user_age.reset_index(drop=True), matrix], axis=1)
for i in range(1,len(user['uid'])):
#get row number
uid = user['uid'][i]
rnum = data.index[data['uid']==uid]
col = user['id'][i]+1
data.iloc[rnum.values[0],col] = 1
data.drop(columns='uid',axis=1,inplace=True)
# Use model to predict grouping of user
clusters = cluster_model.predict(data)
return clusters[0]
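# Usage sketch (illustrative): 'user_123' is a hypothetical uid that would have to exist
# in data/users.csv and data/interest.csv for the lookup and prediction to succeed.
# user_cluster_id = cluster_user('user_123')
# print('User belongs to cluster', user_cluster_id)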
def get_post_topics(uid,date):
"""
Returns the filtered list of posts that are enriched by topic information
:param uid: user-id (str)
:param date: datetime (str)
:return: sorted dataframe of filtered posts with topic information
"""
# Filter posts for datetime given
posts = filter_posts(uid, date)
data = posts['text'] + posts['hashtags']
# Preprocess data
data_words = list(sent_to_words(data))
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
vector = lda_model[corpus]
def getKey(item):
return item[1]
topic_probability = []
for t in vector:
topic_probability.append(sorted(t[0], key=getKey, reverse=True))
results = pd.DataFrame(topic_probability, columns=['col 1', 'col 2',
'col 3', 'col 4', 'col 5'])
results['topic'] = 0
results['topic_val'] = float(0)
for i in range(0, len(results['col 1'])):
val = results.iloc[i, 0]
        results.loc[i, 'topic'] = val[0]
        results.loc[i, 'topic_val'] = val[1]
topics = results[['topic', 'topic_val']]
return topics
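# Usage sketch (illustrative): the uid and cutoff date are placeholders; the returned
# frame holds the dominant LDA topic id and its probability for each candidate post.
# topics = get_post_topics('user_123', '2020-01-01 00:00:00')
# print(topics.head())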
def enrich_posts(uid,date):
"""
Returns the final list of ranked posts
:param uid: user-id (str)
:return: sorted dataframe of ranked posts relevent to a certain user given a date
"""
# Convert date to datetime
timestamp = | pd.Timestamp(date) | pandas.Timestamp |
import os
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from scipy.optimize import curve_fit
import shutil
from . import C_preprocessing as preproc
class ODE_POSTPROC:
'''
In this class the output of the ODE is post-processed and the output is written as required by optiSMOKE++
a. Read the Output\Output.OUT file and extract the columns => generate t,W
b. Once extracted: generate the files corresponding to the profile of every species and write them
c. Write "path_to_Exp_Datasets.txt" indicating the experimental dataset considered
d. Copy input_OS.dic in the new folder
'''
def __init__(self,cwd):
'''
Indicates the path where to store the output.
It can be called either at the beginning of the main or at the first iteration
'''
self.cwd = cwd
# preallocate the list of pathways to the experimental datasets you will generate.
# the paths to the OS input correspond to the datasets.
self.path_to_Exp_Datasets = []
self.path_to_OS_inputs = []
def MAKE_BRANCHINGFOLDERS(self,jobtype,REACLUMPED,PRODSLUMPED,T_VECT):
'''
Makes the subfolder "BF_OUTPUT" to store the data of the branching of the lumped products
It also allocates the dictionary where to store the branchings, which will be written at the end of each single-P simulation
NB this function is to be called only if PRODSLUMPED contains arrays. if not: raise exception
'''
self.lumped_branching = {}
# allocate the composition of the lumped reactant
if isinstance(REACLUMPED[0],np.ndarray):
# generate empty dataframe:
self.lumped_branching_reac = pd.DataFrame(index=T_VECT,columns=REACLUMPED[0])
# allocate only the lumped products with arrays
for PRi_L in PRODSLUMPED.index:
# the indexes are the names of the lumped species
if isinstance(PRODSLUMPED[PRi_L],np.ndarray):
# generate empty dataframe
df_PRi_L = pd.DataFrame(index=T_VECT,columns=PRODSLUMPED[PRi_L])
# allocate it in dictionary
self.lumped_branching[PRi_L] = df_PRi_L
# make the folder "Branchings" if not present already
self.branchingpath = os.path.join(self.cwd,jobtype,'BF_OUTPUT',REACLUMPED.index[0])
if os.path.exists(self.branchingpath) == False:
os.makedirs(self.branchingpath)
def MAKE_FOLDERS(self,fld,P,T,REACLUMPED):
'''
Makes the subfolders at the selected conditions of reactant, temperature and pressure
'''
# extract the name of the reactant
REAC = REACLUMPED.index[0]
self.fld = fld
self.dir_PT = os.path.join(fld,str(P) + 'atm',str(T) + 'K')
if os.path.exists(self.dir_PT) == False:
os.makedirs(self.dir_PT)
# Allocate T,P,REAC to "self" for successive use
self.T = T
self.P = P
self.REACNAME = REAC
self.REAC = REACLUMPED[0]
# NB self.REAC is the array of reactants, self.REACNAME is the name (single reactant or R_L)
def EXTRACT_PROFILES(self,SPECIES,i_REAC,N_INIT_REAC,SPECIES_BIMOL_SERIES,ISOM_EQUIL,CUTOFF):
'''
Read the profiles obtained by OpenSMOKE++ and store them into arrays
'''
# if the reaction is bimolecular: read also the xi of the second species.
# for bimolecular lumped species: set i_REAC as the first species
if isinstance(i_REAC,np.ndarray):
i_REAC_0 = i_REAC[0]
else:
i_REAC_0 = i_REAC
if SPECIES_BIMOL_SERIES.iloc[i_REAC_0] != '' and SPECIES_BIMOL_SERIES.iloc[i_REAC_0] != SPECIES[i_REAC_0]:
# if the second reactant has all same fragment: 1 extracolumn
extracol = 1
else:
extracol = 0
# read the output file
filename = os.path.join(self.cwd,'Output','Output.out')
if os.path.isfile(filename):
cols_species = np.arange(9,9+len(SPECIES)+extracol)
n_cols = np.insert(cols_species,0,[0]) # 0 IS THE INDEX, AND [0] IS THE VALUE TO INSERT
data = np.genfromtxt(filename,dtype=float,skip_header=1,usecols=(n_cols))
# extract also PV (needed for the total number of moles)
cols_PV = np.array([5,6],dtype=int)
data_PV = np.genfromtxt(filename,dtype=float,skip_header=1,usecols=(cols_PV))
######## lumped reactant: sum the profiles, redefine a new reactant index and name, new species and species bimol series
if isinstance(i_REAC,np.ndarray):
# 0: for the calculation of the derivatives: compute the absolute variation of each isomer
dreac_i_dt = abs((-data[2:,i_REAC+1]+data[1:-1,i_REAC+1])/(data[2:,0]-data[1:-1,0]).reshape(len(data[2:,0]),1))
dreac_dt = np.sum(dreac_i_dt,axis=1)
# 1: sum the profiles of all the reactants; save the reactant composition for later
Wreac_new = np.sum(data[:,i_REAC+1],axis=1)
Wreac_new = Wreac_new[np.newaxis,:]
self.Wreac_composition = data[:,i_REAC+1]
# 2: delete all the columns corresponding to the reactant and add the reactant in the first column
# do the same for the names of the species and the bimolecular species
data = np.delete(data,i_REAC+1,axis=1)
data = np.insert(data,1,Wreac_new,axis=1)
SPECIES = np.delete(SPECIES,i_REAC)
bim_reac = pd.Series(SPECIES_BIMOL_SERIES[i_REAC_0],index=[self.REACNAME])
SPECIES_BIMOL_SERIES = SPECIES_BIMOL_SERIES[SPECIES]
SPECIES = np.insert(SPECIES,0,self.REACNAME)
SPECIES_BIMOL_SERIES = pd.concat([bim_reac,SPECIES_BIMOL_SERIES])
# 3: assign new indices
i_REAC = 0 # now the reactant is in the first position
reaclumped = 'YES'
else:
# compute the derivative of the reactant consumption
dreac_dt = (-data[2:,i_REAC+1]+data[1:-1,i_REAC+1])/(data[2:,0]-data[1:-1,0])
# save variables forlater
reaclumped = 'NO'
# cut the profiles where needed
i_in = np.where(data[:,i_REAC+1] <= (1-CUTOFF[0])*N_INIT_REAC)
i_fin = np.where(data[:,i_REAC+1] <= (1-CUTOFF[1])*N_INIT_REAC)
# if the reactant does not reach the minimum consumption (possible for lumped reactants): set the initial value as 0
if len(i_in[0]) == 0:
i_in = 0
else:
i_in = i_in[0][0]
# impose to cut when the DERIVATIVE of the reactant (consumption) reaches a small value
if len(i_fin[0]) == 0:
#i_fin = np.where(dreac_dt[3:] < dreac_dt[3]*1e-4)
# remove infinite values and keep only positive values
dreac2_dt = dreac_dt[(np.isinf(dreac_dt)==False) & (dreac_dt > 0)]
if len(dreac2_dt) <= 1:
# include also length = 1 otherwise it means that i_in and i_fin will be the same
i_in = 0
i_fin = len(dreac_dt)
else:
maxderiv = max(dreac2_dt)
minderiv = min(dreac2_dt)
i_in = np.where(dreac_dt==maxderiv)[0][0]
if minderiv <= maxderiv*1e-4 :
cutoff_deriv = dreac2_dt[dreac2_dt < maxderiv*1e-4][0]
i_fin = np.where(dreac_dt==cutoff_deriv)[0][0]
elif minderiv > maxderiv*1e-4 :
i_fin = np.where(dreac_dt==minderiv)[0][0]
else:
i_fin = i_fin[0][0]
# check that i_fin > i_in, otherwise set i_in to 0
if i_fin < i_in:
i_in = 0
# save data in the appropriate range of consumption of the reactant
data = data[i_in:i_fin,:]
data_PV = data_PV[i_in:i_fin,:]
t = data[:,0]
t = t[:,np.newaxis]
# W will have the mole fractions multiplied by PV/RT (CtotV = Ntot)
# IF YOU HAVE AN EXTRA SPECIES (NAMELY: BIMOLECULAR REACTANT), MULTIPLY THE COLUMN OF THE REACTANT BY THAT
W = data[:,1:]
if extracol == 1:
# multiply by the number of the second fragments s.t. you reach the total fraction of N_ABU
W_reac2 = W[:,-1]
W = W[:,:-1]
# self.W will be used to write the profiles to the optimizer, so no modification should be done
self.W = pd.DataFrame(W,columns=SPECIES)
# after this, multiply W[reac] by the bimolecular reactant to obtain xi^2
W[:,i_REAC] = W[:,i_REAC]*W_reac2
else:
self.W = pd.DataFrame(W,columns=SPECIES)
self.t = t
tW_DF = np.concatenate((t,W),axis=1)
tW_DF = pd.DataFrame(tW_DF,columns=np.insert(SPECIES,0,'t'))
# FOR LUMPED REACTANTS: SAVE THE BRANCHING FRACTIONS FOR LATER
if reaclumped == 'YES':
# reactant composition
# print(self.Wreac_composition,i_in,i_fin)
self.Wreac_composition = self.Wreac_composition[i_in:i_fin,:]
# if ISOM_EQUIL is active: take only the last BF
if ISOM_EQUIL == 1:
self.lumped_branching_reac.loc[self.T,self.REAC] = self.Wreac_composition[-1,:]/np.sum(self.Wreac_composition[-1,:])
elif ISOM_EQUIL == 0:
Wreac_tot = np.sum(self.Wreac_composition[1:],axis=1)
Wreac_tot = Wreac_tot[:,np.newaxis]
dtweight = ((self.t[1:]-self.t[:-1])/self.t[-1])
br_weighted = self.Wreac_composition[1:,:]/Wreac_tot*dtweight
self.lumped_branching_reac.loc[self.T,self.REAC] = np.sum(br_weighted,axis=0)
# save the reactant composition separately for plotting
else:
raise ValueError('OS output file not found')
self.tW_DF = tW_DF
# save species and bimol series for the following steps
self.i_REAC = i_REAC
self.SPECIES = SPECIES
self.SPECIES_BIMOL_SERIES = SPECIES_BIMOL_SERIES
return tW_DF,data_PV
def PROFILES_REAC_COMPOSITION(self):
'''
Method to be called only in presence of a lumped reactant.
It returns the profile of lumped set of reactants in case it is needed for later use
'''
try:
tW_DF_reac = np.concatenate((self.t,self.Wreac_composition),axis=1)
tW_DF_reac = pd.DataFrame(tW_DF_reac,columns=np.insert(self.REAC,0,'t'))
return tW_DF_reac
except ValueError as e:
print(str(e))
def LUMP_PROFILES(self,PRODS,PRODSLUMPED):
'''
IN THIS METHOD:
- TAKE THE PROFILES OF self.W and sum those of the lumped products
- redefine self.PRODS as the names of the lumped products and rewrite self.W and tW_DF
NB also include the species that are not part of PRODSLUMPED or reac.
- Allocate the branchings within each lumped products to the appropriate dictionary
- for later processing: define new
REAC_L
i_REAC_L
SPECIES_SERIES_L
SPECIES_BIMOL_SERIES_L
PRODS_L
these have the same format as the initial ones.
'''
self.PRODS = PRODS
self.PRODSLUMPED = PRODSLUMPED
# non-lumped products: return all the values the same as before
if len(self.PRODS) == len(PRODSLUMPED) :
tW_DF = self.tW_DF
i_REAC_L = self.i_REAC
SPECIES_L = self.SPECIES
SPECIES_SERIES_L = pd.Series(np.arange(0,len(self.SPECIES)),index = self.SPECIES)
SPECIES_BIMOL_SERIES_L = self.SPECIES_BIMOL_SERIES
PRODS_L = self.PRODS
else:
# redefine the product names
self.PRODS = np.array(PRODSLUMPED.index, dtype='<U16')
# empty dataframe for the products
W_prods_L = pd.DataFrame(columns=self.PRODS)
# empty series for bimolecular species
PRODS_L_BIMOL = pd.Series(index=self.PRODS)
# lumped products: go over the lumped products and generate W_prods
# delete the corresponding columns in the dataframe
for PRi_L in self.PRODS:
# PRODSLUMPED[PRi_L] will be the value (either string or array, depending on whether the product is lumped or not)
PRi_L_value = PRODSLUMPED[PRi_L]
# if the product is just a string (single product): just save the corresponding line and delete it
if isinstance(PRi_L_value,str):
# NB here we have PRi_L_value==PRi_L, so using one or the other makes no difference
# delete the column from self.W and move it to W_prods_L
W_prods_L[PRi_L] = self.W[PRi_L]
# now delete the column from self.W
self.W = self.W.drop(columns=PRi_L)
# reconstruct the series of bimolecular species
PRODS_L_BIMOL[PRi_L] = self.SPECIES_BIMOL_SERIES[PRi_L]
elif isinstance(PRi_L_value,np.ndarray):
# for each of the products: sum the columns
W_prods_L[PRi_L] = np.sum(self.W[PRi_L_value],axis=1)
# compute the weighted average branching within each product and save them to dataframe
Wtot = W_prods_L[PRi_L].values[1:, np.newaxis]
dtweight = ((self.t[1:]-self.t[:-1])/self.t[-1])
BR_PRi_L = self.W.loc[1:,PRi_L_value]/Wtot*dtweight
self.lumped_branching[PRi_L].loc[self.T,PRi_L_value] = np.sum(BR_PRi_L,axis=0)
# delete the corresponding columns
self.W = self.W.drop(columns=PRi_L_value)
# reconstruct the series of bimolecular species: take the corresponding species of the first lumped species
PRODS_L_BIMOL[PRi_L] = self.SPECIES_BIMOL_SERIES[PRi_L_value[0]]
# now you deleted from self.W all the product profiles. Put them back at the end by concatenating the dataframes
W_noprods = self.W # save it for later
self.W = pd.concat([self.W,W_prods_L],axis=1)
# new dataframe
timeseries = pd.DataFrame(self.t,columns=['t'])
tW_DF = | pd.concat([timeseries,self.W],axis=1) | pandas.concat |
import pandas as pd
df = | pd.read_csv("data/New_Play_by_Play_data_2.csv") | pandas.read_csv |
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.methods import plant_analysis
from examples.project_ENGIE import Project_Engie
class TestPandasPrufPlantAnalysis(unittest.TestCase):
def setUp(self):
np.random.seed(42)
# Set up data to use for testing (ENGIE example plant)
self.project = Project_Engie('./examples/data/la_haute_borne')
self.project.prepare()
# Test inputs to the regression model, at monthly time resolution
def test_plant_analysis(self):
# ____________________________________________________________________
# Test inputs to the regression model, at monthly time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'M',
reg_temperature = True,
reg_winddirection = True)
df = self.analysis._aggregate.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy_monthly(df)
self.check_process_loss_estimates_monthly(df)
self.check_process_reanalysis_data_monthly(df)
# ____________________________________________________________________
# Test inputs to the regression model, at daily time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_temperature = True,
reg_winddirection = True)
df = self.analysis._aggregate.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy_daily(df)
self.check_process_loss_estimates_daily(df)
self.check_process_reanalysis_data_daily(df)
# ____________________________________________________________________
# Test linear regression model, at monthly time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'M',
reg_model = 'lin',
reg_temperature = False,
reg_winddirection = False)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=30)
sim_results = self.analysis.results
self.check_simulation_results_lin_monthly(sim_results)
# ____________________________________________________________________
# Test linear regression model, at daily time resolution
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'lin',
reg_temperature = False,
reg_winddirection = False)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=30)
sim_results = self.analysis.results
self.check_simulation_results_lin_daily(sim_results)
# ____________________________________________________________________
# Test GAM regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'gam',
reg_temperature = False,
reg_winddirection = True)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=10)
sim_results = self.analysis.results
self.check_simulation_results_gam_daily(sim_results)
# ____________________________________________________________________
# Test GBM regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'gbm',
reg_temperature = True,
reg_winddirection = True)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=10)
sim_results = self.analysis.results
self.check_simulation_results_gbm_daily(sim_results)
# ____________________________________________________________________
# Test ETR regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(self.project,
reanal_products=['merra2', 'era5'],
time_resolution = 'D',
reg_model = 'etr',
reg_temperature = False,
reg_winddirection = False)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=10)
sim_results = self.analysis.results
self.check_simulation_results_etr_daily(sim_results)
def check_process_revenue_meter_energy_monthly(self, df):
# Energy Nan flags are all zero
nptest.assert_array_equal(df['energy_nan_perc'].values, np.repeat(0.0, df.shape[0]))
# Expected number of days per month are equal to number of actual days
nptest.assert_array_equal(df['num_days_expected'], df['num_days_actual'])
# Check a few energy values
expected_gwh = pd.Series([0.692400, 1.471730, 0.580035])
actual_gwh = df.loc[pd.to_datetime(['2014-06-01', '2014-12-01', '2015-10-01']), 'energy_gwh']
nptest.assert_array_almost_equal(expected_gwh, actual_gwh)
def check_process_loss_estimates_monthly(self, df):
        # Availability, curtailment nan fields both 0, NaN flag is all False
nptest.assert_array_equal(df['avail_nan_perc'].values, np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df['curt_nan_perc'].values, np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df['nan_flag'].values, np.repeat(False, df.shape[0]))
        # Check a few reported availability and curtailment values
expected_avail_gwh = pd.Series([0.029417, 0.021005, 0.000444])
expected_curt_gwh = pd.Series([0.013250, 0.000000, 0.000000])
expected_avail_pct = pd.Series([0.040019, 0.014071, 0.000765])
expected_curt_pct = pd.Series([0.018026, 0.000000, 0.000000])
date_ind = pd.to_datetime(['2014-06-01', '2014-12-01', '2015-10-01'])
nptest.assert_array_almost_equal(expected_avail_gwh, df.loc[date_ind, 'availability_gwh'])
nptest.assert_array_almost_equal(expected_curt_gwh, df.loc[date_ind, 'curtailment_gwh'])
nptest.assert_array_almost_equal(expected_avail_pct, df.loc[date_ind, 'availability_pct'])
nptest.assert_array_almost_equal(expected_curt_pct, df.loc[date_ind, 'curtailment_pct'])
def check_process_reanalysis_data_monthly(self, df):
# Check a few wind speed values
expected_merra2 = pd.Series([5.43, 6.87, 5.03])
expected_era5 = pd.Series([5.21, 6.72, 5.24])
date_ind = pd.to_datetime(['2014-06-01', '2014-12-01', '2015-10-01'])
nptest.assert_array_almost_equal(expected_merra2, df.loc[date_ind, 'merra2'], decimal = 2)
nptest.assert_array_almost_equal(expected_era5, df.loc[date_ind, 'era5'], decimal = 2)
# Check a few wind direction values
expected_merra2_wd = pd.Series([11.7, 250.9, 123.7])
expected_era5_wd = pd.Series([23.4, 253.1, 121.3])
date_ind = pd.to_datetime(['2014-06-01', '2014-12-01', '2015-10-01'])
nptest.assert_array_almost_equal(expected_merra2_wd*2*np.pi/360, df.loc[date_ind, 'merra2_wd'], decimal = 1)
nptest.assert_array_almost_equal(expected_era5_wd*2*np.pi/360, df.loc[date_ind, 'era5_wd'], decimal = 1)
# Check a few temperature values
expected_merra2_temp = pd.Series([289.9, 275.3, 281.7])
expected_era5_temp = pd.Series([290.8, 276.6, 282.7])
date_ind = pd.to_datetime(['2014-06-01', '2014-12-01', '2015-10-01'])
nptest.assert_array_almost_equal(expected_merra2_temp, df.loc[date_ind, 'merra2_temperature_K'],decimal = 1)
nptest.assert_array_almost_equal(expected_era5_temp, df.loc[date_ind, 'era5_temperature_K'], decimal = 1)
def check_process_revenue_meter_energy_daily(self, df):
# Energy Nan flags are all zero
nptest.assert_array_equal(df['energy_nan_perc'].values, np.repeat(0.0, df.shape[0]))
# Check a few energy values
expected_gwh = pd.Series([0.0848525, 0.0253657, 0.0668642])
actual_gwh = df.loc[pd.to_datetime(['2014-01-02', '2014-10-12', '2015-12-28']), 'energy_gwh']
nptest.assert_array_almost_equal(expected_gwh, actual_gwh)
def check_process_loss_estimates_daily(self, df):
        # Availability, curtailment nan fields both 0, NaN flag is all False
nptest.assert_array_equal(df['avail_nan_perc'].values, np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df['curt_nan_perc'].values, np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df['nan_flag'].values, np.repeat(False, df.shape[0]))
        # Check a few reported availability and curtailment values
expected_avail_gwh = pd.Series([0.0000483644, 0.000000, 0.000000])
expected_curt_gwh = pd.Series([0.000000, 0.00019581, 0.000000])
expected_avail_pct = pd.Series([0.000569658, 0.000000, 0.000000])
expected_curt_pct = pd.Series([0.000000, 0.00766034, 0.000000])
date_ind = pd.to_datetime(['2014-01-02', '2014-10-12', '2015-12-28'])
nptest.assert_array_almost_equal(expected_avail_gwh, df.loc[date_ind, 'availability_gwh'])
nptest.assert_array_almost_equal(expected_curt_gwh, df.loc[date_ind, 'curtailment_gwh'])
nptest.assert_array_almost_equal(expected_avail_pct, df.loc[date_ind, 'availability_pct'])
nptest.assert_array_almost_equal(expected_curt_pct, df.loc[date_ind, 'curtailment_pct'])
def check_process_reanalysis_data_daily(self, df):
# Check a few wind speed values
expected_merra2 = pd.Series([11.02, 7.04, 8.42])
expected_era5 = pd.Series([10.48, 7.71, 9.61])
date_ind = pd.to_datetime(['2014-01-02', '2014-10-12', '2015-12-28'])
nptest.assert_array_almost_equal(expected_merra2, df.loc[date_ind, 'merra2'],decimal = 2)
nptest.assert_array_almost_equal(expected_era5, df.loc[date_ind, 'era5'], decimal = 2)
# Check a few wind direction values
expected_merra2_wd = pd.Series([213.8, 129.1, 170.4])
expected_era5_wd = pd.Series([212.2, 127.8, 170.3])
date_ind = pd.to_datetime(['2014-01-02', '2014-10-12', '2015-12-28'])
nptest.assert_array_almost_equal(expected_merra2_wd*2*np.pi/360, df.loc[date_ind, 'merra2_wd'], decimal = 1)
nptest.assert_array_almost_equal(expected_era5_wd*2*np.pi/360, df.loc[date_ind, 'era5_wd'], decimal = 1)
# Check a few temperature values
expected_merra2_temp = pd.Series([279.7, 285.7, 278.2])
expected_era5_temp = | pd.Series([281.1, 285.8, 280.4]) | pandas.Series |
"""
Module for applying conditional formatting to DataFrames and Series.
"""
from collections import defaultdict
from contextlib import contextmanager
import copy
from functools import partial
from itertools import product
from typing import (
Any,
Callable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
)
from uuid import uuid1
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
from pandas._typing import Axis, FrameOrSeries, FrameOrSeriesUnion, Label
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_float
import pandas as pd
from pandas.api.types import is_dict_like, is_list_like
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import _shared_docs
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func: Callable):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler:
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
Data to be styled - either a Series or DataFrame.
precision : int
Precision to round floats to, defaults to pd.options.display.precision.
table_styles : list-like, default None
List of {selector: (attr, value)} dicts; see Notes.
uuid : str, default None
A unique identifier to avoid CSS collisions; generated automatically.
caption : str, default None
Caption to attach to the table.
table_attributes : str, default None
Items that show up in the opening ``<table>`` tag
in addition to automatic (by default) id.
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatting is applied
.. versionadded:: 1.0.0
Attributes
----------
env : Jinja2 jinja2.Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
DataFrame.style : Return a Styler object containing methods for building
a styled HTML representation for the DataFrame.
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = jinja2.PackageLoader("pandas", "io/formats/templates")
env = jinja2.Environment(loader=loader, trim_blocks=True)
template = env.get_template("html.tpl")
def __init__(
self,
data: FrameOrSeriesUnion,
precision: Optional[int] = None,
table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,
uuid: Optional[str] = None,
caption: Optional[str] = None,
table_attributes: Optional[str] = None,
cell_ids: bool = True,
na_rep: Optional[str] = None,
):
self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)
self._todo: List[Tuple[Callable, Tuple, Dict]] = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option("display.precision")
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns: Sequence[int] = []
self.cell_ids = cell_ids
self.na_rep = na_rep
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if self.na_rep is not None and pd.isna(x):
return self.na_rep
elif is_float(x):
display_format = f"{x:.{self.precision}f}"
return display_format
else:
return x
self._display_funcs: DefaultDict[
Tuple[int, int], Callable[[Any], str]
] = defaultdict(lambda: default_display_func)
def _repr_html_(self) -> str:
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(
_shared_docs["to_excel"]
% dict(
axes="index, columns",
klass="Styler",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel="\n .. versionadded:: 0.20",
)
)
def to_excel(
self,
excel_writer,
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[Sequence[Label], bool] = True,
index: bool = True,
index_label: Optional[Union[Label, Sequence[Label]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
self,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return f"{pair['key']}={pair['value']}"
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle_map = defaultdict(list)
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [
{
"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS]),
}
] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [
BLANK_CLASS if name is None else INDEX_NAME_CLASS,
f"level{r}",
]
name = BLANK_VALUE if name is None else name
row_es.append(
{
"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index,
}
)
if clabels:
for c, value in enumerate(clabels[r]):
cs = [
COL_HEADING_CLASS,
f"level{r}",
f"col{c}",
]
cs.extend(
cell_context.get("col_headings", {}).get(r, {}).get(c, [])
)
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (
self.data.index.names
and com.any_not_none(*self.data.index.names)
and not hidden_index
):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS, f"level{c}"]
name = "" if name is None else name
index_header_row.append(
{"type": "th", "value": name, "class": " ".join(cs)}
)
index_header_row.extend(
[{"type": "th", "value": BLANK_VALUE, "class": " ".join([BLANK_CLASS])}]
* (len(clabels[0]) - len(hidden_columns))
)
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [
ROW_HEADING_CLASS,
f"level{c}",
f"row{r}",
]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid),
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, f"row{r}", f"col{c}"]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {
"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns),
}
# only add an id if the cell has a style
if self.cell_ids or not (len(ctx[r, c]) == 1 and ctx[r, c][0] == ""):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(tuple(x.split(":")))
else:
props.append(("", ""))
cellstyle_map[tuple(props)].append(f"row{r}_col{c}")
body.append(row_es)
cellstyle = [
{"props": list(props), "selectors": selectors}
for props, selectors in cellstyle_map.items()
]
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ""
if 'class="' in table_attr:
table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(
head=head,
cellstyle=cellstyle,
body=body,
uuid=uuid,
precision=precision,
table_styles=table_styles,
caption=caption,
table_attributes=table_attr,
)
def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Styler":
"""
Format the text display value of cells.
Parameters
----------
formatter : str, callable, dict or None
If ``formatter`` is None, the default formatter is used
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatting is applied
.. versionadded:: 1.0.0
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if formatter is None:
assert self._display_funcs.default_factory is not None
formatter = self._display_funcs.default_factory()
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter, na_rep)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
formatter = _maybe_wrap_formatter(formatter, na_rep)
locs = product(*(row_locs, col_locs))
for i, j in locs:
self._display_funcs[(i, j)] = formatter
return self
def render(self, **kwargs) -> str:
"""
Render the built up styles to HTML.
Parameters
----------
**kwargs
Any additional keyword arguments are passed
through to ``self.template.render``.
This is useful when you need to provide
additional variables for a custom template.
Returns
-------
rendered : str
The rendered HTML.
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
"""
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
        # so we have the nested anys below
trimmed = [x for x in d["cellstyle"] if any(any(y) for y in x["props"])]
d["cellstyle"] = trimmed
d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs: DataFrame) -> None:
"""
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
Parameters
----------
attrs : DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.items():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy: bool = False) -> "Styler":
styler = Styler(
self.data,
precision=self.precision,
caption=self.caption,
uuid=self.uuid,
table_styles=self.table_styles,
na_rep=self.na_rep,
)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self) -> "Styler":
"""
Deep copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo) -> "Styler":
return self._copy(deepcopy=True)
def clear(self) -> None:
"""
Reset the styler, removing any previously applied styles.
Returns None.
"""
self.ctx.clear()
self._todo = []
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .applymap. The append styles to apply as tuples of
(application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(
self,
func: Callable[..., "Styler"],
axis: Optional[Axis] = 0,
subset=None,
**kwargs,
) -> "Styler":
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis, result_type="expand", **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
f"Function {repr(func)} must return a DataFrame when "
f"passed to `Styler.apply` with axis=None"
)
if not (
result.index.equals(data.index) and result.columns.equals(data.columns)
):
raise ValueError(
f"Result of {repr(func)} must have identical "
f"index and columns as the input"
)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
raise ValueError(
f"Function {repr(func)} returned the wrong shape.\n"
f"Result has shape: {result.shape}\n"
f"Expected shape: {expected_shape}"
)
self._update_ctx(result)
return self
def apply(
self,
func: Callable[..., "Styler"],
axis: Optional[Axis] = 0,
subset=None,
**kwargs,
) -> "Styler":
"""
Apply a function column-wise, row-wise, or table-wise.
Updates the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
A valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice.
**kwargs : dict
Pass along to ``func``.
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
        ...     return ['background-color: yellow' if v == x.max() else ''
        ...             for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append(
(lambda instance: getattr(instance, "_apply"), (func, axis, subset), kwargs)
)
return self
def _applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
        func = partial(func, **kwargs)  # applymap does not accept kwargs, so bind them here
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func: Callable, subset=None, **kwargs) -> "Styler":
"""
Apply a function elementwise.
Updates the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar.
subset : IndexSlice
A valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice.
**kwargs : dict
Pass along to ``func``.
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append(
(lambda instance: getattr(instance, "_applymap"), (func, subset), kwargs)
)
return self
def where(
self,
cond: Callable,
value: str,
other: Optional[str] = None,
subset=None,
**kwargs,
) -> "Styler":
"""
Apply a function elementwise.
        Updates the HTML representation with a style that is selected
        according to the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean.
value : str
Applied when ``cond`` returns true.
other : str
Applied when ``cond`` returns false.
subset : IndexSlice
A valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice.
**kwargs : dict
Pass along to ``cond``.
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ""
return self.applymap(
lambda val: value if cond(val) else other, subset=subset, **kwargs
)
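    # Hedged usage sketch (illustrative only): ``where`` is shorthand for an
    # ``applymap`` with a conditional style; the two calls below are equivalent
    # for highlighting negative values.
    # >>> df.style.where(lambda v: v < 0, 'color: red', other='color: black')
    # >>> df.style.applymap(lambda v: 'color: red' if v < 0 else 'color: black')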
def set_precision(self, precision: int) -> "Styler":
"""
Set the precision used to render.
Parameters
----------
precision : int
Returns
-------
self : Styler
"""
self.precision = precision
return self
def set_table_attributes(self, attributes: str) -> "Styler":
"""
Set the table attributes.
These are the items that show up in the opening ``<table>`` tag
        in addition to the automatic (by default) id.
Parameters
----------
attributes : str
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self) -> List[Tuple[Callable, Tuple, Dict]]:
"""
        Export the styles applied to the current Styler.
        Can be applied to a second Styler with ``Styler.use``.
Returns
-------
styles : list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> "Styler":
"""
Set the styles on the current Styler.
Possibly uses styles from ``Styler.export``.
Parameters
----------
styles : list
List of style functions.
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
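    # Hedged usage sketch (illustrative only): copy the pending style functions
    # from one Styler to another via ``export``/``use``.
    # >>> styled = df1.style.applymap(lambda v: 'color: red' if v < 0 else '')
    # >>> df2.style.use(styled.export())  # df2 now shares df1's style functions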
def set_uuid(self, uuid: str) -> "Styler":
"""
Set the uuid for a Styler.
Parameters
----------
uuid : str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
def set_caption(self, caption: str) -> "Styler":
"""
Set the caption on a Styler.
Parameters
----------
caption : str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(self, table_styles) -> "Styler":
"""
Set the table styles on a Styler.
These are placed in a ``<style>`` tag before the generated HTML table.
Parameters
----------
table_styles : list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
def set_na_rep(self, na_rep: str) -> "Styler":
"""
Set the missing data representation on a Styler.
.. versionadded:: 1.0.0
Parameters
----------
na_rep : str
Returns
-------
self : Styler
"""
self.na_rep = na_rep
return self
def hide_index(self) -> "Styler":
"""
Hide any indices from rendering.
.. versionadded:: 0.23.0
Returns
-------
self : Styler
"""
self.hidden_index = True
return self
def hide_columns(self, subset) -> "Styler":
"""
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
"""
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
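    # Hedged usage sketch (illustrative only; assumes a column labelled 'B'):
    # >>> df.style.hide_index().hide_columns(['B'])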
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color: str) -> str:
return f"background-color: {null_color}" if pd.isna(v) else ""
def highlight_null(self, null_color: str = "red") -> "Styler":
"""
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color : str
Returns
-------
self : Styler
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(
self,
cmap="PuBu",
low: float = 0,
high: float = 0,
axis: Optional[Axis] = 0,
subset=None,
text_color_threshold: float = 0.408,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
) -> "Styler":
"""
Color the background in a gradient style.
The background color is determined according
to the data in each column (optionally row). Requires matplotlib.
Parameters
----------
cmap : str or colormap
Matplotlib colormap.
low : float
Compress the range by the low.
high : float
Compress the range by the high.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
A valid slice for ``data`` to limit the style application to.
text_color_threshold : float or int
Luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
vmin : float, optional
Minimum data value that corresponds to colormap minimum value.
When None (default): the minimum value of the data will be used.
.. versionadded:: 1.0.0
vmax : float, optional
Maximum data value that corresponds to colormap maximum value.
When None (default): the maximum value of the data will be used.
.. versionadded:: 1.0.0
Returns
-------
self : Styler
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(
self._background_gradient,
cmap=cmap,
subset=subset,
axis=axis,
low=low,
high=high,
text_color_threshold=text_color_threshold,
vmin=vmin,
vmax=vmax,
)
return self
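    # Hedged usage sketch (illustrative only): shade one column on a fixed
    # 0-1 scale so colors stay comparable across DataFrames.
    # >>> df = pd.DataFrame({'score': [0.1, 0.5, 0.9]})
    # >>> df.style.background_gradient(cmap='viridis', subset=['score'],
    # ...                              vmin=0.0, vmax=1.0)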
@staticmethod
def _background_gradient(
s,
cmap="PuBu",
low: float = 0,
high: float = 0,
text_color_threshold: float = 0.408,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
):
"""
Color background in a range according to the data.
"""
if (
not isinstance(text_color_threshold, (float, int))
or not 0 <= text_color_threshold <= 1
):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = np.nanmin(s.to_numpy()) if vmin is None else vmin
smax = np.nanmax(s.to_numpy()) if vmax is None else vmax
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.to_numpy(dtype=float)))
def relative_luminance(rgba) -> float:
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
                rgba : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
                    x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba) -> str:
dark = relative_luminance(rgba) < text_color_threshold
text_color = "#f1f1f1" if dark else "#000000"
return f"background-color: {colors.rgb2hex(rgba)};color: {text_color};"
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index,
columns=s.columns,
)
def set_properties(self, subset=None, **kwargs) -> "Styler":
"""
        Method to set one or more non-data-dependent properties for each cell.
Parameters
----------
subset : IndexSlice
A valid slice for ``data`` to limit the style application to.
**kwargs : dict
A dictionary of property, value pairs to be set for each cell.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = ";".join(f"{p}: {v}" for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar(
s,
align: str,
colors: List[str],
width: float = 100,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = np.nanmin(s.to_numpy()) if vmin is None else vmin
smax = np.nanmax(s.to_numpy()) if vmax is None else vmax
if align == "mid":
smin = min(0, smin)
smax = max(0, smax)
elif align == "zero":
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.to_numpy(dtype=float) - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start: float, end: float, color: str) -> str:
"""
Generate CSS code to draw a bar from start to end.
"""
css = "width: 10em; height: 80%;"
if end > start:
css += "background: linear-gradient(90deg,"
if start > 0:
css += f" transparent {start:.1f}%, {color} {start:.1f}%, "
e = min(end, width)
css += f"{color} {e:.1f}%, transparent {e:.1f}%)"
return css
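        # Hedged worked example (illustrative only): css_bar(20.0, 80.0, '#d65f5f')
        # returns "width: 10em; height: 80%;background: linear-gradient(90deg,
        # transparent 20.0%, #d65f5f 20.0%, #d65f5f 80.0%, transparent 80.0%)",
        # i.e. a bar covering 20%-80% of the cell width.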
def css(x):
            if pd.isna(x):
                return ""
import os
import sys
import logging
import numpy as np
import pandas as pd
import boto3
from botocore.exceptions import ClientError
## ENVIRONMENT VARIABLES
BUCKET_NAME = os.getenv("AWS_BUCKET")
# Folder for storing all data for this Batch job
JOB_NAME = os.getenv("JOB_NAME")
## Helper functions
def check_env_var(a_var, a_var_name):
""" Check that an expected environment variable is actually present.
:param a_var: Variable to be checked
:param a_var_name: Name of environment variable that should be present
:return: None; exit program if variable is not present
"""
if a_var is None:
print(f"Environment variable {a_var_name} is not present!")
sys.exit(2)
# endif #
# enddef check_env_var() #
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = os.path.basename(file_name)
# endif #
# Upload the file
s3_client = boto3.client('s3')
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
# endtry #
return True
# enddef upload_file() #
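# --- Hedged usage sketch (illustrative only, not part of the original job) ---
# Shows how upload_file() could push a local results file into this job's
# output folder on S3; the local path "results.csv" is an assumption.
def _example_upload_results(local_path="results.csv"):
    """Illustrative helper: upload a local CSV into the job output folder."""
    key = f"{JOB_NAME}/output/{os.path.basename(local_path)}"
    if not upload_file(local_path, BUCKET_NAME, object_name=key):
        print(f"Upload of {local_path} to s3://{BUCKET_NAME}/{key} failed")
# enddef _example_upload_results() #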
def get_input_csv(bucket_name, file_name):
""" Download and read CSV file from an S3 bucket
:param bucket_name: Bucket in which CSV file is located
:param file_name: key name of the CSV file to read
:return: DataFrame constructed from CSV file
"""
s3 = boto3.client('s3')
response = s3.get_object(Bucket=bucket_name, Key=file_name)
status = response.get("ResponseMetadata", {}).get("HTTPStatusCode")
if status == 200:
print(f"Retrieved file {file_name} from bucket {bucket_name}")
return pd.read_csv(response.get("Body"), index_col=0)
else:
print(f"Error in retrieving file {file_name} from bucket {bucket_name}; {status}")
sys.exit(1)
# endif #
# enddef get_input_csv() #
def list_csv_files(bucket_name, folder_name=""):
""" List all CSV files in a given folder in S3 bucket
:param bucket_name: Bucket in which CSV files is located
:param folder_name: Folder in bucket in which CSV files reside
:return: List of names of CSV files in given bucket
"""
files = []
s3 = boto3.client('s3')
for f in s3.list_objects_v2(Bucket=bucket_name, Prefix=folder_name)["Contents"]:
# only get CSV files
if f["Key"].split('.')[-1] == 'csv':
files.append(f["Key"])
# endif
# endfor #
return files
# enddef list_csv_files() #
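# Hedged note (assumption worth verifying for large jobs): a single
# list_objects_v2 call returns at most 1000 keys, so for bigger output folders
# a paginator would be needed, e.g.:
#     paginator = boto3.client('s3').get_paginator('list_objects_v2')
#     for page in paginator.paginate(Bucket=BUCKET_NAME, Prefix=JOB_NAME + "/output/"):
#         for f in page.get("Contents", []):
#             ...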
# check all required environment variables are defined
check_env_var(BUCKET_NAME, "AWS_BUCKET")
check_env_var(JOB_NAME, "JOB_NAME")
# gather results of all Monte Carlo runs
output_folder = JOB_NAME+"/output/"
mc_results_files = list_csv_files(BUCKET_NAME, output_folder)
mc_results = []
for f in mc_results_files:
mc_results.append(get_input_csv(BUCKET_NAME, f))
# endfor #
# combine final price predictions from all MC simulations in one dataframe
x = pd.concat(mc_results, ignore_index=True)
import gzip
import io
import logging as logg
import re
import time
import numpy as np
import pandas as pd
from anndata import AnnData
from pandas import DataFrame
from typing import Optional, Union, Sequence
from ..obtain_dataset import flyatlas2
def symbol2fbgn(gene: Union[str, list] = None, datapath: Optional[str] = None):
FBgn_datasets = pd.read_csv(datapath, sep="\t")
if isinstance(gene, str):
FBgn_data = FBgn_datasets[FBgn_datasets["Symbol"] == gene]
if len(FBgn_data.index) == 0:
FBgn_data = FBgn_datasets[
FBgn_datasets["Symbol_synonym(s)"].map(
lambda s: gene in str(s).strip().split("|")
)
]
return FBgn_data["FBgn"].iloc[0]
elif isinstance(gene, list):
fbgn_list = []
for onegene in gene:
FBgn_data = FBgn_datasets[FBgn_datasets["Symbol"] == onegene]
if len(FBgn_data.index) == 0:
FBgn_data = FBgn_datasets[
FBgn_datasets["Symbol_synonym(s)"].map(
lambda s: onegene in str(s).strip().split("|")
)
]
fbgn_list.append(FBgn_data["FBgn"].iloc[0])
return fbgn_list
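# Hedged usage sketch (illustrative only; the TSV path and gene symbols are
# placeholders, and the returned FBgn IDs depend on the annotation table used):
# >>> symbol2fbgn("dpp", datapath="fbgn_annotation_ID.tsv")          # single FBgn ID
# >>> symbol2fbgn(["dpp", "wg"], datapath="fbgn_annotation_ID.tsv")  # list of FBgn IDs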
def get_genesummary(
data: DataFrame,
geneby: Optional[str] = None,
gene_nametype: Optional[str] = "symbol",
datapath: Optional[str] = None,
):
"""Get gene's summary from FlyBase."""
avail_gene_nametypes = {"symbol", "FBgn"}
if gene_nametype not in avail_gene_nametypes:
raise ValueError(f"Type of gene name must be one of {avail_gene_nametypes}.")
gene_names = data[geneby].tolist()
    if gene_nametype == "symbol":
gene_names = [symbol2fbgn(gene_name) for gene_name in gene_names]
with gzip.open(datapath, "rb") as input_file:
with io.TextIOWrapper(input_file, encoding="utf-8") as dec:
dec_list = [
str(i).strip().split("\t")
for i in dec.readlines()
if i.startswith("#") is False
]
    summaries = pd.DataFrame(dec_list, columns=["FlyBase ID", "gene_summary"])
# -*- coding: utf-8 -*-
"""
This module contains methods to parse, merge, annotate and filter variant calls.
"""
import pandas as pd
import numpy as np
import re
import os
# for overlap with targest bed
import collections
import interlap
# for VCF processing
import pysam
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(sh)
# functions for reading input files
from .reader import read_targets, read_sample_info
# datatypes for variant summary tables
variants_dtypes = {
'Chr': str,
'Ref': str,
'Alt': str,
'Start': int,
'End': int,
'GeneDetail.refGene': str,
'Func.refGene': str,
'ExonicFunc.refGene': str,
'genomicSuperDups': str,
'SIFT_pred': str,
'Polyphen2_HDIV_pred': str,
'LRT_pred': str,
'MutationTaster_pred': str,
'MutationAssessor_pred': str,
'FATHMM_pred': str,
'GERP++_RS': float,
'phyloP100way_vertebrate': float,
}
variants_na_values = ['', 'NA', '.']
def int_if_not_missing(x: str) -> int:
return int(x) if x != '.' else 0
def load_gene_exons(file: str, genes: pd.Series) -> dict:
"""Load list of exon chr, strand, start and end locations for each gene in genes."""
genes = genes.unique()
# load gene exon information from flat table
print('Loading exon table from ', file)
gene_exon_df = pd.read_csv(file, header=None, sep='\t')
gene_exon_df.rename(columns={2: 'chr', 3: 'strand', 8: 'n_exons', 9: 'exon_starts', 10: 'exon_ends', 12: 'gene'}, inplace=True)
print('Loaded ', len(gene_exon_df), ' rows.')
# filter down to only the genes we actually need
gene_exon_df = gene_exon_df[ gene_exon_df['gene'].isin(genes) ]
print('Filtered to ', len(gene_exon_df), ' rows for ', len(genes), ' genes.')
# identify standard chrs
regex_standard_chr = re.compile(r'^(chr)?([0-9MXY]+)$')
# make dict of genes to exon locations
gene_exons = {}
for gex in gene_exon_df.itertuples():
try:
# skip chrY FIXME: do we want this
if gex.chr == 'chrY':
continue
exon_data = {
'chr': gex.chr,
'strand': gex.strand,
'starts': [int(x) for x in gex.exon_starts.split(',') if len(x) > 0],
'ends': [int(x) for x in gex.exon_ends.split(',') if len(x) > 0],
}
# do we already have exons for this gene?
if gex.gene in gene_exons:
# is this on the same chr (and not an Alt chr)?
if gene_exons[gex.gene]['chr'] == exon_data['chr']:
# if we are on the same strand let's merge them
if gene_exons[gex.gene]['strand'] == exon_data['strand']:
n_total = 0
n_added = 0
for start, end in zip(exon_data['starts'], exon_data['ends']):
n_total += 1
# try to find existing pair that matches
for existing_start, existing_end in zip(gene_exons[gex.gene]['starts'], gene_exons[gex.gene]['ends']):
if existing_start == start and existing_end == end:
break
# otherwise add it
else:
n_added += 1
gene_exons[gex.gene]['starts'].append(start)
gene_exons[gex.gene]['ends'].append(end)
# log.debug('Merging exon data from duplicate rows for gene %s - added %d/%d', gex.gene, n_added, n_total)
else:
# print(gex.gene)
# print(gene_exons[gex.gene])
# print(exon_data)
raise Exception('Exon strand mismatch for {}'.format(gex.gene))
# if the existing copy is on a standard chr we want to keep it
elif regex_standard_chr.match(gene_exons[gex.gene]['chr']):
# normally this should only happen if we are on an alt chr now
if not regex_standard_chr.match(gex.chr):
# log.debug('Skipping copy of gene on nonstandard chr for gene exon table: {} @ {}/{}'.format(
# gex.gene, gex.chr, gene_exons[gex.gene]['chr']
# ))
continue
else:
raise Exception('Two copies of gene on different standard chrs encounted for gene exon table: {} @ {}/{}'.format(
gex.gene, gex.chr, gene_exons[gex.gene]['chr']
))
# if the existing copy was not on a standard chr we will just overwrite it with this copy
else:
pass
# log.debug('Overwriting copy of gene on nonstandard chr for gene exon table: {} @ {}/{}'.format(
# gex.gene, gex.chr, gene_exons[gex.gene]['chr']
# ))
# error checking
assert len(exon_data['starts']) == len(exon_data['ends'])
assert len(exon_data['starts']) == gex.n_exons
for start, end in zip(exon_data['starts'], exon_data['ends']):
assert end > start, 'Exon end is not after start for {}'.format(gex.gene)
# finally save the exon data
gene_exons[gex.gene] = exon_data
except Exception as e:
            print('Ignoring error while processing exon data: {}'.format(
                str(e)))
# make sure we found exons for every gene
missing_genes = set(genes) - set(gene_exons.keys())
if len(missing_genes) > 0:
print('Failed to find exon location information for: %s' % (','.join(missing_genes)))
return gene_exons
def find_closest_exon(row: pd.Series, gexs: dict) -> int:
"""Get distance of variant to the closest exon of the gene it has been annotated with."""
if row['Gene.refGene'] is None:
return None
if not row['Func.refGene'] in ['intronic', 'upstream', 'downstream']:
return None
if not row['Gene.refGene'] in gexs:
return None
gex = gexs[row['Gene.refGene']]
assert (row['Chr'] == gex['chr']) or (row['Chr'] == 'chr'+gex['chr']) or ('chr'+row['Chr'] == gex['chr'])
# upstream of start = negative, downstream of end = positive
start_dist = [row['End'] - boundary_pos for boundary_pos in gex['starts'] if boundary_pos >= row['End']]
end_dist = [row['Start'] - boundary_pos for boundary_pos in gex['ends'] if boundary_pos <= row['Start']]
# find smallest abs(value) for each
closest_start = max(start_dist) if len(start_dist) > 0 else None
closest_end = min(end_dist) if len(end_dist) > 0 else None
# figure out whether we are closer to start or end
if closest_start is None and closest_end is None:
closest_distance = None
elif closest_start is None:
closest_distance = closest_end
elif closest_end is None:
closest_distance = closest_start
else:
closest_distance = closest_start if abs(closest_start) < closest_end else closest_end
# if gene strand is negative it's actually the opposite
if closest_distance is None:
return None
elif gex['strand'] == '+':
return (closest_distance)
elif gex['strand'] == '-':
return (closest_distance * -1)
else:
raise Exception('Invalid strand')
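# Hedged worked example (illustrative only): for a '+' strand gene with a single
# exon spanning 1000-2000, an intronic variant with End=990 lies 10 bp upstream
# of the exon start and find_closest_exon returns -10, while a variant with
# Start=2010 lies 10 bp past the exon end and returns +10; on a '-' strand gene
# the signs are flipped.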
# see: /hts/data6/smcgowan/hts_scripts/TableAnnovar_to_spreadsheet.pl
def calculate_del_score(merged: pd.DataFrame):
"""
    Add a column DeleteriousScore to the dataframe, counting how many tools have assigned this variant a deleterious prediction.
    The score ranges from 0 to 6, corresponding to the tools SIFT, Polyphen2, LRT, MutationTaster, GERP++ and phyloP100way_vertebrate.
Additionally, any stopgain, frameshift or splicing variants are always set to 6.
"""
merged['DeleteriousScore'] = 0
if 'SIFT_pred' in merged.columns:
merged.loc[merged['SIFT_pred'] == 'D', 'DeleteriousScore'] += 1
if 'Polyphen2_HDIV_pred' in merged.columns:
merged.loc[merged['Polyphen2_HDIV_pred'] == 'D', 'DeleteriousScore'] += 1
if 'LRT_pred' in merged.columns:
merged.loc[merged['LRT_pred'] == 'D', 'DeleteriousScore'] += 1
if 'MutationTaster_pred' in merged.columns:
merged.loc[merged['MutationTaster_pred'].isin(['A', 'D']), 'DeleteriousScore'] += 1
if 'GERP++_RS' in merged.columns:
assert merged['GERP++_RS'].dtype == float
merged.loc[merged['GERP++_RS'] >= 5, 'DeleteriousScore'] += 1
if 'phyloP100way_vertebrate' in merged.columns:
assert merged['phyloP100way_vertebrate'].dtype == float
merged.loc[merged['phyloP100way_vertebrate'] > 1, 'DeleteriousScore'] += 1
# set score = 6 for some special cases
if 'ExonicFunc.refGene' in merged.columns:
merged.loc[merged['ExonicFunc.refGene'].notnull() & merged['ExonicFunc.refGene'].str.contains('^stopgain|^frameshift'), 'DeleteriousScore'] = 6
merged.loc[merged['ExonicFunc.refGene'].notnull() & merged['ExonicFunc.refGene'].str.contains('splic'), 'DeleteriousScore'] = 6
if 'Func.refGene' in merged.columns:
merged.loc[merged['Func.refGene'].notnull() & merged['Func.refGene'].str.contains('splic'), 'DeleteriousScore'] = 6
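# Hedged usage sketch (illustrative only; the column values below are made up):
# a frameshift variant is forced to the maximum score regardless of the
# individual tool predictions.
# _demo = pd.DataFrame({
#     'SIFT_pred': ['D', 'T'],
#     'Polyphen2_HDIV_pred': ['D', 'B'],
#     'GERP++_RS': [5.6, 1.2],
#     'phyloP100way_vertebrate': [2.0, 0.1],
#     'ExonicFunc.refGene': ['nonsynonymous SNV', 'frameshift deletion'],
# })
# calculate_del_score(_demo)
# # -> row 0 gets DeleteriousScore 4, row 1 gets DeleteriousScore 6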
def merge_variants_from_annovar(input, output):
"""Merge individual Annovar CSV files together."""
merged = None
for file in input:
sname = os.path.basename(file).split('.')[0]
print('Reading', file, ' ( Sample =', sname, ')...')
try:
df = pd.read_csv(file, index_col = False, dtype = variants_dtypes, na_values = variants_na_values)
print('Data shape:', str(df.shape))
if len(df) > 0:
df['Sample'] = sname
if merged is None:
merged = df
else:
merged = merged.append(df, ignore_index = True)
else:
print ('Ignoring - empty!')
except pd.errors.EmptyDataError:
print('No data for', file, ', skipping.')
if merged is None:
print('No variants found in any of the samples, creating empty file!')
open(output[0], 'a').close()
else:
print('Merged data shape:', str(merged.shape))
merged.to_csv(output[0], index = False)
def merge_variants_unannotated(input_vcfs, output_file):
"""Merge unannotated VCF files into a single CSV."""
merged = None
for file in input_vcfs:
sname = os.path.basename(file).split('.')[0]
fileinfo = os.stat(file)
print('Reading', file, ' ( Sample =', sname, ') with size', fileinfo.st_size, ' bytes...')
if fileinfo.st_size == 0:
print('Empty file, skipping')
else:
rows = []
with pysam.VariantFile(file) as vcf:
for variant in vcf:
assert len(variant.alts) == 1, 'Invalid VCF - multiallelics should have been split!'
row = collections.OrderedDict()
row['Sample'] = sname
row['Chr'] = variant.chrom
row['Start'] = variant.start + 1 # pysam is 0-based
row['End'] = variant.start + len(variant.ref)
row['Ref'] = variant.ref
row['Alt'] = ','.join(variant.alts)
row['Otherinfo'] = str(variant).strip()
rows.append(row)
if len(rows) > 0:
print('Found', len(rows), 'rows.')
                df = pd.DataFrame(rows)
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Validation API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import pandas as pd
import pyarrow as pa
import tensorflow as tf
import tensorflow_data_validation as tfdv
from tensorflow_data_validation import types
from tensorflow_data_validation.api import validation_api
from tensorflow_data_validation.api import validation_options
from tensorflow_data_validation.skew.protos import feature_skew_results_pb2
from tensorflow_data_validation.statistics import stats_options
from tensorflow_data_validation.types import FeaturePath
from tensorflow_data_validation.utils import schema_util
from tensorflow_data_validation.utils import test_util
from google.protobuf import text_format
from tensorflow.python.util.protobuf import compare # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
IDENTIFY_ANOMALOUS_EXAMPLES_VALID_INPUTS = [{
'testcase_name':
'no_anomalies',
'examples': [
pa.RecordBatch.from_arrays([pa.array([['A']])], ['annotated_enum']),
pa.RecordBatch.from_arrays([pa.array([['C']])], ['annotated_enum']),
],
'schema_text':
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 1
}
type: BYTES
domain: "MyAloneEnum"
}
feature {
name: "ignore_this"
lifecycle_stage: DEPRECATED
value_count {
min:1
}
presence {
min_count: 1
}
type: BYTES
}
""",
'expected_result': []
}, {
'testcase_name':
'same_anomaly_reason',
'examples': [
pa.RecordBatch.from_arrays([pa.array([['D']])], ['annotated_enum']),
pa.RecordBatch.from_arrays([pa.array([['D']])], ['annotated_enum']),
pa.RecordBatch.from_arrays([pa.array([['C']])], ['annotated_enum']),
],
'schema_text':
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 1
}
type: BYTES
domain: "MyAloneEnum"
}
""",
'expected_result': [('annotated_enum_ENUM_TYPE_UNEXPECTED_STRING_VALUES',
pa.RecordBatch.from_arrays([pa.array([['D']])],
['annotated_enum'])),
('annotated_enum_ENUM_TYPE_UNEXPECTED_STRING_VALUES',
pa.RecordBatch.from_arrays([pa.array([['D']])],
['annotated_enum']))]
}, {
'testcase_name':
'different_anomaly_reasons',
'examples': [
pa.RecordBatch.from_arrays([pa.array([['D']])], ['annotated_enum']),
pa.RecordBatch.from_arrays([pa.array([['C']])], ['annotated_enum']),
pa.RecordBatch.from_arrays([pa.array([[1]])],
['feature_not_in_schema']),
],
'schema_text':
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 0
}
type: BYTES
domain: "MyAloneEnum"
}
""",
'expected_result': [('annotated_enum_ENUM_TYPE_UNEXPECTED_STRING_VALUES',
pa.RecordBatch.from_arrays([pa.array([['D']])],
['annotated_enum'])),
('feature_not_in_schema_SCHEMA_NEW_COLUMN',
pa.RecordBatch.from_arrays([pa.array([[1]])],
['feature_not_in_schema']))]
}]
class ValidationTestCase(parameterized.TestCase):
def _assert_equal_anomalies(self, actual_anomalies, expected_anomalies):
    # Check that the actual anomalies match the expected anomalies.
for feature_name in expected_anomalies:
self.assertIn(feature_name, actual_anomalies.anomaly_info)
# Doesn't compare the diff_regions.
actual_anomalies.anomaly_info[feature_name].ClearField('diff_regions')
self.assertEqual(actual_anomalies.anomaly_info[feature_name],
expected_anomalies[feature_name])
self.assertEqual(
len(actual_anomalies.anomaly_info), len(expected_anomalies))
class ValidationApiTest(ValidationTestCase):
def test_infer_schema(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 7
features: {
path { step: 'feature1' }
type: STRING
string_stats: {
common_stats: {
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
unique: 3
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_schema = text_format.Parse(
"""
feature {
name: "feature1"
value_count: {
min: 1
max: 1
}
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
}
""", schema_pb2.Schema())
validation_api._may_be_set_legacy_flag(expected_schema)
# Infer the schema from the stats.
actual_schema = validation_api.infer_schema(statistics,
infer_feature_shape=False)
self.assertEqual(actual_schema, expected_schema)
def test_infer_schema_with_string_domain(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 6
features: {
path { step: 'feature1' }
type: STRING
string_stats: {
common_stats: {
num_missing: 3
num_non_missing: 3
min_num_values: 1
max_num_values: 1
}
unique: 2
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 2.0
}
buckets {
low_rank: 1
high_rank: 1
label: "b"
sample_count: 1.0
}
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_schema = text_format.Parse(
"""
feature {
name: "feature1"
value_count: {
min: 1
max: 1
}
presence: {
min_count: 1
}
type: BYTES
domain: "feature1"
}
string_domain {
name: "feature1"
value: "a"
value: "b"
}
""", schema_pb2.Schema())
validation_api._may_be_set_legacy_flag(expected_schema)
# Infer the schema from the stats.
actual_schema = validation_api.infer_schema(statistics)
self.assertEqual(actual_schema, expected_schema)
def test_infer_schema_without_string_domain(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 6
features: {
path { step: 'feature1' }
type: STRING
string_stats: {
common_stats: {
num_missing: 3
num_non_missing: 3
min_num_values: 1
max_num_values: 1
}
unique: 3
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 2.0
}
buckets {
low_rank: 1
high_rank: 1
label: "b"
sample_count: 1.0
}
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_schema = text_format.Parse(
"""
feature {
name: "feature1"
value_count: {
min: 1
max: 1
}
presence: {
min_count: 1
}
type: BYTES
}
""", schema_pb2.Schema())
validation_api._may_be_set_legacy_flag(expected_schema)
# Infer the schema from the stats.
actual_schema = validation_api.infer_schema(statistics,
max_string_domain_size=1)
self.assertEqual(actual_schema, expected_schema)
def test_infer_schema_with_infer_shape(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 7
features: {
path { step: 'feature1' }
type: STRING
string_stats: {
common_stats: {
num_missing: 0
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
unique: 3
}
}
features: {
path { step: 'feature2' }
type: STRING
string_stats: {
common_stats: {
num_missing: 2
num_non_missing: 5
min_num_values: 1
max_num_values: 1
}
unique: 5
}
}
features: {
path { step: 'feature3' }
type: STRING
string_stats: {
common_stats: {
num_missing: 0
num_non_missing: 7
min_num_values: 0
max_num_values: 1
}
unique: 5
}
}
features: {
path { step: 'nested_feature1' }
type: STRING
string_stats: {
common_stats: {
num_missing: 0
num_non_missing: 7
min_num_values: 1
max_num_values: 1
presence_and_valency_stats {
num_missing: 0
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
presence_and_valency_stats {
num_missing: 0
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
}
unique: 3
}
}
features: {
path { step: 'nested_feature2' }
type: STRING
string_stats: {
common_stats: {
num_missing: 2
num_non_missing: 5
min_num_values: 1
max_num_values: 1
presence_and_valency_stats {
num_missing: 2
num_non_missing: 5
min_num_values: 1
max_num_values: 1
}
presence_and_valency_stats {
num_missing: 2
num_non_missing: 5
min_num_values: 1
max_num_values: 1
}
}
unique: 5
}
}
features: {
path { step: 'nested_feature3' }
type: STRING
string_stats: {
common_stats: {
num_missing: 0
num_non_missing: 7
min_num_values: 0
max_num_values: 1
presence_and_valency_stats {
num_missing: 0
num_non_missing: 7
min_num_values: 0
max_num_values: 1
}
presence_and_valency_stats {
num_missing: 0
num_non_missing: 7
min_num_values: 0
max_num_values: 1
}
}
unique: 5
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_schema = text_format.Parse(
"""
feature {
name: "feature1"
shape { dim { size: 1 } }
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
}
feature {
name: "feature2"
value_count: { min: 1 max: 1}
presence: {
min_count: 1
}
type: BYTES
}
feature {
name: "feature3"
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
}
feature {
name: "nested_feature1"
shape: {
dim { size: 1 }
dim { size: 1 }
}
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
}
feature {
name: "nested_feature2"
value_counts: {
value_count { min: 1 max: 1 }
value_count { min: 1 max: 1 }
}
presence: {
min_count: 1
}
type: BYTES
}
feature {
name: "nested_feature3"
value_counts: {
value_count { }
value_count { }
}
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
}
""", schema_pb2.Schema())
validation_api._may_be_set_legacy_flag(expected_schema)
# Infer the schema from the stats.
actual_schema = validation_api.infer_schema(statistics,
infer_feature_shape=True)
self.assertEqual(actual_schema, expected_schema)
def test_infer_schema_with_transformations(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 7
features: {
path { step: 'foo' }
type: STRING
string_stats: {
common_stats: {
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
unique: 3
}
}
features: {
path { step: 'xyz_query' }
type: STRING
string_stats: {
common_stats: {
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
unique: 3
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
def _semantic_type_transformation_fn(schema, unused_stats):
for feature in schema.feature:
if 'query' in feature.name:
feature.natural_language_domain.CopyFrom(
schema_pb2.NaturalLanguageDomain())
return schema
expected_schema = text_format.Parse(
"""
feature {
name: "foo"
value_count: {
min: 1
max: 1
}
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
}
feature {
name: "xyz_query"
value_count: {
min: 1
max: 1
}
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
natural_language_domain {}
}
""", schema_pb2.Schema())
validation_api._may_be_set_legacy_flag(expected_schema)
# Infer the schema from the stats.
actual_schema = validation_api.infer_schema(
statistics, infer_feature_shape=False,
schema_transformations=[_semantic_type_transformation_fn])
self.assertEqual(actual_schema, expected_schema)
def test_infer_schema_multiple_datasets_with_default_slice(self):
statistics = text_format.Parse(
"""
datasets {
name: 'All Examples'
num_examples: 7
features: {
path { step: 'feature1' }
type: STRING
string_stats: {
common_stats: {
num_non_missing: 7
min_num_values: 1
max_num_values: 2
}
unique: 3
}
}
}
datasets {
name: 'feature1_testvalue'
num_examples: 4
features: {
path { step: 'feature1' }
type: STRING
string_stats: {
common_stats: {
num_non_missing: 4
min_num_values: 1
max_num_values: 1
}
unique: 1
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_schema = text_format.Parse(
"""
feature {
name: "feature1"
value_count: {
min: 1
}
presence: {
min_fraction: 1.0
min_count: 1
}
type: BYTES
}
""", schema_pb2.Schema())
validation_api._may_be_set_legacy_flag(expected_schema)
# Infer the schema from the stats.
actual_schema = validation_api.infer_schema(statistics,
infer_feature_shape=False)
self.assertEqual(actual_schema, expected_schema)
def test_infer_schema_invalid_statistics_input(self):
with self.assertRaisesRegexp(
TypeError, '.*should be a DatasetFeatureStatisticsList proto.*'):
_ = validation_api.infer_schema({})
def test_infer_schema_invalid_multiple_datasets_no_default_slice(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([
statistics_pb2.DatasetFeatureStatistics(),
statistics_pb2.DatasetFeatureStatistics()
])
with self.assertRaisesRegexp(ValueError,
'.*statistics proto with one dataset.*'):
_ = validation_api.infer_schema(statistics)
def test_infer_schema_composite_feature_stats(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 4
features: {
path { step: 'value' }
type: STRING
string_stats: {
common_stats: {
num_non_missing: 2
num_missing: 2
min_num_values: 1
max_num_values: 1
}
}
}
features: {
path { step: 'weight' }
type: FLOAT
num_stats: {
common_stats: {
num_non_missing: 4
min_num_values: 1
max_num_values: 1
}
}
}
features: {
path { step: 'index' }
type: INT
num_stats: {
common_stats: {
num_non_missing: 4
min_num_values: 1
max_num_values: 1
}
}
}
features: {
path { step: 'weighted_feature' }
custom_stats {
name: 'missing_value'
num: 2
}
}
features: {
path { step: 'sparse_feature' }
custom_stats {
name: 'missing_value'
num: 2
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_schema = text_format.Parse(
"""
feature {
name: "value"
value_count: {
min: 1
max: 1
}
presence: {
min_count: 1
}
type: BYTES
}
feature {
name: "weight"
value_count: {
min: 1
max: 1
}
presence: {
min_fraction: 1
min_count: 1
}
type: FLOAT
}
feature {
name: "index"
value_count: {
min: 1
max: 1
}
presence: {
min_fraction: 1
min_count: 1
}
type: INT
}
""", schema_pb2.Schema())
validation_api._may_be_set_legacy_flag(expected_schema)
# Infer the schema from the stats.
actual_schema = validation_api.infer_schema(statistics,
infer_feature_shape=False)
self.assertEqual(actual_schema, expected_schema)
def _assert_drift_skew_info(
self, actual_drift_skew_infos, expected_drift_skew_infos):
self.assertLen(actual_drift_skew_infos, len(expected_drift_skew_infos))
expected_drift_skew_infos = [
text_format.Parse(e, anomalies_pb2.DriftSkewInfo())
for e in expected_drift_skew_infos
]
path_to_expected = {
tuple(e.path.step): e for e in expected_drift_skew_infos
}
def check_measurements(actual_measurements, expected_measurements):
for actual_measurement, expected_measurement in zip(
actual_measurements, expected_measurements):
self.assertEqual(actual_measurement.type, expected_measurement.type)
self.assertAlmostEqual(actual_measurement.value,
expected_measurement.value)
self.assertAlmostEqual(actual_measurement.threshold,
expected_measurement.threshold)
for actual in actual_drift_skew_infos:
expected = path_to_expected[tuple(actual.path.step)]
self.assertIsNotNone(
expected, 'Did not expect a DriftSkewInfo for {}'.format(
tuple(actual.path.step)))
check_measurements(actual.drift_measurements, expected.drift_measurements)
check_measurements(actual.skew_measurements, expected.skew_measurements)
def test_update_schema(self):
schema = text_format.Parse(
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 1
}
type: BYTES
domain: "MyAloneEnum"
}
""", schema_pb2.Schema())
statistics = text_format.Parse(
"""
datasets{
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 3
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
unique: 3
rank_histogram {
buckets {
label: "D"
sample_count: 1
}
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_anomalies = {
'annotated_enum':
text_format.Parse(
"""
path {
step: "annotated_enum"
}
description: "Examples contain values missing from the schema: D (?). "
severity: ERROR
short_description: "Unexpected string values"
reason {
type: ENUM_TYPE_UNEXPECTED_STRING_VALUES
short_description: "Unexpected string values"
description: "Examples contain values missing from the schema: D (?). "
}
""", anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(statistics, schema)
self._assert_equal_anomalies(anomalies, expected_anomalies)
# Verify the updated schema.
actual_updated_schema = validation_api.update_schema(schema, statistics)
expected_updated_schema = schema
schema_util.get_domain(
expected_updated_schema,
types.FeaturePath(['annotated_enum'])).value.append('D')
self.assertEqual(actual_updated_schema, expected_updated_schema)
# Verify that there are no anomalies with the updated schema.
actual_updated_anomalies = validation_api.validate_statistics(
statistics, actual_updated_schema)
self._assert_equal_anomalies(actual_updated_anomalies, {})
def test_update_schema_multiple_datasets_with_default_slice(self):
schema = text_format.Parse(
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 1
}
type: BYTES
domain: "MyAloneEnum"
}
""", schema_pb2.Schema())
statistics = text_format.Parse(
"""
datasets{
name: 'All Examples'
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 3
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
unique: 3
rank_histogram {
buckets {
label: "D"
sample_count: 1
}
}
}
}
}
datasets{
name: 'other dataset'
num_examples: 5
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 3
num_non_missing: 2
min_num_values: 1
max_num_values: 1
}
unique: 3
rank_histogram {
buckets {
label: "E"
sample_count: 1
}
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_anomalies = {
'annotated_enum':
text_format.Parse(
"""
path {
step: "annotated_enum"
}
description: "Examples contain values missing from the schema: D (?). "
severity: ERROR
short_description: "Unexpected string values"
reason {
type: ENUM_TYPE_UNEXPECTED_STRING_VALUES
short_description: "Unexpected string values"
description: "Examples contain values missing from the schema: D (?). "
}
""", anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(statistics, schema)
self._assert_equal_anomalies(anomalies, expected_anomalies)
# Verify the updated schema.
actual_updated_schema = validation_api.update_schema(schema, statistics)
expected_updated_schema = schema
schema_util.get_domain(
expected_updated_schema,
types.FeaturePath(['annotated_enum'])).value.append('D')
self.assertEqual(actual_updated_schema, expected_updated_schema)
# Verify that there are no anomalies with the updated schema.
actual_updated_anomalies = validation_api.validate_statistics(
statistics, actual_updated_schema)
self._assert_equal_anomalies(actual_updated_anomalies, {})
def test_update_schema_invalid_schema_input(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([statistics_pb2.DatasetFeatureStatistics()])
with self.assertRaisesRegexp(
TypeError, 'schema is of type.*'):
_ = validation_api.update_schema({}, statistics)
def test_update_schema_invalid_statistics_input(self):
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
TypeError, 'statistics is of type.*'):
_ = validation_api.update_schema(schema, {})
def test_update_schema_invalid_multiple_datasets_no_default_slice(self):
schema = schema_pb2.Schema()
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([
statistics_pb2.DatasetFeatureStatistics(),
statistics_pb2.DatasetFeatureStatistics()
])
with self.assertRaisesRegexp(ValueError,
'.*statistics proto with one dataset.*'):
_ = validation_api.update_schema(schema, statistics)
# See b/179197768.
def test_update_schema_remove_inferred_shape(self):
stats1 = text_format.Parse("""
datasets {
num_examples: 10000
features {
path {
step: ["f1"]
}
num_stats {
common_stats {
num_non_missing: 10000
num_missing: 0
min_num_values: 1
max_num_values: 1
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
stats2 = text_format.Parse("""
datasets {
num_examples: 10000
features {
path {
step: ["f1"]
}
num_stats {
common_stats {
num_non_missing: 9999
num_missing: 1
min_num_values: 1
max_num_values: 1
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
# Scenario 1: shape is inferred from stats1, then should be removed
# when schema is updated against stats2.
schema = validation_api.infer_schema(stats1, infer_feature_shape=True)
self.assertLen(schema.feature, 1)
self.assertTrue(schema.feature[0].HasField('shape'))
updated_schema = validation_api.update_schema(schema, stats2)
self.assertLen(updated_schema.feature, 1)
self.assertFalse(updated_schema.feature[0].HasField('shape'))
# once shape is dropped, it should not be added back, even if the stats
# provided support a fixed shape.
updated_schema = validation_api.update_schema(updated_schema, stats1)
self.assertLen(updated_schema.feature, 1)
self.assertFalse(updated_schema.feature[0].HasField('shape'))
# Scenario 2: shape is not inferred from stats2, then should not be
# added when schema is updated against stat1.
schema = validation_api.infer_schema(stats2, infer_feature_shape=True)
self.assertLen(schema.feature, 1)
self.assertFalse(schema.feature[0].HasField('shape'))
updated_schema = validation_api.update_schema(schema, stats1)
self.assertLen(updated_schema.feature, 1)
self.assertFalse(updated_schema.feature[0].HasField('shape'))
# Scenario 3: shape is inferred from stats1, then should not be removed
# when schema is updated against (again) stats1.
schema = validation_api.infer_schema(stats1, infer_feature_shape=True)
self.assertLen(schema.feature, 1)
self.assertTrue(schema.feature[0].HasField('shape'))
updated_schema = validation_api.update_schema(schema, stats1)
self.assertLen(updated_schema.feature, 1)
self.assertTrue(updated_schema.feature[0].HasField('shape'))
def test_validate_stats(self):
schema = text_format.Parse(
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 1
}
type: BYTES
domain: "MyAloneEnum"
}
feature {
name: "ignore_this"
lifecycle_stage: DEPRECATED
value_count {
min:1
}
presence {
min_count: 1
}
type: BYTES
}
""", schema_pb2.Schema())
statistics = text_format.Parse(
"""
datasets{
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 3
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
unique: 3
rank_histogram {
buckets {
label: "D"
sample_count: 1
}
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_anomalies = {
'annotated_enum':
text_format.Parse(
"""
path {
step: "annotated_enum"
}
description: "Examples contain values missing from the schema: D (?). "
severity: ERROR
short_description: "Unexpected string values"
reason {
type: ENUM_TYPE_UNEXPECTED_STRING_VALUES
short_description: "Unexpected string values"
description: "Examples contain values missing from the schema: D (?). "
}
""", anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(statistics, schema)
self._assert_equal_anomalies(anomalies, expected_anomalies)
def test_validate_stats_weighted_feature(self):
schema = text_format.Parse(
"""
feature {
name: "value"
}
feature {
name: "weight"
}
weighted_feature {
name: "weighted_feature"
feature {
step: "value"
}
weight_feature {
step: "weight"
}
}
""", schema_pb2.Schema())
statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'weighted_feature' }
custom_stats {
name: 'missing_weight'
num: 1.0
}
custom_stats {
name: 'missing_value'
num: 2.0
}
custom_stats {
name: 'min_weight_length_diff'
num: 3.0
}
custom_stats {
name: 'max_weight_length_diff'
num: 4.0
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_anomalies = {
'weighted_feature':
text_format.Parse(
"""
path {
step: "weighted_feature"
}
description: "Found 1 examples missing weight feature. Found 2 examples missing value feature. Mismatch between weight and value feature with min_weight_length_diff = 3 and max_weight_length_diff = 4."
severity: ERROR
short_description: "Multiple errors"
reason {
type: WEIGHTED_FEATURE_MISSING_WEIGHT
short_description: "Missing weight feature"
description: "Found 1 examples missing weight feature."
}
reason {
type: WEIGHTED_FEATURE_MISSING_VALUE
short_description: "Missing value feature"
description: "Found 2 examples missing value feature."
}
reason {
type: WEIGHTED_FEATURE_LENGTH_MISMATCH
short_description: "Length mismatch between value and weight feature"
description: "Mismatch between weight and value feature with min_weight_length_diff = 3 and max_weight_length_diff = 4."
}
""", anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(statistics, schema)
self._assert_equal_anomalies(anomalies, expected_anomalies)
def test_validate_stats_weighted_feature_name_collision(self):
schema = text_format.Parse(
"""
feature {
name: "value"
}
feature {
name: "weight"
}
feature {
name: "colliding_feature"
}
weighted_feature {
name: "colliding_feature"
feature {
step: "value"
}
weight_feature {
step: "weight"
}
}
""", schema_pb2.Schema())
statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'colliding_feature' }
custom_stats {
name: 'missing_weight'
num: 1.0
}
custom_stats {
name: 'missing_value'
num: 2.0
}
custom_stats {
name: 'min_weight_length_diff'
num: 3.0
}
custom_stats {
name: 'max_weight_length_diff'
num: 4.0
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_anomalies = {
'colliding_feature':
text_format.Parse(
"""
path {
step: "colliding_feature"
}
description: "Weighted feature name collision."
severity: ERROR
short_description: "Weighted feature name collision"
reason {
type: WEIGHTED_FEATURE_NAME_COLLISION
short_description: "Weighted feature name collision"
description: "Weighted feature name collision."
}
""", anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(statistics, schema)
self._assert_equal_anomalies(anomalies, expected_anomalies)
def test_validate_stats_weighted_feature_sparse_feature_name_collision(self):
schema = text_format.Parse(
"""
feature {
name: "value"
}
feature {
name: "weight"
}
feature {
name: "index"
}
weighted_feature {
name: "colliding_feature"
feature {
step: "value"
}
weight_feature {
step: "weight"
}
}
sparse_feature {
name: "colliding_feature"
value_feature {
name: "value"
}
index_feature {
name: "index"
}
}
""", schema_pb2.Schema())
statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'colliding_feature' }
custom_stats {
name: 'missing_weight'
num: 1.0
}
custom_stats {
name: 'missing_index'
num: 1.0
}
custom_stats {
name: 'missing_value'
num: 2.0
}
custom_stats {
name: 'missing_value'
num: 2.0
}
custom_stats {
name: 'min_length_diff'
num: 3.0
}
custom_stats {
name: 'min_weight_length_diff'
num: 3.0
}
custom_stats {
name: 'max_length_diff'
num: 4.0
}
custom_stats {
name: 'max_weight_length_diff'
num: 4.0
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
expected_anomalies = {
'colliding_feature':
text_format.Parse(
"""
path {
step: "colliding_feature"
}
description: "Weighted feature name collision."
severity: ERROR
short_description: "Weighted feature name collision"
reason {
type: WEIGHTED_FEATURE_NAME_COLLISION
short_description: "Weighted feature name collision"
description: "Weighted feature name collision."
}
""", anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(statistics, schema)
self._assert_equal_anomalies(anomalies, expected_anomalies)
# pylint: disable=line-too-long
_annotated_enum_anomaly_info = """
path {
step: "annotated_enum"
}
description: "Examples contain values missing from the schema: b (?). The Linfty distance between current and previous is 0.25 (up to six significant digits), above the threshold 0.01. The feature value with maximum difference is: b"
severity: ERROR
short_description: "Multiple errors"
reason {
type: ENUM_TYPE_UNEXPECTED_STRING_VALUES
short_description: "Unexpected string values"
description: "Examples contain values missing from the schema: b (?). "
}
reason {
type: COMPARATOR_L_INFTY_HIGH
short_description: "High Linfty distance between current and previous"
description: "The Linfty distance between current and previous is 0.25 (up to six significant digits), above the threshold 0.01. The feature value with maximum difference is: b"
}"""
_bar_anomaly_info = """
path {
step: "bar"
}
short_description: "High Linfty distance between training and serving"
description: "The Linfty distance between training and serving is 0.2 (up to six significant digits), above the threshold 0.1. The feature value with maximum difference is: a"
severity: ERROR
reason {
type: COMPARATOR_L_INFTY_HIGH
short_description: "High Linfty distance between training and serving"
description: "The Linfty distance between training and serving is 0.2 (up to six significant digits), above the threshold 0.1. The feature value with maximum difference is: a"
}"""
def test_validate_stats_with_previous_stats(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 2
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats { num_non_missing: 2 num_missing: 0 max_num_values: 1 }
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
previous_statistics = text_format.Parse(
"""
datasets {
num_examples: 4
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats { num_non_missing: 4 num_missing: 0 max_num_values: 1 }
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
schema = text_format.Parse(
"""
feature {
name: "annotated_enum"
type: BYTES
domain: "annotated_enum"
drift_comparator { infinity_norm { threshold: 0.01 } }
}
string_domain { name: "annotated_enum" value: "a" }
""", schema_pb2.Schema())
expected_anomalies = {
'annotated_enum': text_format.Parse(self._annotated_enum_anomaly_info,
anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(
statistics, schema, previous_statistics=previous_statistics)
self._assert_drift_skew_info(anomalies.drift_skew_info, [
"""
path { step: ["annotated_enum"] }
drift_measurements {
type: L_INFTY
value: 0.25
threshold: 0.01
}
""",
])
self._assert_equal_anomalies(anomalies, expected_anomalies)
@parameterized.named_parameters(*[
dict(testcase_name='no_skew',
has_skew=False),
dict(testcase_name='with_skew',
has_skew=True),
])
def test_validate_stats_with_serving_stats(self, has_skew):
statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 2 }
buckets { label: "c" sample_count: 7 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
serving_statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
name: 'bar'
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
buckets { label: "c" sample_count: 6 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
threshold = 0.1 if has_skew else 1.0
schema = text_format.Parse(
"""
feature {
name: 'bar'
type: BYTES
skew_comparator {
infinity_norm { threshold: %f }
}
}""" % threshold, schema_pb2.Schema())
expected_anomalies = {}
if has_skew:
expected_anomalies['bar'] = text_format.Parse(
self._bar_anomaly_info, anomalies_pb2.AnomalyInfo())
# Validate the stats.
anomalies = validation_api.validate_statistics(
statistics, schema, serving_statistics=serving_statistics)
self._assert_equal_anomalies(anomalies, expected_anomalies)
self._assert_drift_skew_info(anomalies.drift_skew_info, [
"""
path { step: ["bar"] }
skew_measurements {
type: L_INFTY
value: 0.2
threshold: %f
}
""" % threshold,
])
def test_validate_stats_with_environment(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 1000
features {
path { step: 'feature' }
type: STRING
string_stats {
common_stats {
num_non_missing: 1000
min_num_values: 1
max_num_values: 1
}
unique: 3
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
schema = text_format.Parse(
"""
default_environment: "TRAINING"
default_environment: "SERVING"
feature {
name: "label"
not_in_environment: "SERVING"
value_count { min: 1 max: 1 }
presence { min_count: 1 }
type: BYTES
}
feature {
name: "feature"
value_count { min: 1 max: 1 }
presence { min_count: 1 }
type: BYTES
}
""", schema_pb2.Schema())
expected_anomalies_training = {
'label':
text_format.Parse(
"""
path {
step: "label"
}
description: "Column is completely missing"
severity: ERROR
short_description: "Column dropped"
reason {
type: SCHEMA_MISSING_COLUMN
short_description: "Column dropped"
description: "Column is completely missing"
}
""", anomalies_pb2.AnomalyInfo())
}
# Validate the stats in TRAINING environment.
anomalies_training = validation_api.validate_statistics(
statistics, schema, environment='TRAINING')
self._assert_equal_anomalies(anomalies_training,
expected_anomalies_training)
# Validate the stats in SERVING environment.
anomalies_serving = validation_api.validate_statistics(
statistics, schema, environment='SERVING')
self._assert_equal_anomalies(anomalies_serving, {})
def test_validate_stats_with_previous_and_serving_stats(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 2 }
buckets { label: "c" sample_count: 7 }
}
}
}
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
previous_statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 10
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
buckets { label: "c" sample_count: 6 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
serving_statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
buckets { label: "c" sample_count: 6 }
}
}
}
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 10
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
schema = text_format.Parse(
"""
feature {
name: 'bar'
type: BYTES
skew_comparator { infinity_norm { threshold: 0.1 } }
}
feature {
name: "annotated_enum"
type: BYTES
domain: "annotated_enum"
drift_comparator { infinity_norm { threshold: 0.01 } }
}
string_domain { name: "annotated_enum" value: "a" }
""", schema_pb2.Schema())
expected_anomalies = {
'bar': text_format.Parse(self._bar_anomaly_info,
anomalies_pb2.AnomalyInfo()),
'annotated_enum': text_format.Parse(self._annotated_enum_anomaly_info,
anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(
statistics,
schema,
previous_statistics=previous_statistics,
serving_statistics=serving_statistics)
self._assert_equal_anomalies(anomalies, expected_anomalies)
self._assert_drift_skew_info(anomalies.drift_skew_info, [
"""
path { step: ["bar"] }
skew_measurements {
type: L_INFTY
value: 0.2
threshold: 0.1
}
""",
"""
path { step: ["annotated_enum"] }
drift_measurements {
type: L_INFTY
value: 0.25
threshold: 0.01
}
""",
])
# pylint: enable=line-too-long
def test_validate_stats_with_previous_and_serving_stats_with_default_slices(
self):
# All input statistics protos have multiple datasets, one of which
# corresponds to the default slice.
statistics = text_format.Parse(
"""
datasets {
name: 'All Examples'
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
previous_statistics = text_format.Parse(
"""
datasets {
name: 'All Examples'
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 10
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
}
datasets {
name: "annotated_enum_b"
num_examples: 1
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 1
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
serving_statistics = text_format.Parse(
"""
datasets {
name: 'All Examples'
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 10
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
}
datasets {
name: "annotated_enum_a"
num_examples: 3
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 3
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
schema = text_format.Parse(
"""
feature {
name: "annotated_enum"
type: BYTES
domain: "annotated_enum"
drift_comparator { infinity_norm { threshold: 0.01 } }
}
string_domain { name: "annotated_enum" value: "a" }
""", schema_pb2.Schema())
expected_anomalies = {
'annotated_enum': text_format.Parse(self._annotated_enum_anomaly_info,
anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics(
statistics,
schema,
previous_statistics=previous_statistics,
serving_statistics=serving_statistics)
self._assert_equal_anomalies(anomalies, expected_anomalies)
# pylint: enable=line-too-long
def test_validate_stats_invalid_statistics_input(self):
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
TypeError, 'statistics is of type.*'):
_ = validation_api.validate_statistics({}, schema)
def test_validate_stats_invalid_previous_statistics_input(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([statistics_pb2.DatasetFeatureStatistics()])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
TypeError, 'previous_statistics is of type.*'):
_ = validation_api.validate_statistics(statistics, schema,
previous_statistics='test')
def test_validate_stats_internal_invalid_previous_span_statistics_input(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([statistics_pb2.DatasetFeatureStatistics()])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(TypeError,
'previous_span_statistics is of type.*'):
_ = validation_api.validate_statistics_internal(
statistics, schema, previous_span_statistics='test')
def test_validate_stats_invalid_serving_statistics_input(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([statistics_pb2.DatasetFeatureStatistics()])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
TypeError, 'serving_statistics is of type.*'):
_ = validation_api.validate_statistics(statistics, schema,
serving_statistics='test')
def test_validate_stats_invalid_previous_version_statistics_input(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([statistics_pb2.DatasetFeatureStatistics()])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(TypeError,
'previous_version_statistics is of type.*'):
_ = validation_api.validate_statistics_internal(
statistics, schema, previous_version_statistics='test')
def test_validate_stats_invalid_schema_input(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([statistics_pb2.DatasetFeatureStatistics()])
with self.assertRaisesRegexp(TypeError, '.*should be a Schema proto.*'):
_ = validation_api.validate_statistics(statistics, {})
def test_validate_stats_invalid_environment(self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([statistics_pb2.DatasetFeatureStatistics()])
schema = text_format.Parse(
"""
default_environment: "TRAINING"
default_environment: "SERVING"
feature {
name: "label"
not_in_environment: "SERVING"
value_count { min: 1 max: 1 }
presence { min_count: 1 }
type: BYTES
}
""", schema_pb2.Schema())
with self.assertRaisesRegexp(
ValueError, 'Environment.*not found in the schema.*'):
_ = validation_api.validate_statistics(statistics, schema,
environment='INVALID')
def test_validate_stats_invalid_statistics_multiple_datasets_no_default_slice(
self):
statistics = statistics_pb2.DatasetFeatureStatisticsList()
statistics.datasets.extend([
statistics_pb2.DatasetFeatureStatistics(),
statistics_pb2.DatasetFeatureStatistics()
])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
ValueError, 'Only statistics proto with one dataset or the default.*'):
_ = validation_api.validate_statistics(statistics, schema)
def test_validate_stats_invalid_previous_statistics_multiple_datasets(self):
current_stats = statistics_pb2.DatasetFeatureStatisticsList()
current_stats.datasets.extend([
statistics_pb2.DatasetFeatureStatistics()
])
previous_stats = statistics_pb2.DatasetFeatureStatisticsList()
previous_stats.datasets.extend([
statistics_pb2.DatasetFeatureStatistics(),
statistics_pb2.DatasetFeatureStatistics()
])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
ValueError, 'Only statistics proto with one dataset or the default.*'):
_ = validation_api.validate_statistics(current_stats, schema,
previous_statistics=previous_stats)
def test_validate_stats_invalid_serving_statistics_multiple_datasets(self):
current_stats = statistics_pb2.DatasetFeatureStatisticsList()
current_stats.datasets.extend([
statistics_pb2.DatasetFeatureStatistics()
])
serving_stats = statistics_pb2.DatasetFeatureStatisticsList()
serving_stats.datasets.extend([
statistics_pb2.DatasetFeatureStatistics(),
statistics_pb2.DatasetFeatureStatistics()
])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
ValueError, 'Only statistics proto with one dataset or the default.*'):
_ = validation_api.validate_statistics(current_stats, schema,
serving_statistics=serving_stats)
def test_validate_stats_invalid_previous_version_stats_multiple_datasets(
self):
current_stats = statistics_pb2.DatasetFeatureStatisticsList()
current_stats.datasets.extend([
statistics_pb2.DatasetFeatureStatistics()
])
previous_version_stats = statistics_pb2.DatasetFeatureStatisticsList()
previous_version_stats.datasets.extend([
statistics_pb2.DatasetFeatureStatistics(),
statistics_pb2.DatasetFeatureStatistics()
])
schema = schema_pb2.Schema()
with self.assertRaisesRegexp(
ValueError, 'Only statistics proto with one dataset or the default.*'):
_ = validation_api.validate_statistics_internal(
current_stats,
schema,
previous_version_statistics=previous_version_stats)
def test_validate_stats_internal_with_previous_version_stats(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 2 }
buckets { label: "c" sample_count: 7 }
}
}
}
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
previous_span_statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 10
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
buckets { label: "c" sample_count: 6 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
serving_statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
buckets { label: "c" sample_count: 6 }
}
}
}
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 10
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
previous_version_statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_non_missing: 10
num_missing: 0
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
}
}
}
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 3 }
buckets { label: "b" sample_count: 1 }
buckets { label: "c" sample_count: 6 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
schema = text_format.Parse(
"""
feature {
name: 'bar'
type: BYTES
skew_comparator { infinity_norm { threshold: 0.1 } }
}
feature {
name: "annotated_enum"
type: BYTES
domain: "annotated_enum"
drift_comparator { infinity_norm { threshold: 0.01 } }
}
string_domain { name: "annotated_enum" value: "a" }
""", schema_pb2.Schema())
expected_anomalies = {
'bar': text_format.Parse(self._bar_anomaly_info,
anomalies_pb2.AnomalyInfo()),
'annotated_enum': text_format.Parse(self._annotated_enum_anomaly_info,
anomalies_pb2.AnomalyInfo())
}
# Validate the stats.
anomalies = validation_api.validate_statistics_internal(
statistics,
schema,
previous_span_statistics=previous_span_statistics,
serving_statistics=serving_statistics,
previous_version_statistics=previous_version_statistics)
self._assert_equal_anomalies(anomalies, expected_anomalies)
# pylint: enable=line-too-long
def test_validate_stats_internal_with_validation_options_set(self):
statistics = text_format.Parse(
"""
datasets {
num_examples: 10
features {
path { step: 'bar' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 2 }
buckets { label: "c" sample_count: 7 }
}
}
}
features {
path { step: 'annotated_enum' }
type: STRING
string_stats {
common_stats {
num_missing: 0
num_non_missing: 10
max_num_values: 1
}
rank_histogram {
buckets { label: "a" sample_count: 1 }
buckets { label: "b" sample_count: 1 }
}
}
}
}""", statistics_pb2.DatasetFeatureStatisticsList())
empty_schema = schema_pb2.Schema()
    # In this test case, neither `bar` nor `annotated_enum` is defined in the
    # schema. Since only `bar` is listed in features_needed, the expected
    # anomalies report covers only that feature. And because
    # new_features_are_warnings is set to True, the severity in the report is
    # WARNING.
expected_anomalies = {
'bar': text_format.Parse("""
description: "New column (column in data but not in schema)"
severity: WARNING
short_description: "New column"
reason {
type: SCHEMA_NEW_COLUMN
short_description: "New column"
description: "New column (column in data but not in schema)"
}
path {
step: "bar"
}""", anomalies_pb2.AnomalyInfo())
}
features_needed = {
FeaturePath(['bar']): [
validation_options.ReasonFeatureNeeded(comment='reason1'),
validation_options.ReasonFeatureNeeded(comment='reason2')
]
}
new_features_are_warnings = True
vo = validation_options.ValidationOptions(
features_needed, new_features_are_warnings)
# Validate the stats.
anomalies = validation_api.validate_statistics_internal(
statistics,
empty_schema,
validation_options=vo)
self._assert_equal_anomalies(anomalies, expected_anomalies)
# pylint: enable=line-too-long
def test_validate_instance(self):
instance = pa.RecordBatch.from_arrays([pa.array([['D']])],
['annotated_enum'])
schema = text_format.Parse(
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 1
}
type: BYTES
domain: "MyAloneEnum"
}
feature {
name: "ignore_this"
lifecycle_stage: DEPRECATED
value_count {
min:1
}
presence {
min_count: 1
}
type: BYTES
}
""", schema_pb2.Schema())
expected_anomalies = {
'annotated_enum':
text_format.Parse(
"""
path {
step: "annotated_enum"
}
description: "Examples contain values missing from the schema: D "
"(~100%). "
severity: ERROR
short_description: "Unexpected string values"
reason {
type: ENUM_TYPE_UNEXPECTED_STRING_VALUES
short_description: "Unexpected string values"
description: "Examples contain values missing from the schema: D "
"(~100%). "
}
""", anomalies_pb2.AnomalyInfo())
}
options = stats_options.StatsOptions(schema=schema)
anomalies = validation_api.validate_instance(instance, options)
self._assert_equal_anomalies(anomalies, expected_anomalies)
def test_validate_instance_global_only_anomaly_type(self):
instance = pa.RecordBatch.from_arrays([pa.array([['D']])],
['annotated_enum'])
# This schema has a presence.min_count > 1, which will generate an anomaly
# of type FEATURE_TYPE_LOW_NUMBER_PRESENT when any single example is
# validated using this schema. This test checks that this anomaly type
# (which is not meaningful in per-example validation) is not included in the
# Anomalies proto that validate_instance returns.
schema = text_format.Parse(
"""
string_domain {
name: "MyAloneEnum"
value: "A"
value: "B"
value: "C"
}
feature {
name: "annotated_enum"
value_count {
min:1
max:1
}
presence {
min_count: 5
}
type: BYTES
domain: "MyAloneEnum"
}
""", schema_pb2.Schema())
expected_anomalies = {
'annotated_enum':
text_format.Parse(
"""
path {
step: "annotated_enum"
}
description: "Examples contain values missing from the schema: D "
"(~100%). "
severity: ERROR
short_description: "Unexpected string values"
reason {
type: ENUM_TYPE_UNEXPECTED_STRING_VALUES
short_description: "Unexpected string values"
description: "Examples contain values missing from the schema: D "
"(~100%). "
}
""", anomalies_pb2.AnomalyInfo())
}
options = stats_options.StatsOptions(schema=schema)
anomalies = validation_api.validate_instance(instance, options)
self._assert_equal_anomalies(anomalies, expected_anomalies)
def test_validate_instance_environment(self):
instance = pa.RecordBatch.from_arrays([pa.array([['A']])], ['feature'])
schema = text_format.Parse(
"""
default_environment: "TRAINING"
default_environment: "SERVING"
feature {
name: "label"
not_in_environment: "SERVING"
value_count { min: 1 max: 1 }
presence { min_count: 1 }
type: BYTES
}
feature {
name: "feature"
value_count { min: 1 max: 1 }
presence { min_count: 1 }
type: BYTES
}
""", schema_pb2.Schema())
options = stats_options.StatsOptions(schema=schema)
# Validate the instance in TRAINING environment.
expected_anomalies_training = {
'label':
text_format.Parse(
"""
path {
step: "label"
}
description: "Column is completely missing"
severity: ERROR
short_description: "Column dropped"
reason {
type: SCHEMA_MISSING_COLUMN
short_description: "Column dropped"
description: "Column is completely missing"
}
""", anomalies_pb2.AnomalyInfo())
}
anomalies_training = validation_api.validate_instance(
instance, options, environment='TRAINING')
self._assert_equal_anomalies(anomalies_training,
expected_anomalies_training)
# Validate the instance in SERVING environment.
anomalies_serving = validation_api.validate_instance(
instance, options, environment='SERVING')
self._assert_equal_anomalies(anomalies_serving, {})
def test_validate_instance_invalid_environment(self):
instance = pa.RecordBatch.from_arrays([pa.array([['A']])], ['feature'])
schema = text_format.Parse(
"""
default_environment: "TRAINING"
default_environment: "SERVING"
feature {
name: "label"
not_in_environment: "SERVING"
value_count { min: 1 max: 1 }
presence { min_count: 1 }
type: BYTES
}
feature {
name: "feature"
value_count { min: 1 max: 1 }
presence { min_count: 1 }
type: BYTES
}
""", schema_pb2.Schema())
options = stats_options.StatsOptions(schema=schema)
with self.assertRaisesRegexp(
ValueError, 'Environment.*not found in the schema.*'):
_ = validation_api.validate_instance(
instance, options, environment='INVALID')
def test_validate_instance_invalid_options(self):
instance = pa.RecordBatch.from_arrays([pa.array([['A']])], ['feature'])
with self.assertRaisesRegexp(ValueError,
'options must be a StatsOptions object.'):
_ = validation_api.validate_instance(instance, {})
def test_validate_instance_stats_options_without_schema(self):
instance = pa.RecordBatch.from_arrays([pa.array([['A']])], ['feature'])
# This instance of StatsOptions has no schema.
options = stats_options.StatsOptions()
with self.assertRaisesRegexp(ValueError, 'options must include a schema.'):
_ = validation_api.validate_instance(instance, options)
class NLValidationTest(ValidationTestCase):
@parameterized.named_parameters(*[
dict(
testcase_name='no_coverage',
min_coverage=None,
feature_coverage=None,
min_avg_token_length=None,
feature_avg_token_length=None,
expected_anomaly_types=set(),
expected_min_coverage=None,
expected_min_avg_token_length=None),
dict(
testcase_name='missing_stats',
min_coverage=0.4,
feature_coverage=None,
min_avg_token_length=None,
feature_avg_token_length=None,
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.STATS_NOT_AVAILABLE]),
expected_min_coverage=None,
expected_min_avg_token_length=None,
),
dict(
testcase_name='low_min_coverage',
min_coverage=0.4,
feature_coverage=0.5,
min_avg_token_length=None,
feature_avg_token_length=None,
expected_anomaly_types=set(),
expected_min_coverage=0.4,
expected_min_avg_token_length=None),
dict(
testcase_name='high_min_coverage',
min_coverage=0.5,
feature_coverage=0.4,
min_avg_token_length=None,
feature_avg_token_length=None,
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.FEATURE_COVERAGE_TOO_LOW]),
expected_min_coverage=0.4,
expected_min_avg_token_length=None,
),
dict(
testcase_name='low_min_avg_token_length',
min_coverage=None,
feature_coverage=None,
min_avg_token_length=4,
feature_avg_token_length=5,
expected_anomaly_types=set(),
expected_min_coverage=None,
expected_min_avg_token_length=4,
),
dict(
testcase_name='high_min_avg_token_length',
min_coverage=None,
feature_coverage=None,
min_avg_token_length=5,
feature_avg_token_length=4,
expected_anomaly_types=set([
anomalies_pb2.AnomalyInfo
.FEATURE_COVERAGE_TOO_SHORT_AVG_TOKEN_LENGTH
]),
expected_min_coverage=None,
expected_min_avg_token_length=4,
),
])
def test_validate_nl_domain_coverage(self, min_coverage, feature_coverage,
min_avg_token_length,
feature_avg_token_length,
expected_anomaly_types,
expected_min_coverage,
expected_min_avg_token_length):
schema = text_format.Parse(
"""
feature {
name: "nl_feature"
natural_language_domain {
}
type: INT
}
""", schema_pb2.Schema())
if min_coverage is not None:
schema.feature[
0].natural_language_domain.coverage.min_coverage = min_coverage
if min_avg_token_length is not None:
schema.feature[
0].natural_language_domain.coverage.min_avg_token_length = min_avg_token_length
statistics = text_format.Parse(
"""
datasets{
num_examples: 10
features {
path { step: 'nl_feature' }
type: INT
num_stats: {
common_stats: {
num_missing: 3
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
if feature_coverage is not None or feature_avg_token_length is not None:
nl_stats = statistics_pb2.NaturalLanguageStatistics()
if feature_coverage is not None:
nl_stats.feature_coverage = feature_coverage
if feature_avg_token_length is not None:
nl_stats.avg_token_length = feature_avg_token_length
custom_stat = statistics.datasets[0].features[0].custom_stats.add()
custom_stat.name = 'nl_statistics'
custom_stat.any.Pack(nl_stats)
# Validate the stats and update schema.
anomalies = validation_api.validate_statistics(statistics, schema)
schema = validation_api.update_schema(schema, statistics)
anomaly_types = set(
[r.type for r in anomalies.anomaly_info['nl_feature'].reason])
self.assertSetEqual(expected_anomaly_types, anomaly_types)
for field, str_field in [(expected_min_coverage, 'min_coverage'),
(expected_min_avg_token_length,
'min_avg_token_length')]:
if field is None:
self.assertFalse(
schema.feature[0].natural_language_domain.coverage.HasField(
str_field))
else:
self.assertAlmostEqual(
getattr(schema.feature[0].natural_language_domain.coverage,
str_field), field)
@parameterized.named_parameters(*[
dict(
testcase_name='missing_stats',
token_name=100,
fraction_values=(None, 0.4, 0.6),
sequence_values=(None, None, None, 1, 3),
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.STATS_NOT_AVAILABLE]),
expected_fraction_values=None,
expected_sequence_values=None),
dict(
testcase_name='all_fraction_constraints_satisfied',
token_name=100,
fraction_values=(0.5, 0.4, 0.6),
sequence_values=None,
expected_anomaly_types=set(),
expected_fraction_values=(0.4, 0.6),
expected_sequence_values=None),
dict(
testcase_name='int_token_min_fraction_constraint_too_high',
token_name=100,
fraction_values=(0.5, 0.6, 0.6),
sequence_values=None,
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_SMALL_FRACTION]),
expected_fraction_values=(0.5, 0.6),
expected_sequence_values=None),
dict(
testcase_name='string_token_min_fraction_constraint_too_high',
token_name='str',
fraction_values=(0.5, 0.6, 0.6),
sequence_values=None,
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_SMALL_FRACTION]),
expected_fraction_values=(0.5, 0.6),
expected_sequence_values=None),
dict(
testcase_name='int_token_max_fraction_constraint_too_low',
token_name=100,
fraction_values=(0.5, 0.4, 0.4),
sequence_values=None,
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_LARGE_FRACTION]),
expected_fraction_values=(0.4, 0.5),
expected_sequence_values=None),
dict(
testcase_name='string_token_max_fraction_constraint_too_low',
token_name='str',
fraction_values=(0.5, 0.4, 0.4),
sequence_values=None,
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_LARGE_FRACTION]),
expected_fraction_values=(0.4, 0.5),
expected_sequence_values=None),
dict(
testcase_name='all_sequence_constraints_satisfied',
token_name=100,
fraction_values=None,
sequence_values=(2, 2, 2, 1, 3),
expected_anomaly_types=set(),
expected_fraction_values=None,
expected_sequence_values=(1, 3),
),
dict(
testcase_name='int_token_min_sequence_constraint_too_high',
token_name=100,
fraction_values=None,
sequence_values=(0, 2, 1, 1, 3),
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_FEW_OCCURRENCES]),
expected_fraction_values=None,
expected_sequence_values=(0, 3),
),
dict(
testcase_name='string_token_min_sequence_constraint_too_high',
token_name='str',
fraction_values=None,
sequence_values=(0, 2, 1, 1, 3),
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_FEW_OCCURRENCES]),
expected_fraction_values=None,
expected_sequence_values=(0, 3),
),
dict(
testcase_name='int_token_max_sequence_constraint_too_low',
token_name=100,
fraction_values=None,
sequence_values=(2, 4, 3, 1, 3),
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_MANY_OCCURRENCES]),
expected_fraction_values=None,
expected_sequence_values=(1, 4),
),
dict(
testcase_name='string_token_max_sequence_constraint_too_low',
token_name='str',
fraction_values=None,
sequence_values=(2, 4, 3, 1, 3),
expected_anomaly_types=set(
[anomalies_pb2.AnomalyInfo.SEQUENCE_VALUE_TOO_MANY_OCCURRENCES]),
expected_fraction_values=None,
expected_sequence_values=(1, 4),
),
])
def test_validate_nl_domain_token_constraints(self, token_name,
fraction_values,
sequence_values,
expected_anomaly_types,
expected_fraction_values,
expected_sequence_values):
fraction, min_fraction, max_fraction = (
fraction_values if fraction_values else (None, None, None))
expected_min_fraction, expected_max_fraction = (
expected_fraction_values if expected_fraction_values else (None, None))
min_sequence_stat, max_sequence_stat, avg_sequence_stat, min_sequence, max_sequence = (
sequence_values if sequence_values else (None, None, None, None, None))
expected_min_sequence, expected_max_sequence = (
expected_sequence_values if expected_sequence_values else (None, None))
schema = text_format.Parse(
"""
feature {
name: "nl_feature"
natural_language_domain {
token_constraints {
int_value: 200
min_per_sequence: 1
max_per_sequence: 3
min_fraction_of_sequences: 0.1
max_fraction_of_sequences: 0.3
}
}
type: INT
}
""", schema_pb2.Schema())
if (min_fraction is not None or max_fraction is not None or
min_sequence is not None or max_sequence is not None):
token_constraint = (
schema.feature[0].natural_language_domain.token_constraints.add())
if isinstance(token_name, int):
token_constraint.int_value = token_name
else:
token_constraint.string_value = token_name
if min_fraction is not None:
token_constraint.min_fraction_of_sequences = min_fraction
if max_fraction is not None:
token_constraint.max_fraction_of_sequences = max_fraction
if min_sequence is not None:
token_constraint.min_per_sequence = min_sequence
if max_sequence is not None:
token_constraint.max_per_sequence = max_sequence
statistics = text_format.Parse(
"""
datasets{
num_examples: 10
features {
path { step: 'nl_feature' }
type: INT
num_stats: {
common_stats: {
num_missing: 3
num_non_missing: 7
min_num_values: 1
max_num_values: 1
}
}
}
}
""", statistics_pb2.DatasetFeatureStatisticsList())
nl_stats = statistics_pb2.NaturalLanguageStatistics()
token_stats = nl_stats.token_statistics.add()
token_stats.int_token = 200
token_stats.fraction_of_sequences = 0.2
token_stats.per_sequence_min_frequency = 2
token_stats.per_sequence_max_frequency = 2
token_stats.per_sequence_avg_frequency = 2
if (fraction is not None or min_sequence_stat is not None or
max_sequence_stat is not None):
token_stats = nl_stats.token_statistics.add()
if isinstance(token_name, int):
token_stats.int_token = token_name
else:
token_stats.string_token = token_name
if fraction is not None:
token_stats.fraction_of_sequences = fraction
if min_sequence_stat is not None:
token_stats.per_sequence_min_frequency = min_sequence_stat
if max_sequence_stat is not None:
token_stats.per_sequence_max_frequency = max_sequence_stat
if avg_sequence_stat is not None:
token_stats.per_sequence_avg_frequency = avg_sequence_stat
custom_stat = statistics.datasets[0].features[0].custom_stats.add()
custom_stat.name = 'nl_statistics'
custom_stat.any.Pack(nl_stats)
# Validate the stats.
anomalies = validation_api.validate_statistics(statistics, schema)
anomaly_types = set(
[r.type for r in anomalies.anomaly_info['nl_feature'].reason])
self.assertSetEqual(anomaly_types, expected_anomaly_types)
schema = validation_api.update_schema(schema, statistics)
for field, str_field in [
(expected_min_fraction, 'min_fraction_of_sequences'),
(expected_max_fraction, 'max_fraction_of_sequences'),
(expected_min_sequence, 'min_per_sequence'),
(expected_max_sequence, 'max_per_sequence')
]:
if field is None:
self.assertFalse(
len(schema.feature[0].natural_language_domain.token_constraints) and
schema.feature[0].natural_language_domain.token_constraints[1]
.HasField(str_field))
else:
self.assertAlmostEqual(
getattr(
schema.feature[0].natural_language_domain.token_constraints[1],
str_field), field)
class IdentifyAnomalousExamplesTest(parameterized.TestCase):
@parameterized.named_parameters(*IDENTIFY_ANOMALOUS_EXAMPLES_VALID_INPUTS)
def test_identify_anomalous_examples(self, examples, schema_text,
expected_result):
schema = text_format.Parse(schema_text, schema_pb2.Schema())
options = stats_options.StatsOptions(schema=schema)
def _assert_fn(got):
# TODO(zhuo): clean-up after ARROW-8277 is available.
class _RecordBatchEqualityWrapper(object):
__hash__ = None
def __init__(self, record_batch):
self._batch = record_batch
def __eq__(self, other):
return self._batch.equals(other._batch) # pylint: disable=protected-access
wrapped_got = [(k, _RecordBatchEqualityWrapper(v)) for k, v in got]
wrapped_expected = [
(k, _RecordBatchEqualityWrapper(v)) for k, v in expected_result]
self.assertCountEqual(wrapped_got, wrapped_expected)
with beam.Pipeline() as p:
result = (
p | beam.Create(examples)
| validation_api.IdentifyAnomalousExamples(options))
util.assert_that(result, _assert_fn)
def test_identify_anomalous_examples_options_of_wrong_type(self):
examples = [{'annotated_enum': np.array(['D'], dtype=object)}]
options = 1
with self.assertRaisesRegexp(ValueError, 'options must be a `StatsOptions` '
'object.'):
with beam.Pipeline() as p:
_ = (
p | beam.Create(examples)
| validation_api.IdentifyAnomalousExamples(options))
def test_identify_anomalous_examples_options_without_schema(self):
examples = [{'annotated_enum': np.array(['D'], dtype=object)}]
options = stats_options.StatsOptions()
with self.assertRaisesRegexp(ValueError, 'options must include a schema'):
with beam.Pipeline() as p:
_ = (
p | beam.Create(examples)
| validation_api.IdentifyAnomalousExamples(options))
class DetectFeatureSkewTest(absltest.TestCase):
def _assert_feature_skew_results_protos_equal(self, actual, expected) -> None:
self.assertLen(actual, len(expected))
sorted_actual = sorted(actual, key=lambda t: t.feature_name)
sorted_expected = sorted(expected, key=lambda e: e.feature_name)
for i in range(len(sorted_actual)):
compare.assertProtoEqual(self, sorted_actual[i], sorted_expected[i])
def _assert_skew_pairs_equal(self, actual, expected) -> None:
self.assertLen(actual, len(expected))
for each in actual:
self.assertIn(each, expected)
def test_detect_feature_skew(self):
training_data = [
text_format.Parse("""
features {
feature {
key: 'id'
value { bytes_list { value: [ 'first_feature' ] } }
}
feature {
key: 'feature_a'
value { int64_list { value: [ 12, 24 ] } }
}
feature {
key: 'feature_b'
value { float_list { value: [ 10.0 ] } }
}
}
""", tf.train.Example()),
text_format.Parse("""
features {
feature {
key: 'id'
value { bytes_list { value: [ 'second_feature' ] } }
}
feature {
key: 'feature_a'
value { int64_list { value: [ 5 ] } }
}
feature {
key: 'feature_b'
value { float_list { value: [ 15.0 ] } }
}
}
""", tf.train.Example())
]
serving_data = [
text_format.Parse("""
features {
feature {
key: 'id'
value { bytes_list { value: [ 'first_feature' ] } }
}
feature {
key: 'feature_b'
value { float_list { value: [ 10.0 ] } }
}
}
""", tf.train.Example()),
text_format.Parse("""
features {
feature {
key: 'id'
value { bytes_list { value: [ 'second_feature' ] } }
}
feature {
key: 'feature_a'
value { int64_list { value: [ 5 ] } }
}
feature {
key: 'feature_b'
value { float_list { value: [ 20.0 ] } }
}
}
""", tf.train.Example())
]
expected_feature_skew_result = [
text_format.Parse(
"""
feature_name: 'feature_a'
training_count: 2
serving_count: 1
match_count: 1
training_only: 1
diff_count: 1""", feature_skew_results_pb2.FeatureSkew()),
text_format.Parse(
"""
feature_name: 'feature_b'
training_count: 2
serving_count: 2
match_count: 1
mismatch_count: 1
diff_count: 1""", feature_skew_results_pb2.FeatureSkew())
]
with beam.Pipeline() as p:
training_data = p | 'CreateTraining' >> beam.Create(training_data)
serving_data = p | 'CreateServing' >> beam.Create(serving_data)
feature_skew, skew_sample = (
(training_data, serving_data)
| 'DetectSkew' >> validation_api.DetectFeatureSkew(
identifier_features=['id'], sample_size=1))
util.assert_that(
feature_skew,
test_util.make_skew_result_equal_fn(self,
expected_feature_skew_result),
'CheckFeatureSkew')
util.assert_that(skew_sample, util.is_not_empty(), 'CheckSkewSample')
def test_write_feature_skew_results_to_tf_record(self):
feature_skew_results = [
text_format.Parse(
"""
feature_name: 'skewed'
training_count: 2
serving_count: 2
mismatch_count: 2
diff_count: 2""", feature_skew_results_pb2.FeatureSkew()),
text_format.Parse(
"""
feature_name: 'no_skew'
training_count: 2
serving_count: 2
match_count: 2""", feature_skew_results_pb2.FeatureSkew())
]
output_path = os.path.join(tempfile.mkdtemp(), 'feature_skew')
with beam.Pipeline() as p:
_ = (
p | beam.Create(feature_skew_results)
| validation_api.WriteFeatureSkewResultsToTFRecord(output_path))
skew_results_from_file = []
for record in tf.compat.v1.io.tf_record_iterator(output_path):
skew_results_from_file.append(
feature_skew_results_pb2.FeatureSkew.FromString(record))
self._assert_feature_skew_results_protos_equal(skew_results_from_file,
feature_skew_results)
def test_write_skew_pairs_to_tf_record(self):
skew_pairs = [
text_format.Parse(
"""
training {
features {
feature {
key: 'id'
value { bytes_list { value: [ 'id_feature' ] } }
}
feature {
key: 'feature_a'
value { float_list { value: [ 10.0 ] } }
}
}
}
serving {
features {
feature {
key: 'id'
value { bytes_list { value: [ 'id_feature' ] } }
}
feature {
key: 'feature_a'
value { float_list { value: [ 11.0 ] } }
}
}
}
mismatched_features : [ 'feature_a' ]
""", feature_skew_results_pb2.SkewPair()),
text_format.Parse(
"""
training {
features {
feature {
key: 'id'
value { bytes_list { value: [ 'id_feature' ] } }
}
feature {
key: 'feature_a'
value { int64_list { value: [ 5 ] } }
}
}
}
serving {
features {
feature {
key: 'id'
value { bytes_list { value: [ 'id_feature' ] } }
}
}
}
training_only_features: [ 'feature_a' ]
""", feature_skew_results_pb2.SkewPair())
]
output_path = os.path.join(tempfile.mkdtemp(), 'skew_pairs')
with beam.Pipeline() as p:
_ = (
p | beam.Create(skew_pairs)
| validation_api.WriteSkewPairsToTFRecord(output_path))
skew_pairs_from_file = []
for record in tf.compat.v1.io.tf_record_iterator(output_path):
skew_pairs_from_file.append(
feature_skew_results_pb2.SkewPair.FromString(record))
self._assert_skew_pairs_equal(skew_pairs_from_file, skew_pairs)
def _construct_sliced_statistics(
values_slice1,
values_slice2) -> statistics_pb2.DatasetFeatureStatisticsList:
values_overall = values_slice1 + values_slice2
datasets = []
stats_slice1 = tfdv.generate_statistics_from_dataframe(
pd.DataFrame.from_dict({'foo': values_slice1}))
stats_slice1.datasets[0].name = 'slice1'
datasets.append(stats_slice1.datasets[0])
if values_slice2:
stats_slice2 = tfdv.generate_statistics_from_dataframe(
        pd.DataFrame.from_dict({'foo': values_slice2}))
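    # The original function is cut off here; what follows is a minimal sketch
    # of the likely remaining steps (an assumption), mirroring the slice1
    # handling above and the 'All Examples' default-slice naming used by the
    # tests in this file.
    stats_slice2.datasets[0].name = 'slice2'
    datasets.append(stats_slice2.datasets[0])
  stats_overall = tfdv.generate_statistics_from_dataframe(
      pd.DataFrame.from_dict({'foo': values_overall}))
  stats_overall.datasets[0].name = 'All Examples'
  datasets.append(stats_overall.datasets[0])
  return statistics_pb2.DatasetFeatureStatisticsList(datasets=datasets)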
import sys, os
import numpy as np, pandas as pd
import ujson, gc
##############################################
# defaults
##############################################
project_name = 'zwickys-quirky-transients'
class_file = project_name + '-classifications.csv'
# workflow_file = project_name + '-workflows.csv'
# workflow_contents_file = project_name + '-workflow_contents.csv'
# subject_file = project_name + '-subjects.csv'
aggregated_file = project_name + '-aggregated.csv'
# the workflow version is a string because it's actually 2 integers separated by a "."
# (major).(minor)
# i.e. 6.1 is NOT the same version as 6.10
# though note we only care about the major version below
workflow_id = 8368
workflow_version = "6.16"
# do we want to write an extra file with just classification counts and usernames
# (and a random color column, for treemaps)?
counts_out = True
counts_out_file = 'class_counts_colors.csv'
verbose = True
# placeholder (right now we aren't weighting)
apply_weight = 0
# what's the minimum number of classifications a subject should have before we think
# we might have enough information to want to rank its classification?
nclass_min = 5
#################################################################################
#################################################################################
#################################################################################
# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient
# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are
# in the range of 0.7-0.9.
def gini(list_of_values):
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2
return (fair_area - area) / fair_area
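# Quick illustration (hypothetical per-user classification counts):
#   gini([10, 10, 10, 10]) -> 0.0   (perfectly even effort across users)
#   gini([1, 1, 1, 97])    -> 0.72  (one user doing nearly all the work)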
#################################################################################
#################################################################################
#################################################################################
# assign a color randomly if logged in, gray otherwise
# this is for making a treemap of your project's contributors
def randcolor(user_label):
import random
if user_label.startswith('not-logged-in-'):
# keep it confined to grays, i.e. R=G=B and not too bright, not too dark
g = random.randint(25,150)
return '#%02X%02X%02X' % (g,g,g)
#return '#555555'
else:
# the lambda makes this generate a new int every time it's called, so that
# in general R != G != B below.
r = lambda: random.randint(0,255)
return '#%02X%02X%02X' % (r(),r(),r())
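# Illustration (the hex strings below are made up -- real output is random):
#   randcolor('not-logged-in-abc123')   -> e.g. '#5E5E5E' (a gray, R=G=B)
#   randcolor('some_registered_user')   -> e.g. '#3FA81C' (an arbitrary color)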
#################################################################################
#################################################################################
#################################################################################
# This is the function that actually does the aggregating
def aggregate_class(grp):
    # translate the group to a dataframe to be safe (some indexing etc. is different otherwise)
thegrp = pd.DataFrame(grp)
# figure out what we're looping over below
answers = thegrp.candidate_anno.unique()
# aggregating is a matter of grouping by different answers and summing the counts/weights
byans = thegrp.groupby('candidate_anno')
ans_ct_tot = byans['count'].aggregate('sum')
ans_wt_tot = byans['weight'].aggregate('sum')
# we want fractions eventually, so we need denominators
count_tot = np.sum(ans_ct_tot) # we could also do len(thegrp)
weight_tot = np.sum(ans_wt_tot)
# okay, now we should have a series of counts for each answer, one for weighted counts, and
# the total votes and weighted votes for this subject.
# now loop through the possible answers and create the raw and weighted vote fractions
# and save the counts as well.
    # this is a dict for now and we'll make it into a series and order the columns later
class_agg = {}
class_agg['count_unweighted'] = count_tot
class_agg['count_weighted'] = weight_tot
class_agg['subject_filename'] = thegrp.subject_filename.unique()[0]
for a in answers:
# ignore blank classifications
if a is not None:
# don't be that jerk who labels things with "p0" or otherwise useless internal indices.
# Use the text of the response next to this answer choice in the project builder (but strip spaces)
try:
raw_frac_label = ('p_'+a).replace(' ', '_')
wt_frac_label = ('p_'+a+'_weight').replace(' ', '_')
except:
# if we're here it's because all the classifications for this
# subject are empty, i.e. they didn't answer the question, just
# clicked done
# in that case you're going to crash in a second, but at least
# print out some information about the group before you do
print("OOPS ")
print(thegrp)
print('-----------')
print(a)
print('+++++')
print(ans_ct_tot,ans_wt_tot,count_tot,weight_tot)
print('~~~~~~')
print(answers)
print(len(a))
class_agg[raw_frac_label] = ans_ct_tot[a]/float(count_tot)
class_agg[wt_frac_label] = ans_wt_tot[a]/float(weight_tot)
# oops, this is hard-coded - sorry to those trying to generalise
col_order = ["p_Real", "p_Bogus", "p_Skip", "p_Real_weight", "p_Bogus_weight", "p_Skip_weight",
"count_unweighted", "count_weighted", "subject_filename"]
return pd.Series(class_agg)[col_order]
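# Worked example (hypothetical, unweighted): a subject with 4 'Real' and 1 'Bogus'
# vote out of 5 classifications aggregates to p_Real = 0.8, p_Bogus = 0.2,
# count_unweighted = 5 and count_weighted = 5.0 (all weights are 1.0). Answers
# never given for that subject (e.g. 'Skip') come out as NaN and are filled
# with 0.0 after the groupby-apply further down.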
#################################################################################
#################################################################################
#################################################################################
def aggregate_ztf(classfile_in=class_file, wfid=workflow_id, wfv=workflow_version, apply_weight=apply_weight, outfile=aggregated_file, counts_out=counts_out, counts_out_file=counts_out_file, nclass_min=nclass_min, verbose=True):
if verbose:
print("Reading classifications from %s ..." % classfile_in)
classifications_all = pd.read_csv(classfile_in) # this step can take a few minutes for a big file
this_workflow = classifications_all.workflow_id == wfid
# we only care about the major version
this_wf_version = [int(q) == int(float(wfv)) for q in classifications_all.workflow_version]
classifications = classifications_all[this_workflow & this_wf_version].copy()
del classifications_all
n_class = len(classifications)
if verbose:
print("... %d classifications selected from workflow %d, version %d." % (n_class, wfid, int(float(wfv))))
# first, extract the started_at and finished_at from the annotations column
classifications['meta_json'] = [ujson.loads(q) for q in classifications.metadata]
classifications['started_at_str'] = [q['started_at'] for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'] for q in classifications.meta_json]
classifications['created_day'] = [q[:10] for q in classifications.created_at]
if verbose:
print("Getting subject info...")
# Get subject info into a format we can actually use
classifications['subject_json'] = [ujson.loads(q) for q in classifications.subject_data]
# the metadata saved with the subject is just this filename, but extract it
classifications['subject_filename'] = [q[1]['subject_json']["%d" % q[1]['subject_ids']]['Filename'] for q in classifications['subject_ids subject_json'.split()].iterrows()]
# Get annotation info into a format we can actually use
# these annotations are just a single question, yay
classifications['annotation_json'] = [ujson.loads(q) for q in classifications.annotations]
classifications['candidate_anno'] = [q[0]['value'] for q in classifications.annotation_json]
# possible answers (that are actually used) to the question
# Should be Real, Bogus, Skip, but let's go with what's actually there
answers = classifications.candidate_anno.unique()
# create a weight parameter but set it to 1.0 for all classifications (unweighted) - may change later
classifications['weight'] = [1.0 for q in classifications.workflow_version]
# also create a count parameter, because at the time of writing this .aggregate('count') was sometimes off by 1
classifications['count'] = [1 for q in classifications.workflow_version]
# just so that we can check we're aggregating what we think we are
last_class_time = max(classifications.created_at)[:16].replace(' ', '_').replace('T', '_').replace(':', 'h')+"m"
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Apply weighting function (or don't) #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
classifications['seed'] = [0 for q in classifications.weight]
classifications['is_gs'] = [0 for q in classifications.weight]
if apply_weight > 0:
if verbose:
print(" Computing user weights...")
print("Except there's nothing to do right now because we haven't written a weighting scheme")
else:
# just make a collated classification count array so we can print it to the screen
by_user = classifications.groupby('user_name')
user_exp = by_user.seed.aggregate('sum')
user_weights = pd.DataFrame(user_exp)
user_weights.columns = ['seed']
#user_weights['user_label'] = user_weights.index
user_weights['nclass_user'] = by_user['count'].aggregate('sum')
user_weights['n_gs'] = by_user['is_gs'].aggregate('sum')
# UNWEIGHTED
user_weights['weight'] = [1 for q in user_exp]
# bit of memory management
gc.collect()
# grab basic stats
# also we'll use by_subject for the actual aggregation
n_subj_tot = len(classifications.subject_data.unique())
by_subject = classifications.groupby('subject_ids')
subj_class = by_subject.created_at.aggregate('count')
all_users = classifications.user_name.unique()
n_user_tot = len(all_users)
n_user_unreg = sum([q.startswith('not-logged-in-') for q in all_users])
last_class_id = max(classifications.classification_id)
# obviously if we didn't weight then we don't need to get stats on weights
if apply_weight > 0:
user_weight_mean = np.mean(user_weights.weight)
user_weight_median = np.median(user_weights.weight)
user_weight_25pct = np.percentile(user_weights.weight, 25)
user_weight_75pct = np.percentile(user_weights.weight, 75)
user_weight_min = min(user_weights.weight)
user_weight_max = max(user_weights.weight)
nclass_mean = np.mean(user_weights.nclass_user)
nclass_median = np.median(user_weights.nclass_user)
nclass_tot = len(classifications)
user_weights.sort_values(['nclass_user'], ascending=False, inplace=True)
if verbose:
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Print out basic project info #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
print("%d classifications from %d users, %d registered and %d unregistered.\n" % (nclass_tot, n_user_tot, n_user_tot - n_user_unreg, n_user_unreg))
print("Mean n_class per user %.1f, median %.1f." % (nclass_mean, nclass_median))
if apply_weight > 0:
print("Mean user weight %.3f, median %.3f, with the middle 50 percent of users between %.3f and %.3f." % (user_weight_mean, user_weight_median, user_weight_25pct, user_weight_75pct))
print("The min user weight is %.3f and the max user weight is %.3f.\n" % (user_weight_min, user_weight_max))
cols_print = 'nclass_user weight'.split()
else:
cols_print = 'nclass_user'
# don't make this leaderboard public unless you want to gamify your users in ways we already know
# have unintended and sometimes negative consequences. This is just for your information.
print("Classification leaderboard:")
print(user_weights[cols_print].head(20))
print("Gini coefficient for project: %.3f" % gini(user_weights['nclass_user']))
# If you want to print out a file of classification counts per user, with colors
# for making a treemap
# honestly I'm not sure why you wouldn't want to print this, as it's very little
# extra effort AND it's the only place we record the user weights
if counts_out:
if verbose:
print("Printing classification counts to %s..." % counts_out_file)
user_weights['color'] = [randcolor(q) for q in user_weights.index]
user_weights.to_csv(counts_out_file)
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Aggregate classifications, unweighted and weighted #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
if verbose:
print("\nAggregating classifications...\n")
class_agg = by_subject['weight count candidate_anno subject_filename'.split()].apply(aggregate_class)
# if p_Ans = 0.0 the function skipped those values, so replace all the NaNs with 0.0
class_agg.fillna(value=0.0, inplace=True)
#######################################################
# Write to files #
#######################################################
#
# add value-added columns
#
# let people look up the subject on Talk directly from the aggregated file
class_agg['link'] = ['https://www.zooniverse.org/projects/rswcit/zwickys-quirky-transients/talk/subjects/'+str(q) for q in class_agg.index]
# after we do the merges below the new indices might not be linked to the subject id, so save it explicitly
#class_agg['subject_ids'] = [str(q) for q in class_agg.index]
# make the list ranked by p_Real_weight
# which is the same as p_Real if there isn't any weighting
class_agg.sort_values(['p_Real_weight', 'count_weighted'], ascending=False, inplace=True)
# if there are lots of unfinished subjects, make 2 versions of output file
has_enough = class_agg['count_unweighted'] >= nclass_min
if (len(has_enough) - sum(has_enough) > 50):
outfile_enough = outfile.replace(".csv", "_nclassgt%.0f.csv" % nclass_min)
# this is unlikely, but just in case
if outfile_enough == outfile:
outfile_enough += "_nclassgt%.0f.csv" % nclass_min
else:
# we don't need to write a second file
outfile_enough = ''
if verbose:
print("Writing aggregated output for %d subjects to file %s...\n" % (len(class_agg), outfile))
if len(outfile_enough) > 2:
print(" and aggregated %d subjects with n_class > %.0f to %s.\n" % (sum(has_enough), nclass_min, outfile_enough))
if apply_weight > 0:
pd.DataFrame(class_agg).to_csv(outfile)
if len(outfile_enough) > 2:
pd.DataFrame(class_agg[has_enough]).to_csv(outfile_enough)
cols_out = class_agg.columns
else:
# ignore the weighted columns
cols_out = []
for col in class_agg.columns:
if (not col.endswith('_weight')) & (not col.endswith('_weighted')):
cols_out.append(col)
pd.DataFrame(class_agg[cols_out]).to_csv(outfile)
if len(outfile_enough) > 2:
pd.DataFrame(class_agg[cols_out][has_enough]).to_csv(outfile_enough)
    return pd.DataFrame(class_agg[cols_out])
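# Minimal usage sketch (not part of the original script): run the aggregation
# with the defaults configured at the top of this file, assuming the default
# classification export CSV is present in the working directory.
if __name__ == '__main__':
    aggregate_ztf()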
# THIS SCRIPT SERVES TO TRANSLATE INDIVIDUAL DATASET ANNOTATIONS TO CELL TYPE ANNOTATIONS AS IMPLEMENTED BY SASHA AND MARTIJN! :)
import pandas as pd
import numpy as np
import scanpy as sc
import utils # this is a custom script from me
def nan_checker(entry):
"""replaces entry that is not a cell type label, but rather some other
entry that exists in the table (e.g. |, >, nan) with None. Retains
labels that look like proper labels."""
if entry == '|':
new_entry = None
elif entry == '>':
new_entry = None
elif pd.isna(entry):
new_entry = None
else:
new_entry = entry
return new_entry
def load_harmonizing_table(path_to_csv):
"""Loads the csv download version of the google doc in which
cell types for each dataset are assigned a consensus equivalent.
Returns the cleaned dataframe"""
cell_type_harm_df = pd.read_csv(
path_to_csv,
header=1
)
cell_type_harm_df = cell_type_harm_df.applymap(nan_checker)
# strip white spaces
cell_type_harm_df = cell_type_harm_df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
return cell_type_harm_df
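# A minimal usage sketch (the csv path below is hypothetical, not part of this repository):
# harmonizing_df = load_harmonizing_table("cell_type_harmonization_table.csv")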
def consensus_index_renamer(consensus_df, idx):
highest_res = int(float(consensus_df.loc[idx, "highest_res"]))
new_index = (
str(highest_res) + "_" + consensus_df.loc[idx, "level_" + str(highest_res)]
)
return new_index
def create_consensus_table(harmonizing_df, max_level=5):
"""creates a clean consensus table based on the harmonizing_df that also
includes dataset level info. The output dataframe contains, for each consensus
    cell type, its 'parent cell types', an indication of whether the cell
type is considered new, and the level of the annotation.
max_level - maximum level of annotation available"""
# set up empty consensus df, that will be filled later on
consensus_df = pd.DataFrame(
columns=[
'level_' + str(levnum) for levnum in range(1,max_level + 1)
] + [
'highest_res','new','harmonizing_df_index'
]
)
# store the index of the harmonizing df, so that we can match rows later on
consensus_df["harmonizing_df_index"] = harmonizing_df.index
# create dictionary storing the most recent instance of every level,
# so that we always track the 'mother levels' of each instance
most_recent_level_instance = dict()
# loop through rows
for idx in harmonizing_df.index:
# set 'new' to false. This variable indicates if the cell type annotation
# is new according to Sasha and Martijn. It will be checked/changed later.
new = False
# create a dictionary where we story the entries of this row,
# for each level
original_entries_per_level = dict()
# create a dictionary where we store the final entries of this row,
# for each level. That is, including the mother levels.
final_entries_per_level = dict()
# if all entries in this row are None, continue to next row:
if (
sum(
[
pd.isna(x)
for x in harmonizing_df.loc[
idx, ["Level_" + str(levnum) for levnum in range(1, max_level + 1)]
]
]
)
== max_level
):
continue
# for each level, check if we need to store the current entry,
# the mother level entry, or no entry (we do not want to store daughters)
for level in range(1, max_level + 1):
original_entry = harmonizing_df.loc[idx, "Level_" + str(level)]
# if the current level has an entry in the original dataframe,
# update the 'most_recent_level_instance'
# and store the entry as the final output entry for this level
if original_entry != None:
# store the lowest level annotated for this row:
lowest_level = level
# check if entry says 'New!', then we need to obtain actual label
# from the dataset where the label appears
if original_entry == "New!":
# set new to true now
new = True
# get all the unique entries from this row
# this should be only one entry aside from 'None' and 'New!'
actual_label = list(set(harmonizing_df.loc[idx, :]))
# filter out 'New!' and 'None'
actual_label = [
x for x in actual_label if not pd.isna(x) and x != "New!"
][0]
original_entry = actual_label
# update most recent instance of level:
most_recent_level_instance[level] = original_entry
# store entry as final entry for this level
final_entries_per_level[level] = original_entry
# Moreover, store the most recent instances of the lower (parent)
# levels as final output:
for parent_level in range(1, level):
final_entries_per_level[parent_level] = most_recent_level_instance[
parent_level
]
# and set the daughter levels to None:
for daughter_level in range(level + 1, max_level + 1):
final_entries_per_level[daughter_level] = None
break
# if none of the columns have an entry,
# set all of them to None:
if bool(final_entries_per_level) == False:
            for level in range(1, max_level + 1):
final_entries_per_level[level] = None
# store the final outputs in the dataframe:
consensus_df.loc[idx, "highest_res"] = int(lowest_level)
consensus_df.loc[idx, "new"] = new
consensus_df.loc[idx, consensus_df.columns[:max_level]] = [
final_entries_per_level[level] for level in range(1, max_level + 1)
]
rows_to_keep = (
idx
for idx in consensus_df.index
if not consensus_df.loc[
idx, ["level_" + str(levnum) for levnum in range(1, max_level + 1)]
]
.isna()
.all()
)
consensus_df = consensus_df.loc[rows_to_keep, :]
# rename indices so that they also specify the level of origin
# this allows for indexing by cell type while retaining uniqueness
    # (i.e. we're able to distinguish 'Epithelial' at level 1 from level 2)
consensus_df.index = [
consensus_index_renamer(consensus_df, idx) for idx in consensus_df.index
]
# Now check if there are double index names. Sasha added multiple rows
    # for the same consensus type to accommodate his cluster names. We
# should therefore merge these rows:
# First create an empty dictionary in which to store the harmonizing_df row
# indices of the multiple identical rows:
harmonizing_df_index_lists = dict()
for unique_celltype in set(consensus_df.index):
# if there are multiple rows, the only way in which they should differ
# is their harmonizing_df_index. Check this:
celltype_sub_df = consensus_df.loc[unique_celltype]
if type(celltype_sub_df) == pd.DataFrame and celltype_sub_df.shape[0] > 1:
# check if all levels align:
for level in range(1, max_level + 1):
if len(set(celltype_sub_df["level_" + str(level)])) > 1:
print(
"WARNING: {} has different annotations in different rows at level {}. Look into this!!".format(
unique_celltype, str(level)
)
)
harmonizing_df_index_list = celltype_sub_df["harmonizing_df_index"].values
# if there was only one index equal to the unique celltype, the
# celltype_sub_df is actually a pandas series and we need to index
        # it differently
else:
harmonizing_df_index_list = [celltype_sub_df["harmonizing_df_index"]]
# store the harmonizing_df_index_list in the consensus df
# we use the 'at' function to be able to store a list in a single
# cell/entry of the dataframe.
harmonizing_df_index_lists[unique_celltype] = harmonizing_df_index_list
# now that we have a list per cell type, we can drop the duplicate columns
consensus_df.drop_duplicates(
subset=["level_" + str(levnum) for levnum in range(1, max_level + 1)], inplace=True
)
# and replace the harmonizing_df_index column with the generated complete lists
consensus_df["harmonizing_df_index"] = None
for celltype in consensus_df.index:
consensus_df.at[celltype, "harmonizing_df_index"] = harmonizing_df_index_lists[
celltype
]
# forward propagate coarser annotations
# add a prefix (matching with the original level of coarseness) to the
# forward-propagated annotations
for celltype in consensus_df.index:
highest_res = consensus_df.loc[celltype, "highest_res"]
# go to next level of loop if the highest_res is nan
if not type(highest_res) == pd.Series:
if pd.isna(highest_res):
continue
for i in range(highest_res + 1, max_level + 1):
consensus_df.loc[celltype, "level_" + str(i)] = celltype
return consensus_df
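# A minimal usage sketch (paths and max_level are illustrative assumptions):
# harmonizing_df = load_harmonizing_table("cell_type_harmonization_table.csv")
# consensus_df = create_consensus_table(harmonizing_df, max_level=5)
# consensus_df.head()  # indices look like '<level>_<label>', e.g. '1_Epithelial'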
def create_orig_ann_to_consensus_translation_df(
adata,
consensus_df,
harmonizing_df,
verbose=True,
number_of_levels=5,
ontology_type="cell_type",
):
"""returns a dataframe. The indices of the dataframe correspond to original
labels from the dataset, with the study name as a prefix (as it appears in
the input adata.obs.study), the columns contain information on the
consensus translation. I.e. a translation at each level, whether or not the
cell type is 'new', and what the level of the annotation is.
ontology_type <"cell_type","anatomical_region_coarse","anatomical_region_fine"> - type of ontology
"""
# list the studies of interest. This name should correspond to the
# study name in the anndata object, 'study' column.
# store the column name in the "cell type harmonization table" (google doc)
# that corresponds to the study, in a dictionary:
study_cat = "study"
studies = set(adata.obs[study_cat])
if ontology_type == "cell_type":
harm_colnames = {study:study for study in studies}
elif ontology_type == "anatomical_region_coarse":
harm_colnames = {study: study + "_coarse" for study in studies}
elif ontology_type == "anatomical_region_fine":
harm_colnames = {study: study + "_fine" for study in studies}
else:
raise ValueError(
"ontology_type must be set to either cell_type, anatomical_region_coarse or anatomical_region_fine. Exiting"
)
# store set of studies
original_annotation_names = {
"cell_type": "original_celltype_ann",
"anatomical_region_coarse": "anatomical_region_coarse",
"anatomical_region_fine": "anatomical_region_detailed",
}
original_annotation_name = original_annotation_names[ontology_type]
original_annotations_prefixed = sorted(
set(
[
cell_study + "_" + ann
for cell_study, ann in zip(
adata.obs[study_cat], adata.obs[original_annotation_name]
)
if str(ann) != "nan"
]
)
)
level_names = [
"level" + "_" + str(level_number)
for level_number in range(1, number_of_levels + 1)
]
translation_df = pd.DataFrame(
index=original_annotations_prefixed,
columns=level_names + ["new", "highest_res"],
)
for study in studies:
harm_colname = harm_colnames[study]
if verbose:
print("working on study " + study + "...")
# get cell type names, remove nan and None from list:
cell_types_original = sorted(
set(
[
cell
for cell, cell_study in zip(
adata.obs[original_annotation_name], adata.obs[study_cat]
)
if cell_study == study
and str(cell) != "nan"
and str(cell) != "None"
]
)
)
for label in cell_types_original:
if verbose:
print(label)
# add study prefix for output, in that way we can distinguish
# between identical labels in different studies
label_prefixed = study + "_" + label
# if the label is 'nan', skip this row
if label == "nan":
continue
# get the rows in which this label appears in the study column
# of the harmonizing_df:
ref_rows = harmonizing_df[harm_colname][
harmonizing_df[harm_colname] == label
].index.tolist()
# if the label does not occur, there is a discrepancy between the
# labels in adata and the labels in the harmonizing table made
# by Martijn and Sasha.
if len(ref_rows) == 0:
print(
"HEY THERE ARE NO ROWS IN THE harmonizing_df WITH THIS LABEL ({}) AS ENTRY!".format(
label
)
)
# if the label occurs twice, there is some ambiguity as to how to
# translate this label to a consensus label. In that case, translate
# to the finest level that is identical in consensus translation
# between the multiple rows.
elif len(ref_rows) > 1:
print(
"HEY THE NUMBER OF ROWS WITH ENTRY {} IS NOT 1 but {}!".format(
label, str(len(ref_rows))
)
)
# get matching indices from consensus_df:
consensus_idc = list()
for i in range(len(ref_rows)):
consensus_idc.append(
consensus_df[
[
ref_rows[i] in index_list
for index_list in consensus_df["harmonizing_df_index"]
]
].index[0]
)
# now get the translations for both cases:
df_label_subset = consensus_df.loc[consensus_idc, :]
# store only labels that are common among all rows with this label:
for level in range(1, number_of_levels + 1):
col = "level_" + str(level)
n_translations = len(set(df_label_subset.loc[:, col]))
if n_translations == 1:
# update finest annotation
finest_annotation = df_label_subset.loc[consensus_idc[0], col]
# update finest annotation level
finest_level = level
# store current annotation at current level
translation_df.loc[label_prefixed, col] = finest_annotation
                    # if level labels differ between instances, fall back to the finest
                    # common (coarser) annotation, prefixed with its level
else:
translation_df.loc[label_prefixed, col] = (
str(finest_level) + "_" + finest_annotation
)
translation_df.loc[label_prefixed, "highest_res"] = finest_level
# set "new" to false, since we fell back to a common annotation:
translation_df.loc[label_prefixed, "new"] = False
# add ref rows to harmonizing_df_index?
# ...
# if there is only one row with this label, copy the consensus labels
# from the same row to the translation df.
else:
consensus_idx = consensus_df[
[
ref_rows[0] in index_list
for index_list in consensus_df["harmonizing_df_index"]
]
]
if len(consensus_idx) == 0:
raise ValueError(f"label {label} does not have any reference label in your harmonizing df! Exiting.")
consensus_idx = consensus_idx.index[
0
] # loc[label,'harmonizing_df_index'][0]
translation_df.loc[label_prefixed, :] = consensus_df.loc[
consensus_idx, :
]
if verbose:
print("Done!")
return translation_df
def consensus_annotate_anndata(
adata, translation_df, verbose=True, max_ann_level=5, ontology_type="cell_type"
):
"""annotates cells in adata with consensus annotation. Returns adata."""
# list the studies of interest. This name should correspond to the
# study name in the anndata object, 'study' column.
# get name of adata.obs column with original annotations:
original_annotation_names = {
"cell_type": "original_celltype_ann",
"anatomical_region_coarse": "anatomical_region_coarse",
"anatomical_region_fine": "anatomical_region_detailed",
}
original_annotation_name = original_annotation_names[ontology_type]
# add prefix to original annotations, so that we can distinguish between
# datasets:
adata.obs[original_annotation_name + "_prefixed"] = [
cell_study + "_" + ann if str(ann) != "nan" else np.nan
for cell_study, ann in zip(adata.obs.study, adata.obs[original_annotation_name])
]
# specify the columns to copy:
col_to_copy = [
"level_" + str(level_number) for level_number in range(1, max_ann_level + 1)
]
col_to_copy = col_to_copy + ["highest_res", "new"]
# add prefix for in adata.obs:
prefixes = {
"cell_type": "ann_",
"anatomical_region_coarse": "region_coarse_",
"anatomical_region_fine": "region_fine_",
}
prefix = prefixes[ontology_type]
col_to_copy_new_names = [prefix + name for name in col_to_copy]
# add these columns to adata:
for col_name_old, col_name_new in zip(col_to_copy, col_to_copy_new_names):
translation_dict = dict(zip(translation_df.index, translation_df[col_name_old]))
adata.obs[col_name_new] = adata.obs[original_annotation_name + "_prefixed"].map(
translation_dict
)
adata.obs.drop([original_annotation_name + "_prefixed"], axis=1, inplace=True)
return adata
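# A minimal end-to-end sketch for cell types (assumes adata.obs carries 'study' and
# 'original_celltype_ann' columns, as required by the two functions above):
# translation_df = create_orig_ann_to_consensus_translation_df(
#     adata, consensus_df, harmonizing_df, ontology_type="cell_type"
# )
# adata = consensus_annotate_anndata(adata, translation_df, ontology_type="cell_type")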
# ANATOMICAL REGION HARMONIZATION:
def merge_anatomical_annotations(ann_coarse, ann_fine):
"""Takes in two same-level annotation, for coarse and
fine annotation, returns the finest annotation.
To use on vectors, do:
np.vectorize(merge_anatomical_annotations)(
vector_coarse, vector_fine
)
"""
if utils.check_if_nan(ann_coarse):
ann_coarse_annotated = False
elif ann_coarse[0].isdigit() and ann_coarse[1] == "_":
ann_coarse_annotated = ann_coarse[0]
else:
ann_coarse_annotated = True
if utils.check_if_nan(ann_fine):
ann_fine_annotated = False
elif ann_fine[0].isdigit() and ann_fine[1] == "_":
ann_fine_annotated = ann_fine[0]
else:
ann_fine_annotated = True
# if finely annotated, return fine annotation
if ann_fine_annotated == True:
return ann_fine
# if only coarse is annotated, return coarse annotation
elif ann_coarse_annotated == True:
return ann_coarse
# if both are not annotated, return np.nan
elif ann_coarse_annotated == False and ann_fine_annotated == False:
return np.nan
# if only one is not annotated, return the other:
elif ann_coarse_annotated == False:
return ann_fine
elif ann_fine_annotated == False:
return ann_coarse
# if one or both are under-annotated (i.e. have
# forward propagated annotations from higher levels),
# choose the one with the highest prefix
elif ann_coarse_annotated > ann_fine_annotated:
return ann_coarse
elif ann_fine_annotated > ann_coarse_annotated:
return ann_fine
elif ann_fine_annotated == ann_coarse_annotated:
if ann_coarse == ann_fine:
return ann_fine
else:
raise ValueError(
"Contradicting annotations. ann_coarse: {}, ann_fine: {}".format(
ann_coarse, ann_fine
)
)
else:
raise ValueError(
"Something most have gone wrong. ann_coarse: {}, ann_fine: {}".format(
ann_coarse, ann_fine
)
)
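# Behaviour sketch (labels below are hypothetical examples, not from the harmonizing table):
# a proper fine label wins over a coarse one, and a label propagated from a deeper level wins
# over one propagated from a shallower level; contradicting same-level labels raise ValueError.
# merge_anatomical_annotations('1_Airway', 'Bronchiole')  # -> 'Bronchiole'
# merge_anatomical_annotations('1_Airway', '2_Bronchi')   # -> '2_Bronchi'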
def merge_coarse_and_fine_anatomical_ontology_anns(
adata, remove_harm_coarse_and_fine_original=False, n_levels=3
):
"""takes in an adata with in its obs: anatomical_region_coarse_level_[n]
and anatomical_region_fine_level_[n] and merges those for n_levels.
Returns adata with merged annotation under anatomical_region_level_[n].
Removes coarse and fine original harmonizations if
remove_harm_coarse_and_fine_original is set to True."""
for lev in range(1, n_levels + 1):
adata.obs["anatomical_region_level_" + str(lev)] = np.vectorize(
merge_anatomical_annotations
)(
adata.obs["region_coarse_level_" + str(lev)],
adata.obs["region_fine_level_" + str(lev)],
)
adata.obs["anatomical_region_highest_res"] = np.vectorize(max)(
adata.obs["region_coarse_highest_res"], adata.obs["region_fine_highest_res"]
)
if remove_harm_coarse_and_fine_original:
for lev in range(1, n_levels + 1):
del adata.obs["region_coarse_level_" + str(lev)]
del adata.obs["region_fine_level_" + str(lev)]
del adata.obs["region_coarse_highest_res"]
del adata.obs["region_fine_highest_res"]
del adata.obs["region_coarse_new"]
del adata.obs["region_fine_new"]
return adata
def add_clean_annotation(adata, max_level=5):
"""converts ann_level_[annotation level] to annotation without label
propagation from lower levels. I.e. in level 2, we will not have 1_Epithelial
anymore; instead cells without level 2 annotations will have annotation None.
Returns adata with extra annotation levels.
"""
for level in range(1, max_level + 1):
level_name = "ann_level_" + str(level)
anns = sorted(set(adata.obs[level_name]))
ann2pureann = dict(zip(anns, anns))
for ann_name in ann2pureann.keys():
if ann_name[:2] in ["1_", "2_", "3_", "4_"]:
ann2pureann[ann_name] = None
adata.obs[level_name + "_clean"] = adata.obs[level_name].map(ann2pureann)
return adata
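# Example of the effect (labels are hypothetical): in 'ann_level_2', cells annotated only at
# level 1 carry the propagated label '1_Epithelial'; in 'ann_level_2_clean' those same cells
# become None, while genuine level-2 labels are kept unchanged.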
def add_anatomical_region_ccf_score(adata, harmonizing_df):
"""
    Adds ccf score according to mapping in harmonizing_df. Uses
adata.obs.anatomical_region_level_1 and adata.obs.anatomical_region_level_2
for mapping.
Returns annotated adata
"""
an_region_l1_to_ccf_score = dict()
an_region_l2_to_ccf_score = dict()
for row, score in enumerate(harmonizing_df.continuous_score_upper_and_lower):
if not pd.isnull(score):
level_1_label = harmonizing_df["Level_1"][row]
level_2_label = harmonizing_df["Level_2"][row]
            if not pd.isnull(level_2_label):
import numpy as np
import keras
import tensorflow as tf
from matplotlib import pyplot as plt
import pandas as pd
from keras.layers.embeddings import Embedding
from keras.layers import concatenate, Lambda
import os, sys
from weather_model import Seq2Seq_MVE_subnets_swish, weather_conv1D, weather_fnn, CausalCNN, RNN_builder, Seq2Seq, Seq2Seq_MVE, Seq2Seq_MVE_subnets
# note: weather_fnn is assumed to be defined in weather_model; FNN.build_graph below calls it
from keras.models import load_model, model_from_json
#from utils import random_sine, plot_prediction
#learning_rate = 0.01
#decay = 0 # Learning rate decay
model_save_path = '../models/'
#loss = "mse" # Other loss functions are possible, see Keras documentation.
# Regularisation isn't really needed for this application
#lambda_regulariser = None #0.000001 # Will not be used if regulariser is None
#regulariser = None # Possible regulariser: keras.regularizers.l2(lambda_regulariser)
#steps_per_epoch = 200 # batch_size * steps_per_epoch = total number of training examples
#num_signals = 2 # The number of random sine waves the compose the signal. The more sine waves, the harder the problem.
def crop(dimension, start, end):
# Crops (or slices) a Tensor on a given dimension from start to end
# example : to crop tensor x[:, :, 5:10]
    # call crop(2, 5, 10) as you want to crop on the second dimension
def func(x):
if dimension == 0:
return x[start: end]
if dimension == 1:
return x[:, start: end]
if dimension == 2:
return x[:, :, start: end]
if dimension == 3:
return x[:, :, :, start: end]
if dimension == 4:
return x[:, :, :, :, start: end]
return Lambda(func)
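# A minimal usage sketch inside a Keras functional model (shapes are illustrative):
# the MVE head used further below emits 6 channels per timestep, and crop() splits
# them into the 3 predicted means and the 3 predicted variances.
# x = keras.layers.Input(shape=(37, 6))
# pred_mean = crop(2, 0, 3)(x)  # -> (batch, 37, 3)
# pred_var = crop(2, 3, 6)(x)   # -> (batch, 37, 3)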
class WeatherConv1D:
def __init__(self, regulariser=None,
lr=0.001, decay=0, loss="mse",
layers=[35, 35], batch_size=256,
input_len=37, input_features=29,
strides_len=3, kernel_size=5):
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
self.batch_size = batch_size
self.input_len = input_len
self.input_features = input_features
self.kernel_strides = strides_len
self.kernel_size = kernel_size
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = weather_conv1D(self.layers, self.lr,
self.decay, self.loss, self.input_len,
self.input_features, self.kernel_strides, self.kernel_size)
print(self.model.summary())
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
if certain_id == None and certain_feature == None:
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
elif certain_id != None:
pass
return batch_ruitu, batch_ouputs
def order_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
pass #TODO:
def fit(self, train_input_ruitu, train_labels,
val_input_ruitu, val_labels, batch_size,
iterations=300, validation=True):
self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
self.model.compile(optimizer = self.optimizer, loss=self.loss)
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_ruitu.shape[0]))
for i in range(iterations):
batch_ruitu, batch_labels = self.sample_batch(train_input_ruitu, train_labels,
train_input_ruitu, batch_size=batch_size)
loss_ = self.model.train_on_batch(x=[batch_ruitu],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_ruitu, val_labels, each_station_display=False)
print('###'*10)
print('Train finish! Total validation loss:')
self.evaluate(val_input_ruitu, val_labels, each_station_display=True)
def evaluate(self, data_input_ruitu, data_labels, each_station_display=False):
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
val_loss= self.model.evaluate(x=[data_input_ruitu[:,:,i,:]],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, evaluated loss: {}'.format(i+1, val_loss))
print('Mean evaluated loss on all stations:', np.mean(all_loss))
#return np.mean(all_loss)
def predict(self, batch_ruitu):
pred_result_list = []
for i in range(10):
#print('Predict for station: 9000{}'.format(i+1))
result = self.model.predict(x=[batch_ruitu[:,:,i,:]])
result = np.squeeze(result, axis=0)
#all_pred[i] = result
pred_result_list.append(result)
#pass
pred_result = np.stack(pred_result_list, axis=0)
#return all_pred, pred_result
print('Predict shape (10,37,3) means (stationID, timestep, features). Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
return pred_result
def renorm_for_submit(self, pred_mean, pred_var):
if self.pred_result is None:
print('You must run self.predict(batch_inputs, batch_ruitu) firstly!!')
else:
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
            self.target_list = ['t2m', 'rh2m', 'w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
renorm_value = renorm(self.pred_result[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = pd.concat([series_targets, series_target])
df_empty['FORE_data'] = series_ids
df_empty[target_v] = series_targets
return df_empty
class CausalCNN_Class(WeatherConv1D):
def __init__(self, regulariser,lr, decay, loss,
n_filters, strides_len, kernel_size, seq_len,
input_features, output_features, dilation_rates):
self.regulariser=regulariser
self.n_filters=n_filters
self.lr=lr
self.decay=decay
self.loss=loss
self.seq_len=seq_len
self.input_features=input_features
self.output_features = output_features
self.strides_len=strides_len
self.kernel_size=kernel_size
self.dilation_rates=dilation_rates
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = CausalCNN(self.n_filters, self.lr,
self.decay, self.loss,
self.seq_len, self.input_features,
self.strides_len, self.kernel_size,
self.dilation_rates)
print(self.model.summary())
class FNN(WeatherConv1D):
def __init__(self, regulariser,lr, decay, loss,
layers, batch_size, seq_len, input_features, output_features):
self.regulariser=regulariser
self.layers=layers
self.lr=lr
self.decay=decay
self.loss=loss
self.seq_len=seq_len
self.input_features=input_features
self.output_features = output_features
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = weather_fnn(self.layers, self.lr,
self.decay, self.loss, self.seq_len,
self.input_features, self.output_features)
print(self.model.summary())
class Enc_Dec:
def __init__(self, num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35]):
self.num_input_features = num_input_features
self.num_output_features = num_output_features
self.num_decoder_features = num_decoder_features
self.input_sequence_length = input_sequence_length
self.target_sequence_length = target_sequence_length
self.num_steps_to_predict = num_steps_to_predict
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
self.train_loss=[]
self.target_list=['t2m','rh2m','w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.optimiser = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
# Define an input sequence.
encoder_inputs = keras.layers.Input(shape=(None, self.num_input_features), name='encoder_inputs')
# Create a list of RNN Cells, these are then concatenated into a single layer
# with the RNN layer.
encoder_cells = []
for hidden_neurons in self.layers:
encoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
encoder = keras.layers.RNN(encoder_cells, return_state=True)
encoder_outputs_and_states = encoder(encoder_inputs)
# Discard encoder outputs and only keep the states.
encoder_states = encoder_outputs_and_states[1:]
# Define a decoder sequence.
decoder_inputs = keras.layers.Input(shape=(None, self.num_decoder_features), name='decoder_inputs')
decoder_cells = []
for hidden_neurons in self.layers:
decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
kernel_regularizer = self.regulariser,
recurrent_regularizer = self.regulariser,
bias_regularizer = self.regulariser))
decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
        # Set the initial state of the decoder to be the output state of the encoder.
decoder_outputs_and_states = decoder(decoder_inputs, initial_state=encoder_states)
# Only select the output of the decoder (not the states)
decoder_outputs = decoder_outputs_and_states[0]
# Apply a dense layer with linear activation to set output to correct dimension
        # and scale (tanh is the default activation for GRU in Keras; targets can be larger than 1)
decoder_dense1 = keras.layers.Dense(units=64,
activation='tanh',
kernel_regularizer = self.regulariser,
bias_regularizer = self.regulariser, name='dense_tanh')
output_dense = keras.layers.Dense(self.num_output_features,
activation='sigmoid',
kernel_regularizer = self.regulariser,
bias_regularizer = self.regulariser, name='output_sig')
#densen1=decoder_dense1(decoder_outputs)
decoder_outputs = output_dense(decoder_outputs)
# Create a model using the functional API provided by Keras.
self.model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
print(self.model.summary())
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
if certain_id == None and certain_feature == None:
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_inputs = data_inputs[i,:,id_,:]
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
elif certain_id != None:
pass
return batch_inputs, batch_ruitu, batch_ouputs
def fit(self, train_input_obs, train_input_ruitu, train_labels,
val_input_obs, val_input_ruitu, val_labels, batch_size,
iterations=300, validation=True):
self.model.compile(optimizer = self.optimiser, loss=self.loss)
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_obs.shape[0]))
for i in range(iterations):
batch_inputs, batch_ruitu, batch_labels = self.sample_batch(train_input_obs, train_labels,
train_input_ruitu, batch_size=batch_size)
loss_ = self.model.train_on_batch(x=[batch_inputs, batch_ruitu],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_obs, val_input_ruitu, val_labels, each_station_display=False)
print('###'*10)
print('Train finish! Total validation loss:')
self.evaluate(val_input_obs, val_input_ruitu, val_labels, each_station_display=True)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, each_station_display=False):
assert data_input_ruitu.shape[0] == data_input_obs.shape[0] == data_labels.shape[0], 'Shape Error'
#assert data_input_obs.shape[1] == 28 and data_input_obs.shape[2] == 10 and data_input_obs.shape[3] == 9, 'Error! Obs input shape must be (None, 28,10,9)'
assert data_input_ruitu.shape[1] == 37 and data_input_ruitu.shape[2] == 10 and data_input_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37,10,29)'
        assert data_labels.shape[1] == 37 and data_labels.shape[2] == 10 and data_labels.shape[3] == 3, 'Error! Labels shape must be (None, 37, 10, 3)'
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
val_loss= self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:]],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, evaluated loss: {}'.format(i+1, val_loss))
print('Mean evaluated loss on all stations:', np.mean(all_loss))
def predict(self, batch_inputs, batch_ruitu):
assert batch_ruitu.shape[0] == batch_inputs.shape[0], 'Shape Error'
assert batch_inputs.shape[1] == 28 and batch_inputs.shape[2] == 10 and batch_inputs.shape[3] == 9, 'Error! Obs input shape must be (None, 28,10,9)'
assert batch_ruitu.shape[1] == 37 and batch_ruitu.shape[2] == 10 and batch_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37,10, 29)'
#all_pred={}
pred_result_list = []
for i in range(10):
#print('Predict for station: 9000{}'.format(i+1))
result = self.model.predict(x=[batch_inputs[:,:,i,:], batch_ruitu[:,:,i,:]])
result = np.squeeze(result, axis=0)
#all_pred[i] = result
pred_result_list.append(result)
#pass
pred_result = np.stack(pred_result_list, axis=0)
#return all_pred, pred_result
print('Predict shape (10,37,3) means (stationID, timestep, features). Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
return pred_result
def renorm_for_submit(self, pred_mean, pred_var=None):
'''
# TODO: Add three strategies for output
'''
assert self.pred_result is not None, 'You must run self.predict(batch_inputs, batch_ruitu) firstly!!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
series_targets = pd.Series()
renorm_value = renorm(pred_mean[:,:,j], self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
for i in range(10):
if i != 9:
id_num = '0'+str(i+1)
else:
id_num = str(10)
sta_name_time = '900'+id_num+'_'
time_str_list=[]
for t in range(37):
if t < 10:
time_str= sta_name_time + '0'+ str(t)
else:
time_str = sta_name_time + str(t)
time_str_list.append(time_str)
series_id = pd.Series(time_str_list)
series_target = pd.Series(renorm_value[i])
series_ids = pd.concat([series_ids, series_id])
series_targets = pd.concat([series_targets, series_target])
df_empty['FORE_data'] = series_ids
df_empty[target_v] = series_targets
return df_empty
#pass
def plot_prediction(self, x, y_true, y_pred, input_ruitu=None):
"""Plots the predictions.
Arguments
---------
x: Input sequence of shape (input_sequence_length,
dimension_of_signal)
y_true: True output sequence of shape (input_sequence_length,
dimension_of_signal)
y_pred: Predicted output sequence (input_sequence_length,
dimension_of_signal)
input_ruitu: Ruitu output sequence
"""
plt.figure(figsize=(12, 3))
output_dim = x.shape[-1]# feature dimension
for j in range(output_dim):
past = x[:, j]
true = y_true[:, j]
pred = y_pred[:, j]
if input_ruitu is not None:
ruitu = input_ruitu[:, j]
label1 = "Seen (past) values" if j==0 else "_nolegend_"
label2 = "True future values" if j==0 else "_nolegend_"
label3 = "Predictions" if j==0 else "_nolegend_"
label4 = "Ruitu values" if j==0 else "_nolegend_"
plt.plot(range(len(past)), past, "o-g",
label=label1)
plt.plot(range(len(past),
len(true)+len(past)), true, "x--g", label=label2)
plt.plot(range(len(past), len(pred)+len(past)), pred, "o--y",
label=label3)
if input_ruitu is not None:
plt.plot(range(len(past), len(ruitu)+len(past)), ruitu, "o--r",
label=label4)
plt.legend(loc='best')
plt.title("Predictions v.s. true values v.s. Ruitu")
plt.show()
class RNN_Class(WeatherConv1D):
def __init__(self, num_output_features, num_decoder_features,
target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35]):
self.num_output_features = num_output_features
self.num_decoder_features = num_decoder_features
self.target_sequence_length = target_sequence_length
self.num_steps_to_predict = num_steps_to_predict
self.regulariser = regulariser
self.layers = layers
self.lr = lr
self.decay = decay
self.loss = loss
self.pred_result = None
#self.batch_size = batch_size
print('Initialized!')
def build_graph(self):
keras.backend.clear_session() # clear session/graph
self.model = RNN_builder(self.num_output_features, self.num_decoder_features,
self.target_sequence_length,
self.num_steps_to_predict, self.regulariser,
self.lr, self.decay, self.loss, self.layers)
print(self.model.summary())
class Seq2Seq_Class(Enc_Dec):
def __init__(self, id_embd, time_embd,
num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=0.001, decay=0, loss = "mse",
layers=[35, 35], model_save_path='../models',
model_structure_name='seq2seq_model.json', model_weights_name='seq2seq_model_weights.h5'):
super().__init__(num_input_features, num_output_features, num_decoder_features,
input_sequence_length, target_sequence_length,
num_steps_to_predict, regulariser = None,
lr=lr, decay=decay, loss = loss,
layers=layers)
self.id_embd = id_embd
self.time_embd = time_embd
self.val_loss_list=[]
self.train_loss_list=[]
self.current_mean_val_loss = None
self.early_stop_limit = 10 # with the unit of Iteration Display
self.EARLY_STOP=False
self.pred_var_result = []
self.pi_dic={0.95:1.96, 0.9:1.645, 0.8:1.28, 0.68:1.}
self.target_list=['t2m','rh2m','w10m']
self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
'rh2m':[0.0,100.0],
'w10m':[0.0, 30.0]}
self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
self.model_save_path = model_save_path
self.model_structure_name=model_structure_name
self.model_weights_name=model_weights_name
def build_graph(self):
#keras.backend.clear_session() # clear session/graph
self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
lr=self.lr, decay=self.decay,
num_input_features=self.num_input_features, num_output_features=self.num_output_features,
num_decoder_features=self.num_decoder_features, layers=self.layers,
loss=self.loss, regulariser=self.regulariser)
def _mve_loss(y_true, y_pred):
pred_u = crop(2,0,3)(y_pred)
pred_sig = crop(2,3,6)(y_pred)
print(pred_sig)
#exp_sig = tf.exp(pred_sig) # avoid pred_sig is too small such as zero
#precision = 1./exp_sig
precision = 1./pred_sig
#log_loss= 0.5*tf.log(exp_sig)+0.5*precision*((pred_u-y_true)**2)
log_loss= 0.5*tf.log(pred_sig)+0.5*precision*((pred_u-y_true)**2)
log_loss=tf.reduce_mean(log_loss)
return log_loss
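        # Note: _mve_loss is the Gaussian negative log-likelihood up to an additive constant:
        # L = mean( 0.5*log(sigma^2) + 0.5*(y - mu)^2 / sigma^2 ), where the network outputs
        # mu in channels 0:3 and sigma^2 in channels 3:6 of its last dimension.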
print(self.model.summary())
self.model.compile(optimizer = self.optimizer, loss=_mve_loss)
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
id_ = np.random.randint(max_j, size=batch_size)
i = np.random.randint(max_i, size=batch_size)
batch_inputs = data_inputs[i,:,id_,:]
batch_ouputs = ground_truth[i,:,id_,:]
batch_ruitu = ruitu_inputs[i,:,id_,:]
# id used for embedding
if self.id_embd and (not self.time_embd):
expd_id = np.expand_dims(id_,axis=1)
batch_ids = np.tile(expd_id,(1,37))
return batch_inputs, batch_ruitu, batch_ouputs, batch_ids
elif (not self.id_embd) and (self.time_embd):
time_range = np.array(range(37))
batch_time = np.tile(time_range,(batch_size,1))
#batch_time = np.expand_dims(batch_time, axis=-1)
return batch_inputs, batch_ruitu, batch_ouputs, batch_time
elif (self.id_embd) and (self.time_embd):
expd_id = np.expand_dims(id_,axis=1)
batch_ids = np.tile(expd_id,(1,37))
time_range = np.array(range(37))
batch_time = np.tile(time_range,(batch_size,1))
#batch_time = np.expand_dims(batch_time, axis=-1)
return batch_inputs, batch_ruitu, batch_ouputs, batch_ids, batch_time
elif (not self.id_embd) and (not self.time_embd):
return batch_inputs, batch_ruitu, batch_ouputs
def fit(self, train_input_obs, train_input_ruitu, train_labels,
val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, batch_size,
iterations=300, validation=True):
print('Train batch size: {}'.format(batch_size))
print('Validation on data size of {};'.format(val_input_obs.shape[0]))
early_stop_count = 0
for i in range(iterations):
batch_inputs, batch_ruitu, batch_labels, batch_ids, batch_time = self.sample_batch(train_input_obs, train_labels,
train_input_ruitu, batch_size=batch_size)
#batch_placeholders = np.zeros_like(batch_labels)
loss_ = self.model.train_on_batch(x=[batch_inputs, batch_ruitu, batch_ids, batch_time],
y=[batch_labels])
if (i+1)%50 == 0:
print('Iteration:{}/{}. Training batch MLE loss:{}'.
format(i+1, iterations, loss_))
if validation :
self.evaluate(val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, each_station_display=False)
if len(self.val_loss_list) >0: # Early stopping
                        if self.current_mean_val_loss <= min(self.val_loss_list):  # the current loss was already appended in evaluate(), so this checks whether it is the best so far
early_stop_count = 0
model_json = self.model.to_json()
with open(self.model_save_path+self.model_structure_name, "w") as json_file:
json_file.write(model_json)
self.model.save_weights(self.model_save_path+self.model_weights_name)
else:
early_stop_count +=1
print('Early-stop counter:', early_stop_count)
if early_stop_count == self.early_stop_limit:
self.EARLY_STOP=True
break
print('###'*10)
if self.EARLY_STOP:
print('Loading the best model before early-stop ...')
self.model.load_weights(self.model_save_path+self.model_weights_name)
print('Training finished! Detailed val MLE loss:')
self.evaluate(val_input_obs, val_input_ruitu, val_labels, val_ids, val_times, each_station_display=True)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
all_loss=[]
for i in range(10): # iterate for each station. (sample_ind, timestep, staionID, features)
#batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
val_loss= self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
y=[data_labels[:,:,i,:]], verbose=False)
all_loss.append(val_loss)
if each_station_display:
print('\tFor station 9000{}, val MLE loss: {}'.format(i+1, val_loss))
self.current_mean_val_loss = np.mean(all_loss)
print('Mean val MLE loss:', self.current_mean_val_loss)
self.val_loss_list.append(self.current_mean_val_loss)
def predict(self, batch_inputs, batch_ruitu, batch_ids, batch_times):
'''
Input:
Output:
pred_result (mean value) : (None, 10,37,3). i.e., (sample_nums, stationID, timestep, features)
pred_var_result (var value) : (None, 10,37,3)
'''
pred_result_list = []
pred_var_list = []
#pred_std_list =[]
for i in range(10):
result = self.model.predict(x=[batch_inputs[:,:,i,:], batch_ruitu[:,:,i,:], batch_ids[:,:,i], batch_times])
var_result = result[:,:,3:6] # Variance
result = result[:,:,0:3] # Mean
#result = np.squeeze(result, axis=0)
pred_result_list.append(result)
#var_result = np.squeeze(var_result, axis=0)
pred_var_list.append(var_result)
pred_result = np.stack(pred_result_list, axis=1)
pred_var_result = np.stack(pred_var_list, axis=1)
print('Predictive shape (None, 10,37,3) means (sample_nums, stationID, timestep, features). \
Features include: t2m, rh2m and w10m')
self.pred_result = pred_result
self.pred_var_result = pred_var_result
#self.pred_std_result = np.sqrt(np.exp(self.pred_var_result[:,:,i,j])) # Calculate standard deviation
return pred_result, pred_var_result
def renorm_for_visualization(self, obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result, ground_truth=None):
'''
obs_inputs: (None, 28, 10, 9)
ruitu_inputs: (None, 37, 10, 29)
pred_mean_result: (None, 10, 37, 3)
pred_var_result: (None, 10, 37, 3)
ground_truth: (None, 37, 10, 3)
#self.target_list=['t2m','rh2m','w10m']
#self.obs_range_dic={'t2m':[-30,42],
# 'rh2m':[0.0,100.0],
# 'w10m':[0.0, 30.0]}
#self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
#self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
#TODO:
'''
for target_v in self.target_list:
temp1 = obs_inputs[:,:,:,self.obs_and_output_feature_index_map[target_v]]
temp2 = ruitu_inputs[:,:,:,self.ruitu_feature_index_map[target_v]]
temp3 = pred_mean_result[:,:,:,self.obs_and_output_feature_index_map[target_v]]
#temp4 = pred_var_result[:,:,:,self.obs_and_output_feature_index_map[target_v]]
obs_inputs[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp1, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
ruitu_inputs[:,:,:,self.ruitu_feature_index_map[target_v]] = renorm(temp2, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
pred_mean_result[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp3, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
#pred_var_result[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp4, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
if ground_truth is not None:
temp5 = ground_truth[:,:,:,self.obs_and_output_feature_index_map[target_v]]
ground_truth[:,:,:,self.obs_and_output_feature_index_map[target_v]] = renorm(temp5, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
if ground_truth is not None:
return obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result, ground_truth
else:
return obs_inputs, ruitu_inputs, pred_mean_result, pred_var_result
def calc_uncertainty_info(self, verbose=False):
'''
Verbose: Display uncertainty for each feature i.e., (t2m, rh2m, w10m)
#TODO: Refactor the double 'for' part.
'''
assert len(self.pred_var_result)>0, 'Error! You must run predict() before running calc_uncertainty_info()'
print('The uncertainty info are calculated on {} predicted samples with shape {}'
.format(len(self.pred_var_result), self.pred_var_result.shape))
#
if verbose:
assert self.target_list == ['t2m','rh2m','w10m'], 'ERROR, list changed!'
for j, target_v in enumerate(['t2m','rh2m','w10m']):
print('For feature {}:'.format(target_v))
for i in range(37):
                    unctt_var = np.exp(self.pred_var_result[:,:,i,j])
                    unctt_std = np.sqrt(unctt_var)
unctt_mean_std = np.mean(unctt_std)
unctt_mean_var = np.mean(unctt_var)
#renorm_unctt_mean_std = renorm(unctt_mean_std, self.obs_range_dic[target_v][0], self.obs_range_dic[target_v][1])
print('\tTime:{}-Variance:{:.4f}; Std:{:.4f};'.
format(i+1, unctt_mean_var, unctt_mean_std))
else:
for i in range(37):
unctt_var = np.exp(self.pred_var_result[:,:,i,:])
unctt_std = np.sqrt(unctt_var)
unctt_mean_std = np.mean(unctt_std)
unctt_mean_var = np.mean(unctt_var)
#renorm_unctt_mean_std = 0
print('Time:{}-Variance:{:.4f}; Std:{:.4f};'.
format(i+1, unctt_mean_var, unctt_mean_std))
def minus_plus_std_strategy(self, pred_mean, pred_var, feature_name,\
timestep_to_ensemble=21, alpha=0):
        '''
        This strategy shifts the predicted mean of feature_name by alpha standard deviations
        from timestep_to_ensemble onwards: pred_mean + alpha * pred_std.
        pred_mean: (10, 37, 3)
        pred_var: (10, 37, 3)
        timestep_to_ensemble: int32 (From 0 to 36)
        '''
        print('Using minus_plus_std_strategy with alpha {}'.format(alpha))
assert 0<=timestep_to_ensemble<=36 , 'Please ensure 0<=timestep_to_ensemble<=36!'
assert -0.3<= alpha <=0.3, '-0.3<= alpha <=0.3!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
pred_std = np.sqrt(np.exp(pred_var))
print('alpha:',alpha)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
return pred_mean
def linear_ensemble_strategy(self, pred_mean, pred_var, ruitu_inputs, feature_name,\
timestep_to_ensemble=21, alpha=1):
'''
        This strategy calculates a linear weighting between the prediction and Ruitu from
        timestep_to_ensemble onwards: (alpha)*pred_mean + (1-alpha)*ruitu_inputs
        pred_mean: (10, 37, 3)
        pred_var: (10, 37, 3)
        ruitu_inputs: (37,10,29). Swapped to (10,37,29) inside this function.
timestep_to_ensemble: int32 (From 0 to 36)
'''
assert 0<= alpha <=1, 'Please ensure 0<= alpha <=1 !'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
#pred_std = np.sqrt(np.exp(pred_var))
ruitu_inputs = np.swapaxes(ruitu_inputs,0,1)
print('alpha:',alpha)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
(alpha)*pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
(1-alpha)*ruitu_inputs[:,timestep_to_ensemble:, self.ruitu_feature_index_map[feature_name]]
print('Corrected pred_mean shape:', pred_mean.shape)
return pred_mean
def fuzzy_ensemble_strategy(self, pred_mean, pred_var, feature_name,\
timestep_to_ensemble=21, alpha=0):
'''
        This strategy shifts the predicted mean by alpha standard deviations, scaled by a
        fuzzy (triangular) membership degree of the normalized standard deviation, from
        timestep_to_ensemble onwards: pred_mean + fuzzy_degree * alpha * pred_std.
        pred_mean: (10, 37, 3)
        pred_var: (10, 37, 3)
        timestep_to_ensemble: int32 (From 0 to 36)
'''
print('Using fuzzy_ensemble_strategy with alpha {}'.format(alpha))
assert 0<=timestep_to_ensemble<=36 , 'Please ensure 0<=timestep_to_ensemble<=36!'
assert -0.4<= alpha <=0.4, 'Please ensure -0.4<= alpha <=0.4 !'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for \
        one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
pred_std = np.sqrt(np.exp(pred_var))
#print('normalizing for Std. after timestep:', timestep_to_ensemble)
temp_std = pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
norm_std = temp_std / np.max(temp_std)
#print('norm_std shape', norm_std.shape)
dim_0, dim_1 = norm_std.shape
reshaped_std = norm_std.reshape(-1)
from skfuzzy import trimf
fuzzy_degree = trimf(reshaped_std, [0., 1, 1.2])
fuzzy_degree = fuzzy_degree.reshape(dim_0, dim_1)
#print('fuzzy_degree shape:',fuzzy_degree.shape)
#print('temp_std shape:',temp_std.shape)
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
fuzzy_degree*alpha*temp_std
#pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
#alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]
#print('pred_mean.shape',pred_mean.shape)
return pred_mean
def renorm_for_submit(self, pred_mean, pred_var, ruitu_inputs, timestep_to_ensemble=21, alpha=1):
'''
Overwrite for Seq2Seq_MVE Class
pred_mean: shape of (10, 37, 3)
pred_var: shape of (10, 37, 3)
ruitu_inputs: shape of (10, 37, 3)
timestep_to_ensemble: int32 (From 0 to 36)
# TODO: Add three strategies for output
'''
assert self.pred_result is not None, 'You must run self.predict(batch_inputs, batch_ruitu) firstly!!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for one data sample with shape (10, 37, 3). Any data shape (None, 10, 37, 3) will lead to this error!'
df_empty = pd.DataFrame(columns=['FORE_data', 't2m', 'rh2m', 'w10m'])
for j, target_v in enumerate(self.target_list):
series_ids = pd.Series()
            series_targets = pd.Series()
"""
Herfindahl–Hirschman Index | Cannabis Data Science
Author: <NAME>
Created: Wed Mar 24 2021
License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Description:
Estimate the HHI for cannabis cultivators in the Washington state
cannabis market over time in 2020.
Resources:
https://www.justice.gov/atr/herfindahl-hirschman-index
https://data.openthc.org/b2b
"""
import pandas as pd
#-----------------------------------------------------------------------------
# Import the data.
#-----------------------------------------------------------------------------
data = pd.read_excel(
'./data/wholesale_sales.xlsx',
sheet_name='Panel',
    index_col=0
)
#-----------------------------------------------------------------------------
# Calculate total sales by month.
#-----------------------------------------------------------------------------
sales_by_month = {}
months = data.month.unique()
for month in months:
total_sales = data.loc[data.month == month].sales.sum()
sales_by_month[month] = total_sales
#-----------------------------------------------------------------------------
# Calculate market share for each wholesaler by month. (Refactor!)
#-----------------------------------------------------------------------------
market_shares = []
for index, values in data.iterrows():
market_share = values.sales / sales_by_month[values.month]
market_shares.append(market_share)
data['market_share'] = pd.Series(market_shares)
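# A vectorized alternative to the loop above (a sketch; equivalent as long as the row order
# of `data` is preserved):
# data['market_share'] = data.sales / data.groupby('month').sales.transform('sum')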
#-----------------------------------------------------------------------------
# Calculate the HHI by month.
#-----------------------------------------------------------------------------
hhi_by_month = {}
for month in months:
month_data = data.loc[data.month == month]
hhi = 0
for index, values in month_data.iterrows():
hhi += values.market_share ** 2
hhi_by_month[month] = hhi
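# Note: for each month m, HHI_m = sum_i s_{i,m}^2, where s_{i,m} is wholesaler i's share of
# that month's sales. With fractional shares the index runs from 1/N (N equal-sized firms)
# to 1 (monopoly); multiply by 10,000 to express it on the conventional DOJ scale.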
#-----------------------------------------------------------------------------
# Plot the HHI
#-----------------------------------------------------------------------------
hhi_data = pd.Series(hhi_by_month)
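# A minimal plotting sketch (assumes matplotlib is installed; the original plotting code was
# not included in this fragment).
from matplotlib import pyplot as plt

(hhi_data * 10000).plot(title='HHI of Washington cannabis cultivators by month (2020)')
plt.ylabel('HHI (DOJ scale, 0 to 10,000)')
plt.xlabel('Month')
plt.show()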
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
:description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - anot applied)
:param num_apps; number of applications
:param app_interval; number of days between applications
:NOTE in TED model there are two application scenarios per simulation (one for a min/max exposure scenario)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
result = pd.Series([[]], dtype='bool')
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# input variables that change per simulation
ted_empty.num_apps_min = pd.Series([3, 5, 1])
ted_empty.app_interval_min = pd.Series([3, 7, 1])
for i in range (3):
result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
npt.assert_array_equal(result[i], expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_drift_parameters(self):
"""
:description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
:param param_a (result[i][0]); parameter a for spray drift distance calculation
:param param_b (result[i][1]); parameter b for spray drift distance calculation
:param param_c (result[i][2]); parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series(9*[[0.,0.,0.]], dtype='float')
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
# input variables that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_drift_distance_calc(self):
"""
:description calculates the distance from the edge of the application source area at which spray drift deposition
falls to the fraction of the application rate corresponding to the health threshold of concern
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c = pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
# internally calculated variables
app_rate_frac = pd.Series([0.1,0.25,0.88], dtype='float')
for i in range(3):
result[i] = ted_empty.drift_distance_calc(app_rate_frac[i], param_a[i], param_b[i], param_c[i], ted_empty.max_distance_from_source)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
:description unittest for function conc_timestep:
:param conc_ini; initial concentration for day (actually previous day concentration)
:param half_life; half-life of pesticide representing either foliar dissipation half-life or aerobic soil metabolism half-life (days)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [9.803896e-4, 0.106066, 1.220703e-3]
try:
# input variable that is internally specified from among options
half_life = pd.Series([35., 2., .1])
# internally calculated variables
conc_ini = pd.Series([1.e-3, 0.15, 1.25])
result = ted_empty.conc_timestep(conc_ini, half_life)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_canopy_air(self):
"""
:description calculates initial (1st application day) air concentration of pesticide within plant canopy (ug/mL)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param mass_pest; mass of pesticide on treated field (mg)
:param volume_air; volume of air in 1 hectare to a height equal to the height of the crop canopy
:param biotransfer_factor; the volume-based biotransfer factor; function of Henry's law constant and Log Kow
NOTE: this represents Eq 24 (and supporting eqs 25,26,27) of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.152526e-7, 1.281910e-5, 7.925148e-8]
try:
# internal model constants
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. #m
ted_empty.hectare_area = 10000. #m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 #kg/L
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2., 4., 6.], dtype='float')
ted_empty.log_unitless_hlc = pd.Series([-5., -3., -4.], dtype='float')
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_canopy_air(i, ted_empty.app_rate_min[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_soil_h2o(self):
"""
:description calculates initial (1st application day) concentration in soil pore water or surface puddles (ug/L)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param soil_depth
:param soil_bulk_density; kg/L
:param porosity; soil porosity
:param frac_org_cont_soil; fraction organic carbon in soil
:param app_rate_conv; conversion factor used to convert units of application rate (lbs a.i./acre) to (ug a.i./mL)
:NOTE this represents Eq 3 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
(the depth of water in this equation is assumed to be 0.0 and therefore not included here)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [5.067739e-3, 1.828522, 6.13194634]
try:
# internal model constants
ted_empty.app_rate_conv1 = 11.2
ted_empty.soil_depth = 2.6 # cm
ted_empty.soil_porosity = 0.35
ted_empty.soil_bulk_density = 1.5 # kg/L
ted_empty.soil_foc = 0.015
ted_empty.h2o_depth_soil = 0.0
ted_empty.h2o_depth_puddles = 1.3
# internally specified variable
ted_empty.water_type = pd.Series(["puddles", "pore_water", "puddles"])
# input variables that change per simulation
ted_empty.koc = pd.Series([1.e-3, 0.15, 1.25])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_soil_h2o(i, ted_empty.app_rate_min[i], ted_empty.water_type[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_plant(self):
"""
:description calculates initial (1st application day) dietary based EEC (residue concentration) from pesticide application
(mg/kg-diet for food items including short/tall grass, broadleaf plants, seeds/fruit/pods, and above ground arthropods)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.5e-2, 22.5, 300.]
try:
# input variables that change per simulation
ted_empty.food_multiplier = pd.Series([15., 150., 240.])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
result = ted_empty.conc_initial_plant(ted_empty.app_rate_min, ted_empty.food_multiplier)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_dietary_intake(self):
"""
:description generates pesticide intake via consumption of diet containing pesticide for animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
:param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
# this represents Eq 6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [8.050355, 3.507997, 64.92055]
try:
# internally specified parameters
a1 = pd.Series([.398, .013, .621], dtype='float')
b1 = pd.Series([.850, .773, .564], dtype='float')
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
frac_h2o = pd.Series([0.65, 0.85, 0.7], dtype='float')
result = ted_empty.animal_dietary_intake(a1, b1, body_wgt, frac_h2o)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_dietary_dose(self):
"""
:description generates pesticide dietary-based dose for animals (mammals, birds, amphibians, reptiles)
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param food_intake_rate; ingestion rate of food item (g/day-ww)
:param food_pest_conc; pesticide concentration in food item (mg a.i./kg)
# this represents Eq 5 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [3.e-4, 3.45e-2, 4.5]
try:
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
# internally calculated variables
food_intake_rate = pd.Series([3., 12., 45.], dtype='float')
food_pest_conc = pd.Series([1.e-3, 3.45e-1, 4.50e+1], dtype='float')
result = ted_empty.animal_dietary_dose(body_wgt, food_intake_rate, food_pest_conc)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_plant_timeseries(self):
"""
:description generates annual timeseries of daily pesticide residue concentration (EECs) for a food item
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association, rather it is one year from the day of the 1st pesticide application
# expected results generated by running the OPP spreadsheet with appropriate inputs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.700000E+00,2.578072E+00,2.461651E+00,5.050487E+00,4.822415E+00,4.604642E+00,7.096704E+00,
6.776228E+00,6.470225E+00,6.178040E+00,5.899049E+00,5.632658E+00,5.378296E+00,5.135421E+00,
4.903513E+00,4.682078E+00,4.470643E+00,4.268756E+00,4.075986E+00,3.891921E+00,3.716168E+00,
3.548352E+00,3.388114E+00,3.235112E+00,3.089020E+00,2.949525E+00,2.816329E+00,2.689148E+00,
2.567710E+00,2.451757E+00,2.341039E+00,2.235322E+00,2.134378E+00,2.037993E+00,1.945961E+00,
1.858084E+00,1.774176E+00,1.694057E+00,1.617556E+00,1.544510E+00,1.474762E+00,1.408164E+00,
1.344574E+00,1.283855E+00,1.225878E+00,1.170520E+00,1.117661E+00,1.067189E+00,1.018997E+00,
9.729803E-01,9.290420E-01,8.870880E-01,8.470285E-01,8.087781E-01,7.722549E-01,7.373812E-01,
7.040822E-01,6.722870E-01,6.419276E-01,6.129392E-01,5.852598E-01,5.588304E-01,5.335945E-01,
5.094983E-01,4.864901E-01,4.645210E-01,4.435440E-01,4.235143E-01,4.043890E-01,3.861275E-01,
3.686906E-01,3.520411E-01,3.361435E-01,3.209638E-01,3.064696E-01,2.926299E-01,2.794152E-01,
2.667973E-01,2.547491E-01,2.432451E-01,2.322605E-01,2.217720E-01,2.117571E-01,2.021945E-01,
1.930637E-01,1.843453E-01,1.760206E-01,1.680717E-01,1.604819E-01,1.532348E-01,1.463150E-01,
1.397076E-01,1.333986E-01,1.273746E-01,1.216225E-01,1.161303E-01,1.108860E-01,1.058786E-01,
1.010973E-01,9.653187E-02,9.217264E-02,8.801028E-02,8.403587E-02,8.024095E-02,7.661739E-02,
7.315748E-02,6.985380E-02,6.669932E-02,6.368728E-02,6.081127E-02,5.806513E-02,5.544300E-02,
5.293928E-02,5.054863E-02,4.826593E-02,4.608632E-02,4.400514E-02,4.201794E-02,4.012047E-02,
3.830870E-02,3.657874E-02,3.492690E-02,3.334966E-02,3.184364E-02,3.040563E-02,2.903256E-02,
2.772150E-02,2.646964E-02,2.527431E-02,2.413297E-02,2.304316E-02,2.200257E-02,2.100897E-02,
2.006024E-02,1.915435E-02,1.828937E-02,1.746345E-02,1.667483E-02,1.592182E-02,1.520282E-02,
1.451628E-02,1.386075E-02,1.323482E-02,1.263716E-02,1.206648E-02,1.152158E-02,1.100128E-02,
1.050448E-02,1.003012E-02,9.577174E-03,9.144684E-03,8.731725E-03,8.337415E-03,7.960910E-03,
7.601408E-03,7.258141E-03,6.930375E-03,6.617410E-03,6.318579E-03,6.033242E-03,5.760790E-03,
5.500642E-03,5.252242E-03,5.015059E-03,4.788587E-03,4.572342E-03,4.365863E-03,4.168707E-03,
3.980455E-03,3.800704E-03,3.629070E-03,3.465187E-03,3.308705E-03,3.159289E-03,3.016621E-03,
2.880395E-03,2.750321E-03,2.626121E-03,2.507530E-03,2.394294E-03,2.286171E-03,2.182931E-03,
2.084354E-03,1.990228E-03,1.900352E-03,1.814535E-03,1.732594E-03,1.654353E-03,1.579645E-03,
1.508310E-03,1.440198E-03,1.375161E-03,1.313061E-03,1.253765E-03,1.197147E-03,1.143086E-03,
1.091466E-03,1.042177E-03,9.951138E-04,9.501760E-04,9.072676E-04,8.662969E-04,8.271763E-04,
7.898223E-04,7.541552E-04,7.200988E-04,6.875803E-04,6.565303E-04,6.268824E-04,5.985734E-04,
5.715428E-04,5.457328E-04,5.210884E-04,4.975569E-04,4.750880E-04,4.536338E-04,4.331484E-04,
4.135881E-04,3.949112E-04,3.770776E-04,3.600494E-04,3.437901E-04,3.282651E-04,3.134412E-04,
2.992867E-04,2.857714E-04,2.728664E-04,2.605442E-04,2.487784E-04,2.375440E-04,2.268169E-04,
2.165742E-04,2.067941E-04,1.974556E-04,1.885388E-04,1.800247E-04,1.718951E-04,1.641326E-04,
1.567206E-04,1.496433E-04,1.428857E-04,1.364332E-04,1.302721E-04,1.243892E-04,1.187720E-04,
1.134085E-04,1.082871E-04,1.033970E-04,9.872779E-05,9.426940E-05,9.001235E-05,8.594753E-05,
8.206628E-05,7.836030E-05,7.482167E-05,7.144285E-05,6.821660E-05,6.513605E-05,6.219461E-05,
5.938600E-05,5.670423E-05,5.414355E-05,5.169852E-05,4.936390E-05,4.713470E-05,4.500617E-05,
4.297377E-05,4.103314E-05,3.918015E-05,3.741084E-05,3.572142E-05,3.410830E-05,3.256803E-05,
3.109731E-05,2.969300E-05,2.835211E-05,2.707178E-05,2.584926E-05,2.468195E-05,2.356735E-05,
2.250309E-05,2.148688E-05,2.051657E-05,1.959007E-05,1.870542E-05,1.786071E-05,1.705415E-05,
1.628401E-05,1.554865E-05,1.484650E-05,1.417606E-05,1.353589E-05,1.292463E-05,1.234097E-05,
1.178368E-05,1.125154E-05,1.074344E-05,1.025829E-05,9.795037E-06,9.352709E-06,8.930356E-06,
8.527075E-06,8.142006E-06,7.774326E-06,7.423250E-06,7.088028E-06,6.767944E-06,6.462315E-06,
6.170487E-06,5.891838E-06,5.625772E-06,5.371721E-06,5.129143E-06,4.897519E-06,4.676355E-06,
4.465178E-06,4.263538E-06,4.071003E-06,3.887163E-06,3.711625E-06,3.544014E-06,3.383972E-06,
3.231157E-06,3.085243E-06,2.945919E-06,2.812886E-06,2.685860E-06,2.564571E-06,2.448759E-06,
2.338177E-06,2.232589E-06,2.131769E-06,2.035502E-06,1.943582E-06,1.855813E-06,1.772007E-06,
1.691986E-06,1.615579E-06,1.542622E-06,1.472959E-06,1.406443E-06,1.342930E-06,1.282286E-06,
1.224380E-06,1.169089E-06,1.116294E-06,1.065884E-06,1.017751E-06,9.717908E-07,9.279063E-07,
8.860035E-07,8.459930E-07,8.077893E-07,7.713109E-07,7.364797E-07,7.032215E-07,6.714651E-07,
6.411428E-07,6.121898E-07,5.845443E-07,5.581472E-07,5.329422E-07,5.088754E-07,4.858954E-07,
4.639531E-07,4.430018E-07],
[5.500000E+01,5.349602E+01,5.203317E+01,5.061032E+01,4.922638E+01,4.788028E+01,4.657099E+01,
1.002975E+02,9.755487E+01,9.488722E+01,9.229253E+01,8.976878E+01,8.731405E+01,8.492644E+01,
1.376041E+02,1.338413E+02,1.301814E+02,1.266216E+02,1.231591E+02,1.197913E+02,1.165156E+02,
1.683295E+02,1.637265E+02,1.592494E+02,1.548947E+02,1.506591E+02,1.465394E+02,1.425322E+02,
1.936347E+02,1.883397E+02,1.831896E+02,1.781802E+02,1.733079E+02,1.685688E+02,1.639593E+02,
1.594758E+02,1.551149E+02,1.508733E+02,1.467476E+02,1.427348E+02,1.388317E+02,1.350354E+02,
1.313428E+02,1.277512E+02,1.242579E+02,1.208600E+02,1.175551E+02,1.143406E+02,1.112139E+02,
1.081728E+02,1.052148E+02,1.023377E+02,9.953925E+01,9.681734E+01,9.416987E+01,9.159479E+01,
8.909012E+01,8.665395E+01,8.428439E+01,8.197963E+01,7.973789E+01,7.755746E+01,7.543664E+01,
7.337382E+01,7.136741E+01,6.941587E+01,6.751769E+01,6.567141E+01,6.387562E+01,6.212894E+01,
6.043002E+01,5.877756E+01,5.717028E+01,5.560696E+01,5.408638E+01,5.260739E+01,5.116884E+01,
4.976962E+01,4.840867E+01,4.708493E+01,4.579739E+01,4.454506E+01,4.332697E+01,4.214220E+01,
4.098981E+01,3.986895E+01,3.877873E+01,3.771832E+01,3.668691E+01,3.568371E+01,3.470793E+01,
3.375884E+01,3.283571E+01,3.193781E+01,3.106447E+01,3.021501E+01,2.938878E+01,2.858514E+01,
2.780348E+01,2.704319E+01,2.630369E+01,2.558442E+01,2.488481E+01,2.420434E+01,2.354247E+01,
2.289870E+01,2.227253E+01,2.166349E+01,2.107110E+01,2.049491E+01,1.993447E+01,1.938936E+01,
1.885916E+01,1.834346E+01,1.784185E+01,1.735397E+01,1.687942E+01,1.641785E+01,1.596891E+01,
1.553224E+01,1.510751E+01,1.469439E+01,1.429257E+01,1.390174E+01,1.352160E+01,1.315185E+01,
1.279221E+01,1.244241E+01,1.210217E+01,1.177123E+01,1.144935E+01,1.113627E+01,1.083174E+01,
1.053555E+01,1.024745E+01,9.967237E+00,9.694682E+00,9.429580E+00,9.171728E+00,8.920927E+00,
8.676983E+00,8.439711E+00,8.208926E+00,7.984453E+00,7.766118E+00,7.553753E+00,7.347195E+00,
7.146286E+00,6.950870E+00,6.760798E+00,6.575924E+00,6.396105E+00,6.221203E+00,6.051084E+00,
5.885617E+00,5.724674E+00,5.568133E+00,5.415872E+00,5.267774E+00,5.123727E+00,4.983618E+00,
4.847341E+00,4.714790E+00,4.585864E+00,4.460463E+00,4.338492E+00,4.219855E+00,4.104463E+00,
3.992226E+00,3.883059E+00,3.776876E+00,3.673597E+00,3.573143E+00,3.475435E+00,3.380399E+00,
3.287962E+00,3.198052E+00,3.110601E+00,3.025542E+00,2.942808E+00,2.862337E+00,2.784066E+00,
2.707936E+00,2.633887E+00,2.561863E+00,2.491809E+00,2.423670E+00,2.357395E+00,2.292932E+00,
2.230232E+00,2.169246E+00,2.109928E+00,2.052232E+00,1.996113E+00,1.941529E+00,1.888438E+00,
1.836799E+00,1.786571E+00,1.737718E+00,1.690200E+00,1.643981E+00,1.599026E+00,1.555301E+00,
1.512771E+00,1.471404E+00,1.431169E+00,1.392033E+00,1.353968E+00,1.316944E+00,1.280932E+00,
1.245905E+00,1.211835E+00,1.178698E+00,1.146466E+00,1.115116E+00,1.084623E+00,1.054964E+00,
1.026116E+00,9.980566E-01,9.707647E-01,9.442191E-01,9.183994E-01,8.932857E-01,8.688588E-01,
8.450998E-01,8.219905E-01,7.995131E-01,7.776504E-01,7.563855E-01,7.357021E-01,7.155843E-01,
6.960166E-01,6.769840E-01,6.584718E-01,6.404659E-01,6.229523E-01,6.059176E-01,5.893488E-01,
5.732330E-01,5.575579E-01,5.423115E-01,5.274819E-01,5.130579E-01,4.990283E-01,4.853824E-01,
4.721095E-01,4.591997E-01,4.466428E-01,4.344294E-01,4.225499E-01,4.109952E-01,3.997565E-01,
3.888252E-01,3.781927E-01,3.678510E-01,3.577921E-01,3.480083E-01,3.384920E-01,3.292359E-01,
3.202329E-01,3.114761E-01,3.029588E-01,2.946744E-01,2.866165E-01,2.787790E-01,2.711557E-01,
2.637410E-01,2.565290E-01,2.495142E-01,2.426912E-01,2.360548E-01,2.295998E-01,2.233214E-01,
2.172147E-01,2.112749E-01,2.054976E-01,1.998783E-01,1.944126E-01,1.890964E-01,1.839255E-01,
1.788961E-01,1.740041E-01,1.692460E-01,1.646180E-01,1.601165E-01,1.557381E-01,1.514794E-01,
1.473372E-01,1.433082E-01,1.393895E-01,1.355779E-01,1.318705E-01,1.282645E-01,1.247571E-01,
1.213456E-01,1.180274E-01,1.147999E-01,1.116607E-01,1.086073E-01,1.056375E-01,1.027488E-01,
9.993914E-02,9.720630E-02,9.454818E-02,9.196276E-02,8.944803E-02,8.700207E-02,8.462300E-02,
8.230898E-02,8.005823E-02,7.786904E-02,7.573970E-02,7.366860E-02,7.165412E-02,6.969474E-02,
6.778893E-02,6.593524E-02,6.413224E-02,6.237854E-02,6.067279E-02,5.901369E-02,5.739996E-02,
5.583036E-02,5.430367E-02,5.281874E-02,5.137440E-02,4.996957E-02,4.860315E-02,4.727409E-02,
4.598138E-02,4.472402E-02,4.350104E-02,4.231150E-02,4.115449E-02,4.002912E-02,3.893452E-02,
3.786985E-02,3.683430E-02,3.582706E-02,3.484737E-02,3.389447E-02,3.296762E-02,3.206612E-02,
3.118927E-02,3.033640E-02,2.950685E-02,2.869998E-02,2.791518E-02,2.715184E-02,2.640937E-02,
2.568720E-02,2.498478E-02,2.430157E-02,2.363705E-02,2.299069E-02,2.236201E-02,2.175052E-02,
2.115575E-02,2.057724E-02,2.001456E-02,1.946726E-02,1.893493E-02,1.841715E-02,1.791353E-02,
1.742368E-02,1.694723E-02],
[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,2.717171E+02,2.663889E+02,
2.611652E+02,2.560439E+02,2.510230E+02,2.461006E+02,2.412747E+02,2.365435E+02,2.319050E+02,
2.273575E+02,2.228991E+02,2.185282E+02,2.142430E+02,2.100418E+02,2.059231E+02,2.018850E+02,
1.979262E+02,1.940450E+02,1.902399E+02,1.865094E+02,1.828520E+02,1.792664E+02,1.757511E+02,
1.723048E+02,1.689260E+02,1.656134E+02,1.623658E+02,1.591820E+02,1.560605E+02,1.530002E+02,
1.500000E+02,1.470586E+02,1.441749E+02,1.413477E+02,1.385759E+02,1.358585E+02,1.331944E+02,
1.305826E+02,1.280219E+02,1.255115E+02,1.230503E+02,1.206374E+02,1.182717E+02,1.159525E+02,
1.136787E+02,1.114496E+02,1.092641E+02,1.071215E+02,1.050209E+02,1.029615E+02,1.009425E+02,
9.896309E+01,9.702249E+01,9.511994E+01,9.325469E+01,9.142602E+01,8.963322E+01,8.787556E+01,
8.615238E+01,8.446298E+01,8.280671E+01,8.118292E+01,7.959098E+01,7.803025E+01,7.650012E+01,
7.500000E+01,7.352930E+01,7.208743E+01,7.067384E+01,6.928797E+01,6.792927E+01,6.659722E+01,
6.529129E+01,6.401097E+01,6.275575E+01,6.152515E+01,6.031868E+01,5.913587E+01,5.797625E+01,
5.683937E+01,5.572479E+01,5.463206E+01,5.356076E+01,5.251046E+01,5.148076E+01,5.047126E+01,
4.948155E+01,4.851124E+01,4.755997E+01,4.662735E+01,4.571301E+01,4.481661E+01,4.393778E+01,
4.307619E+01,4.223149E+01,4.140336E+01,4.059146E+01,3.979549E+01,3.901512E+01,3.825006E+01,
3.750000E+01,3.676465E+01,3.604372E+01,3.533692E+01,3.464398E+01,3.396464E+01,3.329861E+01,
3.264565E+01,3.200548E+01,3.137788E+01,3.076258E+01,3.015934E+01,2.956793E+01,2.898813E+01,
2.841969E+01,2.786239E+01,2.731603E+01,2.678038E+01,2.625523E+01,2.574038E+01,2.523563E+01,
2.474077E+01,2.425562E+01,2.377998E+01,2.331367E+01,2.285651E+01,2.240830E+01,2.196889E+01,
2.153809E+01,2.111575E+01,2.070168E+01,2.029573E+01,1.989774E+01,1.950756E+01,1.912503E+01,
1.875000E+01,1.838232E+01,1.802186E+01,1.766846E+01,1.732199E+01,1.698232E+01,1.664931E+01,
1.632282E+01,1.600274E+01,1.568894E+01,1.538129E+01,1.507967E+01,1.478397E+01,1.449406E+01,
1.420984E+01,1.393120E+01,1.365801E+01,1.339019E+01,1.312762E+01,1.287019E+01,1.261781E+01,
1.237039E+01,1.212781E+01,1.188999E+01,1.165684E+01,1.142825E+01,1.120415E+01,1.098445E+01,
1.076905E+01,1.055787E+01,1.035084E+01,1.014787E+01,9.948872E+00,9.753781E+00,9.562515E+00,
9.375000E+00,9.191162E+00,9.010929E+00,8.834230E+00,8.660996E+00,8.491159E+00,8.324653E+00,
8.161412E+00,8.001371E+00,7.844469E+00,7.690644E+00,7.539835E+00,7.391984E+00,7.247031E+00,
7.104921E+00,6.965598E+00,6.829007E+00,6.695094E+00,6.563808E+00,6.435095E+00,6.308907E+00,
6.185193E+00,6.063905E+00,5.944996E+00,5.828418E+00,5.714127E+00,5.602076E+00,5.492223E+00,
5.384524E+00,5.278936E+00,5.175420E+00,5.073933E+00,4.974436E+00,4.876890E+00,4.781258E+00,
4.687500E+00,4.595581E+00,4.505464E+00,4.417115E+00,4.330498E+00,4.245580E+00,4.162326E+00,
4.080706E+00,4.000686E+00,3.922235E+00,3.845322E+00,3.769918E+00,3.695992E+00,3.623516E+00,
3.552461E+00,3.482799E+00,3.414504E+00,3.347547E+00,3.281904E+00,3.217548E+00,3.154454E+00,
3.092597E+00,3.031953E+00,2.972498E+00,2.914209E+00,2.857063E+00,2.801038E+00,2.746111E+00,
2.692262E+00,2.639468E+00,2.587710E+00,2.536966E+00,2.487218E+00,2.438445E+00,2.390629E+00,
2.343750E+00,2.297790E+00,2.252732E+00,2.208558E+00,2.165249E+00,2.122790E+00,2.081163E+00,
2.040353E+00,2.000343E+00,1.961117E+00,1.922661E+00,1.884959E+00,1.847996E+00,1.811758E+00,
1.776230E+00,1.741400E+00,1.707252E+00,1.673774E+00,1.640952E+00,1.608774E+00,1.577227E+00,
1.546298E+00,1.515976E+00,1.486249E+00,1.457105E+00,1.428532E+00,1.400519E+00,1.373056E+00,
1.346131E+00,1.319734E+00,1.293855E+00,1.268483E+00,1.243609E+00,1.219223E+00,1.195314E+00,
1.171875E+00,1.148895E+00,1.126366E+00,1.104279E+00,1.082625E+00,1.061395E+00,1.040582E+00,
1.020176E+00,1.000171E+00,9.805587E-01,9.613305E-01,9.424794E-01,9.239979E-01,9.058789E-01,
8.881152E-01,8.706998E-01,8.536259E-01,8.368868E-01,8.204760E-01,8.043869E-01,7.886134E-01,
7.731492E-01,7.579882E-01,7.431245E-01,7.285523E-01,7.142658E-01,7.002595E-01,6.865278E-01,
6.730654E-01,6.598670E-01,6.469274E-01,6.342416E-01,6.218045E-01,6.096113E-01,5.976572E-01,
5.859375E-01,5.744476E-01,5.631831E-01,5.521394E-01,5.413123E-01,5.306975E-01,5.202908E-01,
5.100882E-01,5.000857E-01,4.902793E-01,4.806652E-01,4.712397E-01,4.619990E-01,4.529395E-01,
4.440576E-01,4.353499E-01,4.268129E-01,4.184434E-01,4.102380E-01,4.021935E-01,3.943067E-01,
3.865746E-01,3.789941E-01,3.715622E-01,3.642761E-01,3.571329E-01,3.501297E-01,3.432639E-01,
3.365327E-01,3.299335E-01,3.234637E-01,3.171208E-01,3.109023E-01,3.048056E-01,2.988286E-01,
2.929687E-01,2.872238E-01,2.815915E-01,2.760697E-01,2.706561E-01,2.653487E-01,2.601454E-01,
2.550441E-01,2.500429E-01,2.451397E-01,2.403326E-01,2.356198E-01,2.309995E-01,2.264697E-01,
2.220288E-01,2.176749E-01]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variable (from internal database)
food_multiplier = pd.Series([15., 110., 240.])
# input variables that change per simulation
ted_empty.foliar_diss_hlife = pd.Series([15., 25., 35.])
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
for i in range(3):
result[i] = ted_empty.daily_plant_timeseries(i, ted_empty.app_rate_min[i], food_multiplier[i], daily_flag[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_h2o_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil pore water and surface puddles
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:param water_type; type of water (pore water or surface puddles)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association, rather it is one year from the day of the 1st pesticide application
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.235571E-02,2.134616E-02,2.038220E-02,4.181749E-02,3.992908E-02,3.812594E-02,
5.875995E-02,5.610644E-02,5.357277E-02,5.115350E-02,4.884349E-02,4.663780E-02,
4.453171E-02,4.252073E-02,4.060056E-02,3.876711E-02,3.701645E-02,3.534484E-02,
3.374873E-02,3.222469E-02,3.076947E-02,2.937997E-02,2.805322E-02,2.678638E-02,
2.557675E-02,2.442175E-02,2.331890E-02,2.226586E-02,2.126037E-02,2.030028E-02,
1.938355E-02,1.850822E-02,1.767242E-02,1.687436E-02,1.611234E-02,1.538474E-02,
1.468999E-02,1.402661E-02,1.339319E-02,1.278838E-02,1.221087E-02,1.165945E-02,
1.113293E-02,1.063018E-02,1.015014E-02,9.691777E-03,9.254112E-03,8.836211E-03,
8.437182E-03,8.056172E-03,7.692368E-03,7.344993E-03,7.013305E-03,6.696596E-03,
6.394188E-03,6.105437E-03,5.829725E-03,5.566464E-03,5.315091E-03,5.075070E-03,
4.845888E-03,4.627056E-03,4.418105E-03,4.218591E-03,4.028086E-03,3.846184E-03,
3.672497E-03,3.506653E-03,3.348298E-03,3.197094E-03,3.052718E-03,2.914863E-03,
2.783232E-03,2.657546E-03,2.537535E-03,2.422944E-03,2.313528E-03,2.209053E-03,
2.109295E-03,2.014043E-03,1.923092E-03,1.836248E-03,1.753326E-03,1.674149E-03,
1.598547E-03,1.526359E-03,1.457431E-03,1.391616E-03,1.328773E-03,1.268768E-03,
1.211472E-03,1.156764E-03,1.104526E-03,1.054648E-03,1.007022E-03,9.615460E-04,
9.181242E-04,8.766632E-04,8.370745E-04,7.992735E-04,7.631796E-04,7.287156E-04,
6.958080E-04,6.643864E-04,6.343838E-04,6.057361E-04,5.783820E-04,5.522632E-04,
5.273239E-04,5.035108E-04,4.807730E-04,4.590621E-04,4.383316E-04,4.185372E-04,
3.996368E-04,3.815898E-04,3.643578E-04,3.479040E-04,3.321932E-04,3.171919E-04,
3.028680E-04,2.891910E-04,2.761316E-04,2.636619E-04,2.517554E-04,2.403865E-04,
2.295310E-04,2.191658E-04,2.092686E-04,1.998184E-04,1.907949E-04,1.821789E-04,
1.739520E-04,1.660966E-04,1.585960E-04,1.514340E-04,1.445955E-04,1.380658E-04,
1.318310E-04,1.258777E-04,1.201933E-04,1.147655E-04,1.095829E-04,1.046343E-04,
9.990919E-05,9.539745E-05,9.108945E-05,8.697600E-05,8.304830E-05,7.929798E-05,
7.571701E-05,7.229775E-05,6.903290E-05,6.591548E-05,6.293885E-05,6.009663E-05,
5.738276E-05,5.479145E-05,5.231715E-05,4.995459E-05,4.769873E-05,4.554473E-05,
4.348800E-05,4.152415E-05,3.964899E-05,3.785850E-05,3.614887E-05,3.451645E-05,
3.295774E-05,3.146942E-05,3.004831E-05,2.869138E-05,2.739572E-05,2.615858E-05,
2.497730E-05,2.384936E-05,2.277236E-05,2.174400E-05,2.076208E-05,1.982449E-05,
1.892925E-05,1.807444E-05,1.725822E-05,1.647887E-05,1.573471E-05,1.502416E-05,
1.434569E-05,1.369786E-05,1.307929E-05,1.248865E-05,1.192468E-05,1.138618E-05,
1.087200E-05,1.038104E-05,9.912247E-06,9.464626E-06,9.037219E-06,8.629112E-06,
8.239435E-06,7.867356E-06,7.512079E-06,7.172845E-06,6.848931E-06,6.539644E-06,
6.244324E-06,5.962341E-06,5.693091E-06,5.436000E-06,5.190519E-06,4.956124E-06,
4.732313E-06,4.518609E-06,4.314556E-06,4.119718E-06,3.933678E-06,3.756039E-06,
3.586423E-06,3.424465E-06,3.269822E-06,3.122162E-06,2.981170E-06,2.846545E-06,
2.718000E-06,2.595260E-06,2.478062E-06,2.366156E-06,2.259305E-06,2.157278E-06,
2.059859E-06,1.966839E-06,1.878020E-06,1.793211E-06,1.712233E-06,1.634911E-06,
1.561081E-06,1.490585E-06,1.423273E-06,1.359000E-06,1.297630E-06,1.239031E-06,
1.183078E-06,1.129652E-06,1.078639E-06,1.029929E-06,9.834195E-07,9.390098E-07,
8.966056E-07,8.561164E-07,8.174555E-07,7.805405E-07,7.452926E-07,7.116364E-07,
6.795000E-07,6.488149E-07,6.195154E-07,5.915391E-07,5.648262E-07,5.393195E-07,
5.149647E-07,4.917097E-07,4.695049E-07,4.483028E-07,4.280582E-07,4.087278E-07,
3.902703E-07,3.726463E-07,3.558182E-07,3.397500E-07,3.244074E-07,3.097577E-07,
2.957696E-07,2.824131E-07,2.696598E-07,2.574824E-07,2.458549E-07,2.347525E-07,
2.241514E-07,2.140291E-07,2.043639E-07,1.951351E-07,1.863231E-07,1.779091E-07,
1.698750E-07,1.622037E-07,1.548789E-07,1.478848E-07,1.412065E-07,1.348299E-07,
1.287412E-07,1.229274E-07,1.173762E-07,1.120757E-07,1.070145E-07,1.021819E-07,
9.756757E-08,9.316157E-08,8.895455E-08,8.493750E-08,8.110186E-08,7.743943E-08,
7.394239E-08,7.060327E-08,6.741494E-08,6.437059E-08,6.146372E-08,5.868811E-08,
5.603785E-08,5.350727E-08,5.109097E-08,4.878378E-08,4.658079E-08,4.447727E-08,
4.246875E-08,4.055093E-08,3.871971E-08,3.697119E-08,3.530163E-08,3.370747E-08,
3.218529E-08,3.073186E-08,2.934406E-08,2.801893E-08,2.675364E-08,2.554549E-08,
2.439189E-08,2.329039E-08,2.223864E-08,2.123438E-08,2.027546E-08,1.935986E-08,
1.848560E-08,1.765082E-08,1.685373E-08,1.609265E-08,1.536593E-08,1.467203E-08,
1.400946E-08,1.337682E-08,1.277274E-08,1.219595E-08,1.164520E-08,1.111932E-08,
1.061719E-08,1.013773E-08,9.679929E-09,9.242799E-09,8.825409E-09,8.426867E-09,
8.046324E-09,7.682965E-09,7.336014E-09,7.004732E-09,6.688409E-09,6.386371E-09,
6.097973E-09,5.822598E-09,5.559659E-09,5.308594E-09,5.068866E-09,4.839964E-09,
4.621399E-09,4.412704E-09,4.213434E-09,4.023162E-09,3.841482E-09,3.668007E-09],
[9.391514E-02,8.762592E-02,8.175787E-02,7.628279E-02,7.117436E-02,6.640803E-02,
6.196088E-02,1.517267E-01,1.415660E-01,1.320858E-01,1.232404E-01,1.149873E-01,
1.072870E-01,1.001023E-01,1.873139E-01,1.747700E-01,1.630662E-01,1.521461E-01,
1.419574E-01,1.324509E-01,1.235811E-01,2.092203E-01,1.952095E-01,1.821369E-01,
1.699397E-01,1.585594E-01,1.479411E-01,1.380340E-01,2.227054E-01,2.077915E-01,
1.938763E-01,1.808930E-01,1.687791E-01,1.574765E-01,1.469307E-01,1.370912E-01,
1.279106E-01,1.193449E-01,1.113527E-01,1.038957E-01,9.693814E-02,9.044648E-02,
8.438955E-02,7.873824E-02,7.346537E-02,6.854562E-02,6.395532E-02,5.967242E-02,
5.567634E-02,5.194786E-02,4.846907E-02,4.522324E-02,4.219478E-02,3.936912E-02,
3.673269E-02,3.427281E-02,3.197766E-02,2.983621E-02,2.783817E-02,2.597393E-02,
2.423454E-02,2.261162E-02,2.109739E-02,1.968456E-02,1.836634E-02,1.713640E-02,
1.598883E-02,1.491811E-02,1.391909E-02,1.298697E-02,1.211727E-02,1.130581E-02,
1.054869E-02,9.842280E-03,9.183172E-03,8.568202E-03,7.994415E-03,7.459053E-03,
6.959543E-03,6.493483E-03,6.058634E-03,5.652905E-03,5.274347E-03,4.921140E-03,
4.591586E-03,4.284101E-03,3.997208E-03,3.729527E-03,3.479771E-03,3.246741E-03,
3.029317E-03,2.826453E-03,2.637174E-03,2.460570E-03,2.295793E-03,2.142051E-03,
1.998604E-03,1.864763E-03,1.739886E-03,1.623371E-03,1.514658E-03,1.413226E-03,
1.318587E-03,1.230285E-03,1.147896E-03,1.071025E-03,9.993019E-04,9.323816E-04,
8.699428E-04,8.116854E-04,7.573292E-04,7.066131E-04,6.592934E-04,6.151425E-04,
5.739482E-04,5.355126E-04,4.996509E-04,4.661908E-04,4.349714E-04,4.058427E-04,
3.786646E-04,3.533066E-04,3.296467E-04,3.075712E-04,2.869741E-04,2.677563E-04,
2.498255E-04,2.330954E-04,2.174857E-04,2.029213E-04,1.893323E-04,1.766533E-04,
1.648233E-04,1.537856E-04,1.434871E-04,1.338782E-04,1.249127E-04,1.165477E-04,
1.087429E-04,1.014607E-04,9.466615E-05,8.832664E-05,8.241167E-05,7.689281E-05,
7.174353E-05,6.693908E-05,6.245637E-05,5.827385E-05,5.437143E-05,5.073034E-05,
4.733308E-05,4.416332E-05,4.120584E-05,3.844640E-05,3.587176E-05,3.346954E-05,
3.122818E-05,2.913693E-05,2.718571E-05,2.536517E-05,2.366654E-05,2.208166E-05,
2.060292E-05,1.922320E-05,1.793588E-05,1.673477E-05,1.561409E-05,1.456846E-05,
1.359286E-05,1.268258E-05,1.183327E-05,1.104083E-05,1.030146E-05,9.611601E-06,
8.967941E-06,8.367385E-06,7.807046E-06,7.284232E-06,6.796428E-06,6.341292E-06,
5.916635E-06,5.520415E-06,5.150730E-06,4.805801E-06,4.483971E-06,4.183692E-06,
3.903523E-06,3.642116E-06,3.398214E-06,3.170646E-06,2.958317E-06,2.760208E-06,
2.575365E-06,2.402900E-06,2.241985E-06,2.091846E-06,1.951762E-06,1.821058E-06,
1.699107E-06,1.585323E-06,1.479159E-06,1.380104E-06,1.287682E-06,1.201450E-06,
1.120993E-06,1.045923E-06,9.758808E-07,9.105289E-07,8.495535E-07,7.926615E-07,
7.395793E-07,6.900519E-07,6.438412E-07,6.007251E-07,5.604963E-07,5.229616E-07,
4.879404E-07,4.552645E-07,4.247768E-07,3.963307E-07,3.697897E-07,3.450260E-07,
3.219206E-07,3.003625E-07,2.802482E-07,2.614808E-07,2.439702E-07,2.276322E-07,
2.123884E-07,1.981654E-07,1.848948E-07,1.725130E-07,1.609603E-07,1.501813E-07,
1.401241E-07,1.307404E-07,1.219851E-07,1.138161E-07,1.061942E-07,9.908269E-08,
9.244741E-08,8.625649E-08,8.048015E-08,7.509063E-08,7.006204E-08,6.537019E-08,
6.099255E-08,5.690806E-08,5.309710E-08,4.954134E-08,4.622371E-08,4.312824E-08,
4.024007E-08,3.754532E-08,3.503102E-08,3.268510E-08,3.049627E-08,2.845403E-08,
2.654855E-08,2.477067E-08,2.311185E-08,2.156412E-08,2.012004E-08,1.877266E-08,
1.751551E-08,1.634255E-08,1.524814E-08,1.422702E-08,1.327427E-08,1.238534E-08,
1.155593E-08,1.078206E-08,1.006002E-08,9.386329E-09,8.757755E-09,8.171274E-09,
7.624068E-09,7.113507E-09,6.637137E-09,6.192668E-09,5.777963E-09,5.391030E-09,
5.030009E-09,4.693165E-09,4.378877E-09,4.085637E-09,3.812034E-09,3.556754E-09,
3.318569E-09,3.096334E-09,2.888982E-09,2.695515E-09,2.515005E-09,2.346582E-09,
2.189439E-09,2.042819E-09,1.906017E-09,1.778377E-09,1.659284E-09,1.548167E-09,
1.444491E-09,1.347758E-09,1.257502E-09,1.173291E-09,1.094719E-09,1.021409E-09,
9.530086E-10,8.891884E-10,8.296421E-10,7.740835E-10,7.222454E-10,6.738788E-10,
6.287512E-10,5.866456E-10,5.473597E-10,5.107046E-10,4.765043E-10,4.445942E-10,
4.148211E-10,3.870417E-10,3.611227E-10,3.369394E-10,3.143756E-10,2.933228E-10,
2.736798E-10,2.553523E-10,2.382521E-10,2.222971E-10,2.074105E-10,1.935209E-10,
1.805614E-10,1.684697E-10,1.571878E-10,1.466614E-10,1.368399E-10,1.276762E-10,
1.191261E-10,1.111486E-10,1.037053E-10,9.676043E-11,9.028068E-11,8.423485E-11,
7.859390E-11,7.333070E-11,6.841996E-11,6.383808E-11,5.956303E-11,5.557428E-11,
5.185263E-11,4.838022E-11,4.514034E-11,4.211743E-11,3.929695E-11,3.666535E-11,
3.420998E-11,3.191904E-11,2.978152E-11,2.778714E-11,2.592632E-11,2.419011E-11,
2.257017E-11,2.105871E-11,1.964847E-11,1.833267E-11,1.710499E-11,1.595952E-11],
[1.172251E-01,1.132320E-01,1.093749E-01,1.056492E-01,1.020504E-01,9.857420E-02,
9.521640E-02,9.197298E-02,8.884005E-02,8.581383E-02,8.289069E-02,8.006713E-02,
7.733975E-02,7.470528E-02,7.216054E-02,6.970249E-02,6.732817E-02,6.503472E-02,
6.281940E-02,6.067954E-02,5.861257E-02,5.661601E-02,5.468746E-02,5.282461E-02,
5.102521E-02,4.928710E-02,4.760820E-02,4.598649E-02,4.442002E-02,4.290691E-02,
4.144535E-02,4.003357E-02,3.866988E-02,3.735264E-02,3.608027E-02,3.485124E-02,
3.366408E-02,3.251736E-02,3.140970E-02,3.033977E-02,2.930629E-02,2.830801E-02,
2.734373E-02,2.641230E-02,2.551260E-02,2.464355E-02,2.380410E-02,2.299325E-02,
2.221001E-02,2.145346E-02,2.072267E-02,2.001678E-02,1.933494E-02,1.867632E-02,
1.804014E-02,1.742562E-02,1.683204E-02,1.625868E-02,1.570485E-02,1.516989E-02,
1.465314E-02,1.415400E-02,1.367187E-02,1.320615E-02,1.275630E-02,1.232178E-02,
1.190205E-02,1.149662E-02,1.110501E-02,1.072673E-02,1.036134E-02,1.000839E-02,
9.667469E-03,9.338160E-03,9.020068E-03,8.712811E-03,8.416021E-03,8.129340E-03,
7.852425E-03,7.584943E-03,7.326572E-03,7.077002E-03,6.835933E-03,6.603076E-03,
6.378151E-03,6.160888E-03,5.951025E-03,5.748312E-03,5.552503E-03,5.363364E-03,
5.180668E-03,5.004196E-03,4.833735E-03,4.669080E-03,4.510034E-03,4.356406E-03,
4.208010E-03,4.064670E-03,3.926212E-03,3.792471E-03,3.663286E-03,3.538501E-03,
3.417966E-03,3.301538E-03,3.189075E-03,3.080444E-03,2.975513E-03,2.874156E-03,
2.776251E-03,2.681682E-03,2.590334E-03,2.502098E-03,2.416867E-03,2.334540E-03,
2.255017E-03,2.178203E-03,2.104005E-03,2.032335E-03,1.963106E-03,1.896236E-03,
1.831643E-03,1.769250E-03,1.708983E-03,1.650769E-03,1.594538E-03,1.540222E-03,
1.487756E-03,1.437078E-03,1.388126E-03,1.340841E-03,1.295167E-03,1.251049E-03,
1.208434E-03,1.167270E-03,1.127508E-03,1.089101E-03,1.052003E-03,1.016168E-03,
9.815531E-04,9.481178E-04,9.158214E-04,8.846252E-04,8.544916E-04,8.253845E-04,
7.972689E-04,7.701110E-04,7.438782E-04,7.185389E-04,6.940629E-04,6.704205E-04,
6.475836E-04,6.255245E-04,6.042168E-04,5.836350E-04,5.637542E-04,5.445507E-04,
5.260013E-04,5.080838E-04,4.907766E-04,4.740589E-04,4.579107E-04,4.423126E-04,
4.272458E-04,4.126923E-04,3.986344E-04,3.850555E-04,3.719391E-04,3.592695E-04,
3.470314E-04,3.352103E-04,3.237918E-04,3.127622E-04,3.021084E-04,2.918175E-04,
2.818771E-04,2.722753E-04,2.630006E-04,2.540419E-04,2.453883E-04,2.370295E-04,
2.289554E-04,2.211563E-04,2.136229E-04,2.063461E-04,1.993172E-04,1.925277E-04,
1.859695E-04,1.796347E-04,1.735157E-04,1.676051E-04,1.618959E-04,1.563811E-04,
1.510542E-04,1.459087E-04,1.409386E-04,1.361377E-04,1.315003E-04,1.270209E-04,
1.226941E-04,1.185147E-04,1.144777E-04,1.105782E-04,1.068115E-04,1.031731E-04,
9.965861E-05,9.626387E-05,9.298477E-05,8.981737E-05,8.675786E-05,8.380257E-05,
8.094794E-05,7.819056E-05,7.552710E-05,7.295437E-05,7.046928E-05,6.806884E-05,
6.575016E-05,6.351047E-05,6.134707E-05,5.925736E-05,5.723884E-05,5.528908E-05,
5.340573E-05,5.158653E-05,4.982930E-05,4.813194E-05,4.649239E-05,4.490868E-05,
4.337893E-05,4.190128E-05,4.047397E-05,3.909528E-05,3.776355E-05,3.647719E-05,
3.523464E-05,3.403442E-05,3.287508E-05,3.175523E-05,3.067354E-05,2.962868E-05,
2.861942E-05,2.764454E-05,2.670286E-05,2.579327E-05,2.491465E-05,2.406597E-05,
2.324619E-05,2.245434E-05,2.168946E-05,2.095064E-05,2.023699E-05,1.954764E-05,
1.888178E-05,1.823859E-05,1.761732E-05,1.701721E-05,1.643754E-05,1.587762E-05,
1.533677E-05,1.481434E-05,1.430971E-05,1.382227E-05,1.335143E-05,1.289663E-05,
1.245733E-05,1.203298E-05,1.162310E-05,1.122717E-05,1.084473E-05,1.047532E-05,
1.011849E-05,9.773820E-06,9.440888E-06,9.119297E-06,8.808660E-06,8.508605E-06,
8.218770E-06,7.938809E-06,7.668384E-06,7.407170E-06,7.154855E-06,6.911134E-06,
6.675716E-06,6.448316E-06,6.228663E-06,6.016492E-06,5.811548E-06,5.613585E-06,
5.422366E-06,5.237660E-06,5.059247E-06,4.886910E-06,4.720444E-06,4.559648E-06,
4.404330E-06,4.254302E-06,4.109385E-06,3.969404E-06,3.834192E-06,3.703585E-06,
3.577428E-06,3.455567E-06,3.337858E-06,3.224158E-06,3.114332E-06,3.008246E-06,
2.905774E-06,2.806793E-06,2.711183E-06,2.618830E-06,2.529623E-06,2.443455E-06,
2.360222E-06,2.279824E-06,2.202165E-06,2.127151E-06,2.054693E-06,1.984702E-06,
1.917096E-06,1.851793E-06,1.788714E-06,1.727784E-06,1.668929E-06,1.612079E-06,
1.557166E-06,1.504123E-06,1.452887E-06,1.403396E-06,1.355592E-06,1.309415E-06,
1.264812E-06,1.221728E-06,1.180111E-06,1.139912E-06,1.101082E-06,1.063576E-06,
1.027346E-06,9.923511E-07,9.585480E-07,9.258963E-07,8.943569E-07,8.638918E-07,
8.344645E-07,8.060396E-07,7.785829E-07,7.520615E-07,7.264435E-07,7.016982E-07,
6.777958E-07,6.547076E-07,6.324058E-07,6.108638E-07,5.900555E-07,5.699560E-07,
5.505412E-07,5.317878E-07,5.136731E-07,4.961755E-07,4.792740E-07,4.629482E-07,
4.471784E-07,4.319459E-07,4.172322E-07,4.030198E-07,3.892914E-07,3.760307E-07]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.app_rate_conv1 = 11.2
ted_empty.h2o_depth_puddles = 1.3
ted_empty.soil_depth = 2.6
ted_empty.soil_porosity = 0.4339623
ted_empty.soil_bulk_density = 1.5
ted_empty.h2o_depth_soil = 0.0
ted_empty.soil_foc = 0.015
# internally specified variable
water_type = ['puddles', 'pore_water', 'puddles']
# input variables that change per simulation
ted_empty.aerobic_soil_meta_hlife = pd.Series([15., 10., 20.], dtype='float')
ted_empty.koc = pd.Series([1500., 1000., 2000.], dtype='float')
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
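            # hedged cross-check (inferred from the data above, not quoted from the TED source): between
            # applications the expected concentrations decay first-order with the aerobic soil metabolism
            # half-life; e.g. the tail of the third expected series declines by a factor of
            # exp(-ln(2) / 20.) ~= 0.9659 per day (2.641230E-02 / 2.734373E-02 ~= 0.9659),
            # consistent with the 20-day half-life assigned above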
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
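            # for reference: the flags above encode 3 applications at a 3-day interval (sim 0),
            # 5 applications at a 7-day interval (sim 1), and a single application on day 0 (sim 2)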
for i in range(3):
result[i] = ted_empty.daily_soil_h2o_timeseries(i, ted_empty.app_rate_min[i], daily_flag[i], water_type[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_plant_dew_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in dew that resides on broad leaf plants
:param i; simulation number/index
:param blp_conc; daily values of pesticide concentration in broad leaf plant dew
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
        # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
        # association, rather it is one year from the day of the 1st pesticide application
        # this represents Eq 11 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
        # create an empty TED object and empty pandas series for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[6.201749E+00,6.080137E+00,5.960909E+00,5.844019E+00,5.729422E+00,5.617071E+00,
5.506924E+00,1.160069E+01,1.137320E+01,1.115018E+01,1.093153E+01,1.071717E+01,
1.050702E+01,1.030098E+01,1.630073E+01,1.598109E+01,1.566771E+01,1.536047E+01,
1.505926E+01,1.476396E+01,1.447445E+01,2.039236E+01,1.999248E+01,1.960044E+01,
1.921609E+01,1.883927E+01,1.846984E+01,1.810766E+01,2.395433E+01,2.348460E+01,
2.302408E+01,2.257259E+01,2.212996E+01,2.169600E+01,2.127056E+01,2.085346E+01,
2.044453E+01,2.004363E+01,1.965059E+01,1.926525E+01,1.888747E+01,1.851710E+01,
1.815399E+01,1.779800E+01,1.744899E+01,1.710683E+01,1.677137E+01,1.644250E+01,
1.612007E+01,1.580396E+01,1.549406E+01,1.519023E+01,1.489236E+01,1.460033E+01,
1.431403E+01,1.403334E+01,1.375815E+01,1.348836E+01,1.322386E+01,1.296455E+01,
1.271032E+01,1.246108E+01,1.221673E+01,1.197717E+01,1.174230E+01,1.151204E+01,
1.128630E+01,1.106498E+01,1.084800E+01,1.063528E+01,1.042673E+01,1.022227E+01,
1.002181E+01,9.825293E+00,9.632625E+00,9.443735E+00,9.258549E+00,9.076994E+00,
8.899000E+00,8.724496E+00,8.553414E+00,8.385687E+00,8.221249E+00,8.060035E+00,
7.901982E+00,7.747029E+00,7.595115E+00,7.446179E+00,7.300164E+00,7.157013E+00,
7.016668E+00,6.879075E+00,6.744181E+00,6.611932E+00,6.482276E+00,6.355162E+00,
6.230541E+00,6.108364E+00,5.988583E+00,5.871150E+00,5.756021E+00,5.643149E+00,
5.532490E+00,5.424001E+00,5.317640E+00,5.213364E+00,5.111133E+00,5.010907E+00,
4.912646E+00,4.816312E+00,4.721867E+00,4.629274E+00,4.538497E+00,4.449500E+00,
4.362248E+00,4.276707E+00,4.192843E+00,4.110624E+00,4.030017E+00,3.950991E+00,
3.873515E+00,3.797557E+00,3.723090E+00,3.650082E+00,3.578506E+00,3.508334E+00,
3.439538E+00,3.372090E+00,3.305966E+00,3.241138E+00,3.177581E+00,3.115271E+00,
3.054182E+00,2.994291E+00,2.935575E+00,2.878010E+00,2.821574E+00,2.766245E+00,
2.712001E+00,2.658820E+00,2.606682E+00,2.555567E+00,2.505454E+00,2.456323E+00,
2.408156E+00,2.360934E+00,2.314637E+00,2.269249E+00,2.224750E+00,2.181124E+00,
2.138353E+00,2.096422E+00,2.055312E+00,2.015009E+00,1.975496E+00,1.936757E+00,
1.898779E+00,1.861545E+00,1.825041E+00,1.789253E+00,1.754167E+00,1.719769E+00,
1.686045E+00,1.652983E+00,1.620569E+00,1.588791E+00,1.557635E+00,1.527091E+00,
1.497146E+00,1.467788E+00,1.439005E+00,1.410787E+00,1.383122E+00,1.356000E+00,
1.329410E+00,1.303341E+00,1.277783E+00,1.252727E+00,1.228162E+00,1.204078E+00,
1.180467E+00,1.157319E+00,1.134624E+00,1.112375E+00,1.090562E+00,1.069177E+00,
1.048211E+00,1.027656E+00,1.007504E+00,9.877478E-01,9.683787E-01,9.493894E-01,
9.307724E-01,9.125205E-01,8.946266E-01,8.770835E-01,8.598844E-01,8.430226E-01,
8.264914E-01,8.102845E-01,7.943953E-01,7.788177E-01,7.635455E-01,7.485729E-01,
7.338938E-01,7.195026E-01,7.053936E-01,6.915612E-01,6.780002E-01,6.647050E-01,
6.516705E-01,6.388917E-01,6.263634E-01,6.140808E-01,6.020390E-01,5.902334E-01,
5.786593E-01,5.673121E-01,5.561875E-01,5.452810E-01,5.345884E-01,5.241054E-01,
5.138280E-01,5.037522E-01,4.938739E-01,4.841893E-01,4.746947E-01,4.653862E-01,
4.562603E-01,4.473133E-01,4.385417E-01,4.299422E-01,4.215113E-01,4.132457E-01,
4.051422E-01,3.971976E-01,3.894088E-01,3.817728E-01,3.742864E-01,3.669469E-01,
3.597513E-01,3.526968E-01,3.457806E-01,3.390001E-01,3.323525E-01,3.258353E-01,
3.194458E-01,3.131817E-01,3.070404E-01,3.010195E-01,2.951167E-01,2.893296E-01,
2.836561E-01,2.780937E-01,2.726405E-01,2.672942E-01,2.620527E-01,2.569140E-01,
2.518761E-01,2.469370E-01,2.420947E-01,2.373473E-01,2.326931E-01,2.281301E-01,
2.236566E-01,2.192709E-01,2.149711E-01,2.107557E-01,2.066229E-01,2.025711E-01,
1.985988E-01,1.947044E-01,1.908864E-01,1.871432E-01,1.834735E-01,1.798756E-01,
1.763484E-01,1.728903E-01,1.695000E-01,1.661762E-01,1.629176E-01,1.597229E-01,
1.565908E-01,1.535202E-01,1.505098E-01,1.475584E-01,1.446648E-01,1.418280E-01,
1.390469E-01,1.363202E-01,1.336471E-01,1.310264E-01,1.284570E-01,1.259380E-01,
1.234685E-01,1.210473E-01,1.186737E-01,1.163466E-01,1.140651E-01,1.118283E-01,
1.096354E-01,1.074856E-01,1.053778E-01,1.033114E-01,1.012856E-01,9.929941E-02,
9.735221E-02,9.544319E-02,9.357161E-02,9.173673E-02,8.993782E-02,8.817420E-02,
8.644516E-02,8.475002E-02,8.308812E-02,8.145882E-02,7.986146E-02,7.829542E-02,
7.676010E-02,7.525488E-02,7.377918E-02,7.233241E-02,7.091402E-02,6.952344E-02,
6.816012E-02,6.682355E-02,6.551318E-02,6.422850E-02,6.296902E-02,6.173424E-02,
6.052367E-02,5.933684E-02,5.817328E-02,5.703253E-02,5.591416E-02,5.481772E-02,
5.374278E-02,5.268891E-02,5.165572E-02,5.064278E-02,4.964970E-02,4.867610E-02,
4.772160E-02,4.678580E-02,4.586836E-02,4.496891E-02,4.408710E-02,4.322258E-02,
4.237501E-02,4.154406E-02,4.072941E-02,3.993073E-02,3.914771E-02,3.838005E-02,
3.762744E-02,3.688959E-02,3.616621E-02,3.545701E-02,3.476172E-02,3.408006E-02,
3.341177E-02,3.275659E-02,3.211425E-02,3.148451E-02,3.086712E-02,3.026183E-02],
[3.487500E-01,3.419112E-01,3.352066E-01,3.286334E-01,3.221891E-01,3.158711E-01,
3.096771E-01,6.523545E-01,6.395622E-01,6.270208E-01,6.147253E-01,6.026709E-01,
5.908529E-01,5.792667E-01,9.166576E-01,8.986825E-01,8.810599E-01,8.637828E-01,
8.468446E-01,8.302385E-01,8.139580E-01,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,9.812289E-01,9.619876E-01,9.431236E-01,9.246296E-01,
9.064981E-01,8.887223E-01,8.712950E-01,8.542094E-01,8.374589E-01,8.210368E-01,
8.049368E-01,7.891525E-01,7.736777E-01,7.585063E-01,7.436325E-01,7.290503E-01,
7.147541E-01,7.007382E-01,6.869971E-01,6.735255E-01,6.603181E-01,6.473697E-01,
6.346751E-01,6.222295E-01,6.100280E-01,5.980657E-01,5.863380E-01,5.748403E-01,
5.635680E-01,5.525168E-01,5.416823E-01,5.310602E-01,5.206465E-01,5.104369E-01,
5.004275E-01,4.906145E-01,4.809938E-01,4.715618E-01,4.623148E-01,4.532491E-01,
4.443611E-01,4.356475E-01,4.271047E-01,4.187294E-01,4.105184E-01,4.024684E-01,
3.945762E-01,3.868388E-01,3.792532E-01,3.718162E-01,3.645251E-01,3.573770E-01,
3.503691E-01,3.434986E-01,3.367628E-01,3.301591E-01,3.236848E-01,3.173376E-01,
3.111148E-01,3.050140E-01,2.990329E-01,2.931690E-01,2.874201E-01,2.817840E-01,
2.762584E-01,2.708411E-01,2.655301E-01,2.603232E-01,2.552184E-01,2.502138E-01,
2.453072E-01,2.404969E-01,2.357809E-01,2.311574E-01,2.266245E-01,2.221806E-01,
2.178237E-01,2.135523E-01,2.093647E-01,2.052592E-01,2.012342E-01,1.972881E-01,
1.934194E-01,1.896266E-01,1.859081E-01,1.822626E-01,1.786885E-01,1.751845E-01,
1.717493E-01,1.683814E-01,1.650795E-01,1.618424E-01,1.586688E-01,1.555574E-01,
1.525070E-01,1.495164E-01,1.465845E-01,1.437101E-01,1.408920E-01,1.381292E-01,
1.354206E-01,1.327651E-01,1.301616E-01,1.276092E-01,1.251069E-01,1.226536E-01,
1.202485E-01,1.178905E-01,1.155787E-01,1.133123E-01,1.110903E-01,1.089119E-01,
1.067762E-01,1.046824E-01,1.026296E-01,1.006171E-01,9.864406E-02,9.670971E-02,
9.481329E-02,9.295406E-02,9.113129E-02,8.934426E-02,8.759227E-02,8.587464E-02,
8.419069E-02,8.253976E-02,8.092121E-02,7.933439E-02,7.777869E-02,7.625350E-02,
7.475822E-02,7.329225E-02,7.185504E-02,7.044600E-02,6.906460E-02,6.771029E-02,
6.638253E-02,6.508081E-02,6.380461E-02,6.255344E-02,6.132681E-02,6.012423E-02,
5.894523E-02,5.778935E-02,5.665613E-02,5.554514E-02,5.445593E-02,5.338809E-02,
5.234118E-02,5.131480E-02,5.030855E-02,4.932203E-02,4.835485E-02,4.740664E-02,
4.647703E-02,4.556564E-02,4.467213E-02,4.379614E-02,4.293732E-02,4.209535E-02,
4.126988E-02,4.046060E-02,3.966720E-02,3.888935E-02,3.812675E-02,3.737911E-02,
3.664613E-02,3.592752E-02,3.522300E-02,3.453230E-02,3.385514E-02,3.319126E-02,
3.254040E-02,3.190231E-02,3.127672E-02,3.066340E-02,3.006211E-02,2.947261E-02,
2.889467E-02,2.832807E-02,2.777257E-02,2.722797E-02,2.669404E-02,2.617059E-02,
2.565740E-02,2.515427E-02,2.466101E-02,2.417743E-02,2.370332E-02,2.323851E-02,
2.278282E-02,2.233606E-02,2.189807E-02,2.146866E-02,2.104767E-02,2.063494E-02,
2.023030E-02,1.983360E-02,1.944467E-02,1.906338E-02,1.868955E-02,1.832306E-02,
1.796376E-02,1.761150E-02,1.726615E-02,1.692757E-02,1.659563E-02,1.627020E-02,
1.595115E-02,1.563836E-02,1.533170E-02,1.503106E-02,1.473631E-02,1.444734E-02,
1.416403E-02,1.388629E-02,1.361398E-02,1.334702E-02,1.308529E-02,1.282870E-02,
1.257714E-02,1.233051E-02,1.208871E-02,1.185166E-02,1.161926E-02,1.139141E-02,
1.116803E-02,1.094903E-02,1.073433E-02,1.052384E-02,1.031747E-02,1.011515E-02,
9.916799E-03,9.722337E-03,9.531688E-03,9.344777E-03,9.161532E-03,8.981880E-03,
8.805750E-03,8.633075E-03,8.463786E-03,8.297816E-03,8.135101E-03,7.975577E-03,
7.819180E-03,7.665851E-03,7.515528E-03,7.368153E-03,7.223668E-03,7.082017E-03,
6.943143E-03,6.806992E-03,6.673511E-03,6.542647E-03,6.414350E-03,6.288569E-03,
6.165254E-03,6.044357E-03,5.925831E-03,5.809629E-03,5.695705E-03,5.584016E-03,
5.474517E-03,5.367165E-03,5.261918E-03,5.158735E-03,5.057576E-03,4.958400E-03,
4.861168E-03,4.765844E-03,4.672389E-03,4.580766E-03,4.490940E-03,4.402875E-03,
4.316538E-03,4.231893E-03,4.148908E-03,4.067550E-03,3.987788E-03,3.909590E-03,
3.832926E-03,3.757764E-03,3.684077E-03,3.611834E-03,3.541008E-03,3.471571E-03,
3.403496E-03,3.336755E-03,3.271324E-03,3.207175E-03,3.144284E-03,3.082627E-03,
3.022178E-03,2.962915E-03,2.904814E-03,2.847853E-03,2.792008E-03,2.737258E-03,
2.683583E-03,2.630959E-03,2.579368E-03,2.528788E-03,2.479200E-03,2.430584E-03,
2.382922E-03,2.336194E-03,2.290383E-03,2.245470E-03,2.201438E-03,2.158269E-03,
2.115946E-03,2.074454E-03,2.033775E-03,1.993894E-03,1.954795E-03,1.916463E-03,
1.878882E-03,1.842038E-03,1.805917E-03,1.770504E-03,1.735786E-03,1.701748E-03],
[8.718750E-02,8.547781E-02,8.380164E-02,8.215834E-02,8.054726E-02,7.896778E-02,
7.741927E-02,1.630886E-01,1.598906E-01,1.567552E-01,1.536813E-01,1.506677E-01,
1.477132E-01,1.448167E-01,2.291644E-01,2.246706E-01,2.202650E-01,2.159457E-01,
2.117111E-01,2.075596E-01,2.034895E-01,2.866867E-01,2.810649E-01,2.755534E-01,
2.701500E-01,2.648525E-01,2.596589E-01,2.545672E-01,3.367628E-01,3.301591E-01,
3.236848E-01,3.173376E-01,3.111148E-01,3.050140E-01,2.990329E-01,3.803565E-01,
3.728980E-01,3.655857E-01,3.584167E-01,3.513884E-01,3.444979E-01,3.377425E-01,
4.183071E-01,4.101043E-01,4.020624E-01,3.941782E-01,3.864486E-01,3.788706E-01,
3.714412E-01,3.641575E-01,3.570166E-01,3.500157E-01,3.431521E-01,3.364231E-01,
3.298260E-01,3.233583E-01,3.170175E-01,3.108010E-01,3.047063E-01,2.987312E-01,
2.928733E-01,2.871302E-01,2.814998E-01,2.759797E-01,2.705680E-01,2.652623E-01,
2.600606E-01,2.549610E-01,2.499614E-01,2.450598E-01,2.402543E-01,2.355431E-01,
2.309242E-01,2.263959E-01,2.219565E-01,2.176040E-01,2.133369E-01,2.091535E-01,
2.050522E-01,2.010312E-01,1.970891E-01,1.932243E-01,1.894353E-01,1.857206E-01,
1.820787E-01,1.785083E-01,1.750078E-01,1.715760E-01,1.682115E-01,1.649130E-01,
1.616792E-01,1.585087E-01,1.554005E-01,1.523532E-01,1.493656E-01,1.464367E-01,
1.435651E-01,1.407499E-01,1.379899E-01,1.352840E-01,1.326311E-01,1.300303E-01,
1.274805E-01,1.249807E-01,1.225299E-01,1.201272E-01,1.177715E-01,1.154621E-01,
1.131980E-01,1.109782E-01,1.088020E-01,1.066685E-01,1.045768E-01,1.025261E-01,
1.005156E-01,9.854456E-02,9.661216E-02,9.471765E-02,9.286030E-02,9.103937E-02,
8.925414E-02,8.750392E-02,8.578802E-02,8.410577E-02,8.245651E-02,8.083959E-02,
7.925437E-02,7.770024E-02,7.617659E-02,7.468281E-02,7.321833E-02,7.178256E-02,
7.037495E-02,6.899494E-02,6.764199E-02,6.631557E-02,6.501516E-02,6.374025E-02,
6.249035E-02,6.126495E-02,6.006358E-02,5.888577E-02,5.773106E-02,5.659899E-02,
5.548911E-02,5.440101E-02,5.333424E-02,5.228838E-02,5.126304E-02,5.025780E-02,
4.927228E-02,4.830608E-02,4.735883E-02,4.643015E-02,4.551968E-02,4.462707E-02,
4.375196E-02,4.289401E-02,4.205289E-02,4.122825E-02,4.041979E-02,3.962719E-02,
3.885012E-02,3.808829E-02,3.734141E-02,3.660916E-02,3.589128E-02,3.518747E-02,
3.449747E-02,3.382099E-02,3.315779E-02,3.250758E-02,3.187013E-02,3.124517E-02,
3.063247E-02,3.003179E-02,2.944289E-02,2.886553E-02,2.829949E-02,2.774456E-02,
2.720050E-02,2.666712E-02,2.614419E-02,2.563152E-02,2.512890E-02,2.463614E-02,
2.415304E-02,2.367941E-02,2.321507E-02,2.275984E-02,2.231353E-02,2.187598E-02,
2.144701E-02,2.102644E-02,2.061413E-02,2.020990E-02,1.981359E-02,1.942506E-02,
1.904415E-02,1.867070E-02,1.830458E-02,1.794564E-02,1.759374E-02,1.724873E-02,
1.691050E-02,1.657889E-02,1.625379E-02,1.593506E-02,1.562259E-02,1.531624E-02,
1.501590E-02,1.472144E-02,1.443276E-02,1.414975E-02,1.387228E-02,1.360025E-02,
1.333356E-02,1.307210E-02,1.281576E-02,1.256445E-02,1.231807E-02,1.207652E-02,
1.183971E-02,1.160754E-02,1.137992E-02,1.115677E-02,1.093799E-02,1.072350E-02,
1.051322E-02,1.030706E-02,1.010495E-02,9.906796E-03,9.712530E-03,9.522073E-03,
9.335351E-03,9.152291E-03,8.972820E-03,8.796868E-03,8.624367E-03,8.455249E-03,
8.289446E-03,8.126895E-03,7.967532E-03,7.811293E-03,7.658119E-03,7.507948E-03,
7.360721E-03,7.216382E-03,7.074873E-03,6.936139E-03,6.800126E-03,6.666780E-03,
6.536048E-03,6.407880E-03,6.282226E-03,6.159035E-03,6.038260E-03,5.919853E-03,
5.803769E-03,5.689960E-03,5.578384E-03,5.468995E-03,5.361751E-03,5.256611E-03,
5.153532E-03,5.052474E-03,4.953398E-03,4.856265E-03,4.761037E-03,4.667676E-03,
4.576145E-03,4.486410E-03,4.398434E-03,4.312184E-03,4.227624E-03,4.144723E-03,
4.063448E-03,3.983766E-03,3.905647E-03,3.829059E-03,3.753974E-03,3.680361E-03,
3.608191E-03,3.537437E-03,3.468070E-03,3.400063E-03,3.333390E-03,3.268024E-03,
3.203940E-03,3.141113E-03,3.079517E-03,3.019130E-03,2.959927E-03,2.901884E-03,
2.844980E-03,2.789192E-03,2.734497E-03,2.680876E-03,2.628305E-03,2.576766E-03,
2.526237E-03,2.476699E-03,2.428133E-03,2.380518E-03,2.333838E-03,2.288073E-03,
2.243205E-03,2.199217E-03,2.156092E-03,2.113812E-03,2.072362E-03,2.031724E-03,
1.991883E-03,1.952823E-03,1.914530E-03,1.876987E-03,1.840180E-03,1.804096E-03,
1.768718E-03,1.734035E-03,1.700031E-03,1.666695E-03,1.634012E-03,1.601970E-03,
1.570556E-03,1.539759E-03,1.509565E-03,1.479963E-03,1.450942E-03,1.422490E-03,
1.394596E-03,1.367249E-03,1.340438E-03,1.314153E-03,1.288383E-03,1.263119E-03,
1.238350E-03,1.214066E-03,1.190259E-03,1.166919E-03,1.144036E-03,1.121602E-03,
1.099609E-03,1.078046E-03,1.056906E-03,1.036181E-03,1.015862E-03,9.959415E-04,
9.764117E-04,9.572648E-04,9.384935E-04,9.200902E-04,9.020478E-04,8.843592E-04,
8.670174E-04,8.500157E-04,8.333474E-04,8.170060E-04,8.009850E-04,7.852782E-04,
7.698794E-04,7.547825E-04,7.399817E-04,7.254711E-04,7.112450E-04,6.972980E-04]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.frac_pest_on_surface = 0.62
ted_empty.density_h2o = 1.0
ted_empty.mass_wax = 0.012
# input variables that change per simulation
ted_empty.solubility = pd.Series([145., 1., 20.], dtype='float')
ted_empty.log_kow = pd.Series([2.75, 4., 5.], dtype='float')
# internally calculated variables
blp_conc = pd.Series([[6.750000E+01,6.617637E+01,6.487869E+01,6.360646E+01,6.235917E+01,6.113635E+01,5.993750E+01,
1.262622E+02,1.237862E+02,1.213589E+02,1.189791E+02,1.166460E+02,1.143586E+02,1.121161E+02,
1.774176E+02,1.739385E+02,1.705277E+02,1.671838E+02,1.639054E+02,1.606913E+02,1.575403E+02,
2.219510E+02,2.175987E+02,2.133317E+02,2.091484E+02,2.050471E+02,2.010263E+02,1.970843E+02,
2.607196E+02,2.556070E+02,2.505947E+02,2.456807E+02,2.408631E+02,2.361399E+02,2.315093E+02,
2.269696E+02,2.225188E+02,2.181554E+02,2.138775E+02,2.096835E+02,2.055717E+02,2.015406E+02,
1.975885E+02,1.937139E+02,1.899153E+02,1.861912E+02,1.825401E+02,1.789606E+02,1.754513E+02,
1.720108E+02,1.686377E+02,1.653309E+02,1.620888E+02,1.589104E+02,1.557942E+02,1.527392E+02,
1.497441E+02,1.468077E+02,1.439289E+02,1.411065E+02,1.383395E+02,1.356267E+02,1.329672E+02,
1.303598E+02,1.278035E+02,1.252974E+02,1.228404E+02,1.204315E+02,1.180699E+02,1.157547E+02,
1.134848E+02,1.112594E+02,1.090777E+02,1.069387E+02,1.048417E+02,1.027859E+02,1.007703E+02,
9.879424E+01,9.685694E+01,9.495764E+01,9.309558E+01,9.127003E+01,8.948028E+01,8.772563E+01,
8.600538E+01,8.431887E+01,8.266543E+01,8.104441E+01,7.945518E+01,7.789711E+01,7.636959E+01,
7.487203E+01,7.340384E+01,7.196443E+01,7.055325E+01,6.916975E+01,6.781337E+01,6.648359E+01,
6.517989E+01,6.390175E+01,6.264868E+01,6.142018E+01,6.021576E+01,5.903497E+01,5.787733E+01,
5.674239E+01,5.562971E+01,5.453884E+01,5.346937E+01,5.242087E+01,5.139293E+01,5.038514E+01,
4.939712E+01,4.842847E+01,4.747882E+01,4.654779E+01,4.563501E+01,4.474014E+01,4.386281E+01,
4.300269E+01,4.215943E+01,4.133271E+01,4.052220E+01,3.972759E+01,3.894855E+01,3.818480E+01,
3.743602E+01,3.670192E+01,3.598222E+01,3.527663E+01,3.458487E+01,3.390669E+01,3.324180E+01,
3.258994E+01,3.195088E+01,3.132434E+01,3.071009E+01,3.010788E+01,2.951748E+01,2.893866E+01,
2.837119E+01,2.781485E+01,2.726942E+01,2.673468E+01,2.621043E+01,2.569646E+01,2.519257E+01,
2.469856E+01,2.421424E+01,2.373941E+01,2.327389E+01,2.281751E+01,2.237007E+01,2.193141E+01,
2.150135E+01,2.107972E+01,2.066636E+01,2.026110E+01,1.986379E+01,1.947428E+01,1.909240E+01,
1.871801E+01,1.835096E+01,1.799111E+01,1.763831E+01,1.729244E+01,1.695334E+01,1.662090E+01,
1.629497E+01,1.597544E+01,1.566217E+01,1.535504E+01,1.505394E+01,1.475874E+01,1.446933E+01,
1.418560E+01,1.390743E+01,1.363471E+01,1.336734E+01,1.310522E+01,1.284823E+01,1.259629E+01,
1.234928E+01,1.210712E+01,1.186970E+01,1.163695E+01,1.140875E+01,1.118503E+01,1.096570E+01,
1.075067E+01,1.053986E+01,1.033318E+01,1.013055E+01,9.931897E+00,9.737138E+00,9.546199E+00,
9.359004E+00,9.175480E+00,8.995554E+00,8.819157E+00,8.646218E+00,8.476671E+00,8.310449E+00,
8.147486E+00,7.987719E+00,7.831085E+00,7.677522E+00,7.526970E+00,7.379371E+00,7.234666E+00,
7.092799E+00,6.953713E+00,6.817355E+00,6.683671E+00,6.552608E+00,6.424116E+00,6.298143E+00,
6.174640E+00,6.053559E+00,5.934852E+00,5.818474E+00,5.704377E+00,5.592517E+00,5.482852E+00,
5.375336E+00,5.269929E+00,5.166589E+00,5.065275E+00,4.965948E+00,4.868569E+00,4.773100E+00,
4.679502E+00,4.587740E+00,4.497777E+00,4.409578E+00,4.323109E+00,4.238336E+00,4.155225E+00,
4.073743E+00,3.993859E+00,3.915542E+00,3.838761E+00,3.763485E+00,3.689686E+00,3.617333E+00,
3.546399E+00,3.476857E+00,3.408678E+00,3.341835E+00,3.276304E+00,3.212058E+00,3.149071E+00,
3.087320E+00,3.026779E+00,2.967426E+00,2.909237E+00,2.852188E+00,2.796259E+00,2.741426E+00,
2.687668E+00,2.634965E+00,2.583295E+00,2.532638E+00,2.482974E+00,2.434285E+00,2.386550E+00,
2.339751E+00,2.293870E+00,2.248889E+00,2.204789E+00,2.161555E+00,2.119168E+00,2.077612E+00,
2.036872E+00,1.996930E+00,1.957771E+00,1.919380E+00,1.881743E+00,1.844843E+00,1.808667E+00,
1.773200E+00,1.738428E+00,1.704339E+00,1.670918E+00,1.638152E+00,1.606029E+00,1.574536E+00,
1.543660E+00,1.513390E+00,1.483713E+00,1.454618E+00,1.426094E+00,1.398129E+00,1.370713E+00,
1.343834E+00,1.317482E+00,1.291647E+00,1.266319E+00,1.241487E+00,1.217142E+00,1.193275E+00,
1.169876E+00,1.146935E+00,1.124444E+00,1.102395E+00,1.080777E+00,1.059584E+00,1.038806E+00,
1.018436E+00,9.984649E-01,9.788856E-01,9.596902E-01,9.408713E-01,9.224214E-01,9.043333E-01,
8.865998E-01,8.692142E-01,8.521694E-01,8.354589E-01,8.190760E-01,8.030145E-01,7.872678E-01,
7.718300E-01,7.566949E-01,7.418565E-01,7.273092E-01,7.130471E-01,6.990647E-01,6.853565E-01,
6.719170E-01,6.587411E-01,6.458236E-01,6.331594E-01,6.207436E-01,6.085712E-01,5.966374E-01,
5.849378E-01,5.734675E-01,5.622221E-01,5.511973E-01,5.403887E-01,5.297920E-01,5.194031E-01,
5.092179E-01,4.992324E-01,4.894428E-01,4.798451E-01,4.704356E-01,4.612107E-01,4.521666E-01,
4.432999E-01,4.346071E-01,4.260847E-01,4.177294E-01,4.095380E-01,4.015072E-01,3.936339E-01,
3.859150E-01,3.783474E-01,3.709283E-01,3.636546E-01,3.565236E-01,3.495323E-01,3.426782E-01,
3.359585E-01,3.293706E-01],
[6.750000E+01,6.617637E+01,6.487869E+01,6.360646E+01,6.235917E+01,6.113635E+01,5.993750E+01,
1.262622E+02,1.237862E+02,1.213589E+02,1.189791E+02,1.166460E+02,1.143586E+02,1.121161E+02,
1.774176E+02,1.739385E+02,1.705277E+02,1.671838E+02,1.639054E+02,1.606913E+02,1.575403E+02,
2.219510E+02,2.175987E+02,2.133317E+02,2.091484E+02,2.050471E+02,2.010263E+02,1.970843E+02,
2.607196E+02,2.556070E+02,2.505947E+02,2.456807E+02,2.408631E+02,2.361399E+02,2.315093E+02,
2.269696E+02,2.225188E+02,2.181554E+02,2.138775E+02,2.096835E+02,2.055717E+02,2.015406E+02,
1.975885E+02,1.937139E+02,1.899153E+02,1.861912E+02,1.825401E+02,1.789606E+02,1.754513E+02,
1.720108E+02,1.686377E+02,1.653309E+02,1.620888E+02,1.589104E+02,1.557942E+02,1.527392E+02,
1.497441E+02,1.468077E+02,1.439289E+02,1.411065E+02,1.383395E+02,1.356267E+02,1.329672E+02,
1.303598E+02,1.278035E+02,1.252974E+02,1.228404E+02,1.204315E+02,1.180699E+02,1.157547E+02,
1.134848E+02,1.112594E+02,1.090777E+02,1.069387E+02,1.048417E+02,1.027859E+02,1.007703E+02,
9.879424E+01,9.685694E+01,9.495764E+01,9.309558E+01,9.127003E+01,8.948028E+01,8.772563E+01,
8.600538E+01,8.431887E+01,8.266543E+01,8.104441E+01,7.945518E+01,7.789711E+01,7.636959E+01,
7.487203E+01,7.340384E+01,7.196443E+01,7.055325E+01,6.916975E+01,6.781337E+01,6.648359E+01,
6.517989E+01,6.390175E+01,6.264868E+01,6.142018E+01,6.021576E+01,5.903497E+01,5.787733E+01,
5.674239E+01,5.562971E+01,5.453884E+01,5.346937E+01,5.242087E+01,5.139293E+01,5.038514E+01,
4.939712E+01,4.842847E+01,4.747882E+01,4.654779E+01,4.563501E+01,4.474014E+01,4.386281E+01,
4.300269E+01,4.215943E+01,4.133271E+01,4.052220E+01,3.972759E+01,3.894855E+01,3.818480E+01,
3.743602E+01,3.670192E+01,3.598222E+01,3.527663E+01,3.458487E+01,3.390669E+01,3.324180E+01,
3.258994E+01,3.195088E+01,3.132434E+01,3.071009E+01,3.010788E+01,2.951748E+01,2.893866E+01,
2.837119E+01,2.781485E+01,2.726942E+01,2.673468E+01,2.621043E+01,2.569646E+01,2.519257E+01,
2.469856E+01,2.421424E+01,2.373941E+01,2.327389E+01,2.281751E+01,2.237007E+01,2.193141E+01,
2.150135E+01,2.107972E+01,2.066636E+01,2.026110E+01,1.986379E+01,1.947428E+01,1.909240E+01,
1.871801E+01,1.835096E+01,1.799111E+01,1.763831E+01,1.729244E+01,1.695334E+01,1.662090E+01,
1.629497E+01,1.597544E+01,1.566217E+01,1.535504E+01,1.505394E+01,1.475874E+01,1.446933E+01,
1.418560E+01,1.390743E+01,1.363471E+01,1.336734E+01,1.310522E+01,1.284823E+01,1.259629E+01,
1.234928E+01,1.210712E+01,1.186970E+01,1.163695E+01,1.140875E+01,1.118503E+01,1.096570E+01,
1.075067E+01,1.053986E+01,1.033318E+01,1.013055E+01,9.931897E+00,9.737138E+00,9.546199E+00,
9.359004E+00,9.175480E+00,8.995554E+00,8.819157E+00,8.646218E+00,8.476671E+00,8.310449E+00,
8.147486E+00,7.987719E+00,7.831085E+00,7.677522E+00,7.526970E+00,7.379371E+00,7.234666E+00,
7.092799E+00,6.953713E+00,6.817355E+00,6.683671E+00,6.552608E+00,6.424116E+00,6.298143E+00,
6.174640E+00,6.053559E+00,5.934852E+00,5.818474E+00,5.704377E+00,5.592517E+00,5.482852E+00,
5.375336E+00,5.269929E+00,5.166589E+00,5.065275E+00,4.965948E+00,4.868569E+00,4.773100E+00,
4.679502E+00,4.587740E+00,4.497777E+00,4.409578E+00,4.323109E+00,4.238336E+00,4.155225E+00,
4.073743E+00,3.993859E+00,3.915542E+00,3.838761E+00,3.763485E+00,3.689686E+00,3.617333E+00,
3.546399E+00,3.476857E+00,3.408678E+00,3.341835E+00,3.276304E+00,3.212058E+00,3.149071E+00,
3.087320E+00,3.026779E+00,2.967426E+00,2.909237E+00,2.852188E+00,2.796259E+00,2.741426E+00,
2.687668E+00,2.634965E+00,2.583295E+00,2.532638E+00,2.482974E+00,2.434285E+00,2.386550E+00,
2.339751E+00,2.293870E+00,2.248889E+00,2.204789E+00,2.161555E+00,2.119168E+00,2.077612E+00,
2.036872E+00,1.996930E+00,1.957771E+00,1.919380E+00,1.881743E+00,1.844843E+00,1.808667E+00,
1.773200E+00,1.738428E+00,1.704339E+00,1.670918E+00,1.638152E+00,1.606029E+00,1.574536E+00,
1.543660E+00,1.513390E+00,1.483713E+00,1.454618E+00,1.426094E+00,1.398129E+00,1.370713E+00,
1.343834E+00,1.317482E+00,1.291647E+00,1.266319E+00,1.241487E+00,1.217142E+00,1.193275E+00,
1.169876E+00,1.146935E+00,1.124444E+00,1.102395E+00,1.080777E+00,1.059584E+00,1.038806E+00,
1.018436E+00,9.984649E-01,9.788856E-01,9.596902E-01,9.408713E-01,9.224214E-01,9.043333E-01,
8.865998E-01,8.692142E-01,8.521694E-01,8.354589E-01,8.190760E-01,8.030145E-01,7.872678E-01,
7.718300E-01,7.566949E-01,7.418565E-01,7.273092E-01,7.130471E-01,6.990647E-01,6.853565E-01,
6.719170E-01,6.587411E-01,6.458236E-01,6.331594E-01,6.207436E-01,6.085712E-01,5.966374E-01,
5.849378E-01,5.734675E-01,5.622221E-01,5.511973E-01,5.403887E-01,5.297920E-01,5.194031E-01,
5.092179E-01,4.992324E-01,4.894428E-01,4.798451E-01,4.704356E-01,4.612107E-01,4.521666E-01,
4.432999E-01,4.346071E-01,4.260847E-01,4.177294E-01,4.095380E-01,4.015072E-01,3.936339E-01,
3.859150E-01,3.783474E-01,3.709283E-01,3.636546E-01,3.565236E-01,3.495323E-01,3.426782E-01,
3.359585E-01,3.293706E-01],
[1.687500E+02,1.654409E+02,1.621967E+02,1.590161E+02,1.558979E+02,1.528409E+02,1.498438E+02,
3.156554E+02,3.094656E+02,3.033972E+02,2.974477E+02,2.916150E+02,2.858966E+02,2.802903E+02,
4.435440E+02,4.348464E+02,4.263193E+02,4.179594E+02,4.097635E+02,4.017283E+02,3.938506E+02,
5.548775E+02,5.439967E+02,5.333292E+02,5.228710E+02,5.126178E+02,5.025657E+02,4.927107E+02,
6.517989E+02,6.390175E+02,6.264868E+02,6.142018E+02,6.021576E+02,5.903497E+02,5.787733E+02,
7.361739E+02,7.217380E+02,7.075851E+02,6.937098E+02,6.801066E+02,6.667701E+02,6.536952E+02,
8.096266E+02,7.937503E+02,7.781854E+02,7.629256E+02,7.479651E+02,7.332980E+02,7.189184E+02,
7.048209E+02,6.909998E+02,6.774497E+02,6.641653E+02,6.511414E+02,6.383730E+02,6.258549E+02,
6.135822E+02,6.015503E+02,5.897542E+02,5.781895E+02,5.668516E+02,5.557359E+02,5.448383E+02,
5.341544E+02,5.236799E+02,5.134109E+02,5.033432E+02,4.934729E+02,4.837962E+02,4.743093E+02,
4.650084E+02,4.558898E+02,4.469501E+02,4.381857E+02,4.295931E+02,4.211691E+02,4.129102E+02,
4.048133E+02,3.968752E+02,3.890927E+02,3.814628E+02,3.739826E+02,3.666490E+02,3.594592E+02,
3.524104E+02,3.454999E+02,3.387249E+02,3.320827E+02,3.255707E+02,3.191865E+02,3.129274E+02,
3.067911E+02,3.007751E+02,2.948771E+02,2.890947E+02,2.834258E+02,2.778680E+02,2.724191E+02,
2.670772E+02,2.618400E+02,2.567054E+02,2.516716E+02,2.467365E+02,2.418981E+02,2.371546E+02,
2.325042E+02,2.279449E+02,2.234751E+02,2.190929E+02,2.147966E+02,2.105845E+02,2.064551E+02,
2.024067E+02,1.984376E+02,1.945463E+02,1.907314E+02,1.869913E+02,1.833245E+02,1.797296E+02,
1.762052E+02,1.727499E+02,1.693624E+02,1.660413E+02,1.627854E+02,1.595932E+02,1.564637E+02,
1.533956E+02,1.503876E+02,1.474386E+02,1.445474E+02,1.417129E+02,1.389340E+02,1.362096E+02,
1.335386E+02,1.309200E+02,1.283527E+02,1.258358E+02,1.233682E+02,1.209491E+02,1.185773E+02,
1.162521E+02,1.139725E+02,1.117375E+02,1.095464E+02,1.073983E+02,1.052923E+02,1.032276E+02,
1.012033E+02,9.921879E+01,9.727317E+01,9.536570E+01,9.349564E+01,9.166225E+01,8.986481E+01,
8.810261E+01,8.637497E+01,8.468121E+01,8.302067E+01,8.139268E+01,7.979662E+01,7.823186E+01,
7.669778E+01,7.519378E+01,7.371928E+01,7.227369E+01,7.085644E+01,6.946699E+01,6.810479E+01,
6.676929E+01,6.545999E+01,6.417636E+01,6.291790E+01,6.168412E+01,6.047453E+01,5.928866E+01,
5.812605E+01,5.698623E+01,5.586876E+01,5.477321E+01,5.369914E+01,5.264614E+01,5.161378E+01,
5.060166E+01,4.960939E+01,4.863658E+01,4.768285E+01,4.674782E+01,4.583112E+01,4.493240E+01,
4.405131E+01,4.318749E+01,4.234061E+01,4.151033E+01,4.069634E+01,3.989831E+01,3.911593E+01,
3.834889E+01,3.759689E+01,3.685964E+01,3.613684E+01,3.542822E+01,3.473350E+01,3.405239E+01,
3.338465E+01,3.272999E+01,3.208818E+01,3.145895E+01,3.084206E+01,3.023726E+01,2.964433E+01,
2.906302E+01,2.849312E+01,2.793438E+01,2.738661E+01,2.684957E+01,2.632307E+01,2.580689E+01,
2.530083E+01,2.480470E+01,2.431829E+01,2.384143E+01,2.337391E+01,2.291556E+01,2.246620E+01,
2.202565E+01,2.159374E+01,2.117030E+01,2.075517E+01,2.034817E+01,1.994916E+01,1.955796E+01,
1.917444E+01,1.879845E+01,1.842982E+01,1.806842E+01,1.771411E+01,1.736675E+01,1.702620E+01,
1.669232E+01,1.636500E+01,1.604409E+01,1.572947E+01,1.542103E+01,1.511863E+01,1.482217E+01,
1.453151E+01,1.424656E+01,1.396719E+01,1.369330E+01,1.342479E+01,1.316153E+01,1.290344E+01,
1.265042E+01,1.240235E+01,1.215915E+01,1.192071E+01,1.168695E+01,1.145778E+01,1.123310E+01,
1.101283E+01,1.079687E+01,1.058515E+01,1.037758E+01,1.017409E+01,9.974578E+00,9.778982E+00,
9.587222E+00,9.399223E+00,9.214910E+00,9.034211E+00,8.857056E+00,8.683374E+00,8.513098E+00,
8.346162E+00,8.182499E+00,8.022045E+00,7.864737E+00,7.710515E+00,7.559316E+00,7.411083E+00,
7.265756E+00,7.123279E+00,6.983596E+00,6.846652E+00,6.712393E+00,6.580767E+00,6.451722E+00,
6.325208E+00,6.201174E+00,6.079573E+00,5.960356E+00,5.843477E+00,5.728890E+00,5.616550E+00,
5.506413E+00,5.398436E+00,5.292576E+00,5.188792E+00,5.087043E+00,4.987289E+00,4.889491E+00,
4.793611E+00,4.699611E+00,4.607455E+00,4.517105E+00,4.428528E+00,4.341687E+00,4.256549E+00,
4.173081E+00,4.091249E+00,4.011022E+00,3.932369E+00,3.855257E+00,3.779658E+00,3.705541E+00,
3.632878E+00,3.561639E+00,3.491798E+00,3.423326E+00,3.356196E+00,3.290383E+00,3.225861E+00,
3.162604E+00,3.100587E+00,3.039787E+00,2.980178E+00,2.921739E+00,2.864445E+00,2.808275E+00,
2.753207E+00,2.699218E+00,2.646288E+00,2.594396E+00,2.543521E+00,2.493644E+00,2.444746E+00,
2.396806E+00,2.349806E+00,2.303727E+00,2.258553E+00,2.214264E+00,2.170844E+00,2.128275E+00,
2.086540E+00,2.045625E+00,2.005511E+00,1.966184E+00,1.927629E+00,1.889829E+00,1.852771E+00,
1.816439E+00,1.780820E+00,1.745899E+00,1.711663E+00,1.678098E+00,1.645192E+00,1.612931E+00,
1.581302E+00,1.550294E+00,1.519893E+00,1.490089E+00,1.460869E+00,1.432223E+00,1.404138E+00,
1.376603E+00,1.349609E+00]], dtype='float')
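            # worked spot check of the assumed relationship for simulation 0, day 0:
            #     67.5 * 0.62 / (10. ** 2.75 * 0.012) ~= 6.2017, matching expected_results[0][0]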
for i in range(3):
result[i] = ted_empty.daily_plant_dew_timeseries(i, blp_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil
:param i; simulation number/index
:param pore_h2o_conc; daily values of pesticide concentration in soil pore water
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
        # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
        # association, rather it is one year from the day of the 1st pesticide application
:return:
"""
        # create an empty TED object and empty pandas series for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[3.521818E+00,3.285972E+00,3.065920E+00,2.860605E+00,2.669039E+00,2.490301E+00,2.323533E+00,
5.689751E+00,5.308725E+00,4.953216E+00,4.621514E+00,4.312025E+00,4.023261E+00,3.753835E+00,
7.024270E+00,6.553876E+00,6.114982E+00,5.705480E+00,5.323401E+00,4.966909E+00,4.634290E+00,
7.845763E+00,7.320356E+00,6.830133E+00,6.372740E+00,5.945976E+00,5.547792E+00,5.176273E+00,
8.351451E+00,7.792179E+00,7.270360E+00,6.783486E+00,6.329216E+00,5.905368E+00,5.509903E+00,
8.662739E+00,8.082621E+00,7.541352E+00,7.036330E+00,6.565128E+00,6.125481E+00,5.715276E+00,
8.854359E+00,8.261409E+00,7.708167E+00,7.191974E+00,6.710349E+00,6.260977E+00,5.841698E+00,
5.450497E+00,5.085494E+00,4.744933E+00,4.427179E+00,4.130704E+00,3.854083E+00,3.595987E+00,
3.355175E+00,3.130489E+00,2.920849E+00,2.725249E+00,2.542747E+00,2.372467E+00,2.213590E+00,
2.065352E+00,1.927042E+00,1.797994E+00,1.677587E+00,1.565244E+00,1.460425E+00,1.362624E+00,
1.271373E+00,1.186233E+00,1.106795E+00,1.032676E+00,9.635209E-01,8.989968E-01,8.387936E-01,
7.826221E-01,7.302123E-01,6.813121E-01,6.356867E-01,5.931167E-01,5.533974E-01,5.163381E-01,
4.817604E-01,4.494984E-01,4.193968E-01,3.913111E-01,3.651061E-01,3.406561E-01,3.178434E-01,
2.965583E-01,2.766987E-01,2.581690E-01,2.408802E-01,2.247492E-01,2.096984E-01,1.956555E-01,
1.825531E-01,1.703280E-01,1.589217E-01,1.482792E-01,1.383494E-01,1.290845E-01,1.204401E-01,
1.123746E-01,1.048492E-01,9.782777E-02,9.127653E-02,8.516402E-02,7.946084E-02,7.413958E-02,
6.917468E-02,6.454226E-02,6.022005E-02,5.618730E-02,5.242460E-02,4.891388E-02,4.563827E-02,
4.258201E-02,3.973042E-02,3.706979E-02,3.458734E-02,3.227113E-02,3.011003E-02,2.809365E-02,
2.621230E-02,2.445694E-02,2.281913E-02,2.129100E-02,1.986521E-02,1.853490E-02,1.729367E-02,
1.613556E-02,1.505501E-02,1.404682E-02,1.310615E-02,1.222847E-02,1.140957E-02,1.064550E-02,
9.932605E-03,9.267448E-03,8.646835E-03,8.067782E-03,7.527507E-03,7.023412E-03,6.553075E-03,
6.114235E-03,5.704783E-03,5.322751E-03,4.966302E-03,4.633724E-03,4.323417E-03,4.033891E-03,
3.763753E-03,3.511706E-03,3.276538E-03,3.057118E-03,2.852392E-03,2.661376E-03,2.483151E-03,
2.316862E-03,2.161709E-03,2.016946E-03,1.881877E-03,1.755853E-03,1.638269E-03,1.528559E-03,
1.426196E-03,1.330688E-03,1.241576E-03,1.158431E-03,1.080854E-03,1.008473E-03,9.409384E-04,
8.779265E-04,8.191344E-04,7.642794E-04,7.130979E-04,6.653439E-04,6.207878E-04,5.792155E-04,
5.404272E-04,5.042364E-04,4.704692E-04,4.389633E-04,4.095672E-04,3.821397E-04,3.565490E-04,
3.326719E-04,3.103939E-04,2.896077E-04,2.702136E-04,2.521182E-04,2.352346E-04,2.194816E-04,
2.047836E-04,1.910699E-04,1.782745E-04,1.663360E-04,1.551970E-04,1.448039E-04,1.351068E-04,
1.260591E-04,1.176173E-04,1.097408E-04,1.023918E-04,9.553493E-05,8.913724E-05,8.316799E-05,
7.759848E-05,7.240194E-05,6.755340E-05,6.302955E-05,5.880865E-05,5.487041E-05,5.119590E-05,
4.776746E-05,4.456862E-05,4.158399E-05,3.879924E-05,3.620097E-05,3.377670E-05,3.151477E-05,
2.940432E-05,2.743520E-05,2.559795E-05,2.388373E-05,2.228431E-05,2.079200E-05,1.939962E-05,
1.810048E-05,1.688835E-05,1.575739E-05,1.470216E-05,1.371760E-05,1.279898E-05,1.194187E-05,
1.114216E-05,1.039600E-05,9.699809E-06,9.050242E-06,8.444175E-06,7.878693E-06,7.351081E-06,
6.858801E-06,6.399488E-06,5.970933E-06,5.571078E-06,5.197999E-06,4.849905E-06,4.525121E-06,
4.222087E-06,3.939347E-06,3.675540E-06,3.429400E-06,3.199744E-06,2.985467E-06,2.785539E-06,
2.599000E-06,2.424952E-06,2.262561E-06,2.111044E-06,1.969673E-06,1.837770E-06,1.714700E-06,
1.599872E-06,1.492733E-06,1.392769E-06,1.299500E-06,1.212476E-06,1.131280E-06,1.055522E-06,
9.848367E-07,9.188851E-07,8.573501E-07,7.999360E-07,7.463666E-07,6.963847E-07,6.497499E-07,
6.062381E-07,5.656401E-07,5.277609E-07,4.924183E-07,4.594426E-07,4.286751E-07,3.999680E-07,
3.731833E-07,3.481923E-07,3.248749E-07,3.031190E-07,2.828201E-07,2.638805E-07,2.462092E-07,
2.297213E-07,2.143375E-07,1.999840E-07,1.865917E-07,1.740962E-07,1.624375E-07,1.515595E-07,
1.414100E-07,1.319402E-07,1.231046E-07,1.148606E-07,1.071688E-07,9.999199E-08,9.329583E-08,
8.704809E-08,8.121874E-08,7.577976E-08,7.070502E-08,6.597011E-08,6.155229E-08,5.743032E-08,
5.358438E-08,4.999600E-08,4.664791E-08,4.352404E-08,4.060937E-08,3.788988E-08,3.535251E-08,
3.298506E-08,3.077615E-08,2.871516E-08,2.679219E-08,2.499800E-08,2.332396E-08,2.176202E-08,
2.030468E-08,1.894494E-08,1.767625E-08,1.649253E-08,1.538807E-08,1.435758E-08,1.339610E-08,
1.249900E-08,1.166198E-08,1.088101E-08,1.015234E-08,9.472470E-09,8.838127E-09,8.246264E-09,
7.694037E-09,7.178790E-09,6.698048E-09,6.249500E-09,5.830989E-09,5.440505E-09,5.076171E-09,
4.736235E-09,4.419064E-09,4.123132E-09,3.847018E-09,3.589395E-09,3.349024E-09,3.124750E-09,
2.915495E-09,2.720253E-09,2.538086E-09,2.368118E-09,2.209532E-09,2.061566E-09,1.923509E-09,
1.794697E-09,1.674512E-09],
[3.544172E+00,3.306830E+00,3.085381E+00,2.878762E+00,2.685980E+00,2.506108E+00,2.338282E+00,
5.725866E+00,5.342422E+00,4.984656E+00,4.650848E+00,4.339395E+00,4.048799E+00,3.777663E+00,
7.068856E+00,6.595476E+00,6.153797E+00,5.741695E+00,5.357191E+00,4.998436E+00,4.663706E+00,
7.895563E+00,7.366821E+00,6.873487E+00,6.413190E+00,5.983718E+00,5.583006E+00,5.209129E+00,
8.404462E+00,7.841640E+00,7.316509E+00,6.826544E+00,6.369391E+00,5.942852E+00,5.544877E+00,
8.717725E+00,8.133925E+00,7.589220E+00,7.080993E+00,6.606800E+00,6.164363E+00,5.751554E+00,
8.910561E+00,8.313848E+00,7.757094E+00,7.237625E+00,6.752943E+00,6.300718E+00,5.878778E+00,
5.485094E+00,5.117774E+00,4.775052E+00,4.455281E+00,4.156924E+00,3.878547E+00,3.618812E+00,
3.376471E+00,3.150359E+00,2.939389E+00,2.742547E+00,2.558887E+00,2.387526E+00,2.227640E+00,
2.078462E+00,1.939274E+00,1.809406E+00,1.688236E+00,1.575180E+00,1.469695E+00,1.371273E+00,
1.279443E+00,1.193763E+00,1.113820E+00,1.039231E+00,9.696368E-01,9.047031E-01,8.441178E-01,
7.875898E-01,7.348473E-01,6.856367E-01,6.397217E-01,5.968815E-01,5.569101E-01,5.196155E-01,
4.848184E-01,4.523516E-01,4.220589E-01,3.937949E-01,3.674236E-01,3.428184E-01,3.198608E-01,
2.984407E-01,2.784550E-01,2.598077E-01,2.424092E-01,2.261758E-01,2.110295E-01,1.968974E-01,
1.837118E-01,1.714092E-01,1.599304E-01,1.492204E-01,1.392275E-01,1.299039E-01,1.212046E-01,
1.130879E-01,1.055147E-01,9.844872E-02,9.185591E-02,8.570459E-02,7.996521E-02,7.461018E-02,
6.961376E-02,6.495194E-02,6.060230E-02,5.654394E-02,5.275737E-02,4.922436E-02,4.592795E-02,
4.285230E-02,3.998261E-02,3.730509E-02,3.480688E-02,3.247597E-02,3.030115E-02,2.827197E-02,
2.637868E-02,2.461218E-02,2.296398E-02,2.142615E-02,1.999130E-02,1.865255E-02,1.740344E-02,
1.623798E-02,1.515057E-02,1.413599E-02,1.318934E-02,1.230609E-02,1.148199E-02,1.071307E-02,
9.995652E-03,9.326273E-03,8.701720E-03,8.118992E-03,7.575287E-03,7.067993E-03,6.594671E-03,
6.153045E-03,5.740994E-03,5.356537E-03,4.997826E-03,4.663136E-03,4.350860E-03,4.059496E-03,
3.787644E-03,3.533997E-03,3.297335E-03,3.076523E-03,2.870497E-03,2.678269E-03,2.498913E-03,
2.331568E-03,2.175430E-03,2.029748E-03,1.893822E-03,1.766998E-03,1.648668E-03,1.538261E-03,
1.435249E-03,1.339134E-03,1.249456E-03,1.165784E-03,1.087715E-03,1.014874E-03,9.469109E-04,
8.834991E-04,8.243338E-04,7.691307E-04,7.176243E-04,6.695671E-04,6.247282E-04,5.828920E-04,
5.438575E-04,5.074370E-04,4.734555E-04,4.417496E-04,4.121669E-04,3.845653E-04,3.588121E-04,
3.347836E-04,3.123641E-04,2.914460E-04,2.719288E-04,2.537185E-04,2.367277E-04,2.208748E-04,
2.060835E-04,1.922827E-04,1.794061E-04,1.673918E-04,1.561821E-04,1.457230E-04,1.359644E-04,
1.268592E-04,1.183639E-04,1.104374E-04,1.030417E-04,9.614133E-05,8.970304E-05,8.369589E-05,
7.809103E-05,7.286151E-05,6.798219E-05,6.342962E-05,5.918193E-05,5.521870E-05,5.152086E-05,
4.807067E-05,4.485152E-05,4.184795E-05,3.904551E-05,3.643075E-05,3.399109E-05,3.171481E-05,
2.959097E-05,2.760935E-05,2.576043E-05,2.403533E-05,2.242576E-05,2.092397E-05,1.952276E-05,
1.821538E-05,1.699555E-05,1.585741E-05,1.479548E-05,1.380467E-05,1.288022E-05,1.201767E-05,
1.121288E-05,1.046199E-05,9.761378E-06,9.107688E-06,8.497774E-06,7.928703E-06,7.397742E-06,
6.902337E-06,6.440108E-06,6.008833E-06,5.606440E-06,5.230993E-06,4.880689E-06,4.553844E-06,
4.248887E-06,3.964352E-06,3.698871E-06,3.451168E-06,3.220054E-06,3.004417E-06,2.803220E-06,
2.615497E-06,2.440345E-06,2.276922E-06,2.124443E-06,1.982176E-06,1.849435E-06,1.725584E-06,
1.610027E-06,1.502208E-06,1.401610E-06,1.307748E-06,1.220172E-06,1.138461E-06,1.062222E-06,
9.910879E-07,9.247177E-07,8.627921E-07,8.050135E-07,7.511042E-07,7.008050E-07,6.538742E-07,
6.100862E-07,5.692305E-07,5.311108E-07,4.955439E-07,4.623588E-07,4.313961E-07,4.025068E-07,
3.755521E-07,3.504025E-07,3.269371E-07,3.050431E-07,2.846153E-07,2.655554E-07,2.477720E-07,
2.311794E-07,2.156980E-07,2.012534E-07,1.877760E-07,1.752012E-07,1.634685E-07,1.525215E-07,
1.423076E-07,1.327777E-07,1.238860E-07,1.155897E-07,1.078490E-07,1.006267E-07,9.388802E-08,
8.760062E-08,8.173427E-08,7.626077E-08,7.115381E-08,6.638886E-08,6.194299E-08,5.779486E-08,
5.392451E-08,5.031334E-08,4.694401E-08,4.380031E-08,4.086713E-08,3.813038E-08,3.557691E-08,
3.319443E-08,3.097150E-08,2.889743E-08,2.696225E-08,2.515667E-08,2.347201E-08,2.190016E-08,
2.043357E-08,1.906519E-08,1.778845E-08,1.659721E-08,1.548575E-08,1.444871E-08,1.348113E-08,
1.257834E-08,1.173600E-08,1.095008E-08,1.021678E-08,9.532596E-09,8.894227E-09,8.298607E-09,
7.742874E-09,7.224357E-09,6.740563E-09,6.289168E-09,5.868001E-09,5.475039E-09,5.108392E-09,
4.766298E-09,4.447113E-09,4.149303E-09,3.871437E-09,3.612178E-09,3.370282E-09,3.144584E-09,
2.934001E-09,2.737519E-09,2.554196E-09,2.383149E-09,2.223557E-09,2.074652E-09,1.935719E-09,
1.806089E-09,1.685141E-09],
[3.555456E+00,3.317358E+00,3.095204E+00,2.887928E+00,2.694532E+00,2.514087E+00,2.345726E+00,
5.744096E+00,5.359431E+00,5.000526E+00,4.665656E+00,4.353211E+00,4.061689E+00,3.789690E+00,
7.091362E+00,6.616475E+00,6.173389E+00,5.759976E+00,5.374248E+00,5.014350E+00,4.678554E+00,
7.920702E+00,7.390276E+00,6.895371E+00,6.433609E+00,6.002769E+00,5.600782E+00,5.225714E+00,
8.431220E+00,7.866606E+00,7.339803E+00,6.848279E+00,6.389670E+00,5.961773E+00,5.562531E+00,
8.745481E+00,8.159822E+00,7.613383E+00,7.103538E+00,6.627835E+00,6.183989E+00,5.769866E+00,
8.938931E+00,8.340318E+00,7.781792E+00,7.260668E+00,6.774443E+00,6.320779E+00,5.897495E+00,
5.502558E+00,5.134068E+00,4.790255E+00,4.469466E+00,4.170159E+00,3.890896E+00,3.630334E+00,
3.387221E+00,3.160389E+00,2.948748E+00,2.751279E+00,2.567034E+00,2.395127E+00,2.234733E+00,
2.085079E+00,1.945448E+00,1.815167E+00,1.693611E+00,1.580195E+00,1.474374E+00,1.375639E+00,
1.283517E+00,1.197564E+00,1.117366E+00,1.042540E+00,9.727239E-01,9.075835E-01,8.468054E-01,
7.900974E-01,7.371869E-01,6.878197E-01,6.417585E-01,5.987818E-01,5.586832E-01,5.212699E-01,
4.863620E-01,4.537918E-01,4.234027E-01,3.950487E-01,3.685934E-01,3.439098E-01,3.208792E-01,
2.993909E-01,2.793416E-01,2.606349E-01,2.431810E-01,2.268959E-01,2.117013E-01,1.975243E-01,
1.842967E-01,1.719549E-01,1.604396E-01,1.496955E-01,1.396708E-01,1.303175E-01,1.215905E-01,
1.134479E-01,1.058507E-01,9.876217E-02,9.214836E-02,8.597746E-02,8.021981E-02,7.484773E-02,
6.983540E-02,6.515873E-02,6.079525E-02,5.672397E-02,5.292534E-02,4.938108E-02,4.607418E-02,
4.298873E-02,4.010990E-02,3.742386E-02,3.491770E-02,3.257937E-02,3.039762E-02,2.836199E-02,
2.646267E-02,2.469054E-02,2.303709E-02,2.149437E-02,2.005495E-02,1.871193E-02,1.745885E-02,
1.628968E-02,1.519881E-02,1.418099E-02,1.323133E-02,1.234527E-02,1.151855E-02,1.074718E-02,
1.002748E-02,9.355966E-03,8.729425E-03,8.144841E-03,7.599406E-03,7.090496E-03,6.615667E-03,
6.172636E-03,5.759273E-03,5.373591E-03,5.013738E-03,4.677983E-03,4.364712E-03,4.072421E-03,
3.799703E-03,3.545248E-03,3.307833E-03,3.086318E-03,2.879636E-03,2.686796E-03,2.506869E-03,
2.338991E-03,2.182356E-03,2.036210E-03,1.899851E-03,1.772624E-03,1.653917E-03,1.543159E-03,
1.439818E-03,1.343398E-03,1.253435E-03,1.169496E-03,1.091178E-03,1.018105E-03,9.499257E-04,
8.863120E-04,8.269584E-04,7.715794E-04,7.199091E-04,6.716989E-04,6.267173E-04,5.847479E-04,
5.455891E-04,5.090526E-04,4.749629E-04,4.431560E-04,4.134792E-04,3.857897E-04,3.599545E-04,
3.358495E-04,3.133586E-04,2.923739E-04,2.727945E-04,2.545263E-04,2.374814E-04,2.215780E-04,
2.067396E-04,1.928949E-04,1.799773E-04,1.679247E-04,1.566793E-04,1.461870E-04,1.363973E-04,
1.272631E-04,1.187407E-04,1.107890E-04,1.033698E-04,9.644743E-05,8.998863E-05,8.396236E-05,
7.833966E-05,7.309348E-05,6.819863E-05,6.363157E-05,5.937036E-05,5.539450E-05,5.168490E-05,
4.822372E-05,4.499432E-05,4.198118E-05,3.916983E-05,3.654674E-05,3.409932E-05,3.181579E-05,
2.968518E-05,2.769725E-05,2.584245E-05,2.411186E-05,2.249716E-05,2.099059E-05,1.958491E-05,
1.827337E-05,1.704966E-05,1.590789E-05,1.484259E-05,1.384863E-05,1.292122E-05,1.205593E-05,
1.124858E-05,1.049530E-05,9.792457E-06,9.136686E-06,8.524829E-06,7.953947E-06,7.421295E-06,
6.924313E-06,6.460612E-06,6.027964E-06,5.624290E-06,5.247648E-06,4.896229E-06,4.568343E-06,
4.262415E-06,3.976973E-06,3.710647E-06,3.462156E-06,3.230306E-06,3.013982E-06,2.812145E-06,
2.623824E-06,2.448114E-06,2.284171E-06,2.131207E-06,1.988487E-06,1.855324E-06,1.731078E-06,
1.615153E-06,1.506991E-06,1.406072E-06,1.311912E-06,1.224057E-06,1.142086E-06,1.065604E-06,
9.942433E-07,9.276618E-07,8.655391E-07,8.075765E-07,7.534956E-07,7.030362E-07,6.559560E-07,
6.120286E-07,5.710428E-07,5.328018E-07,4.971217E-07,4.638309E-07,4.327695E-07,4.037883E-07,
3.767478E-07,3.515181E-07,3.279780E-07,3.060143E-07,2.855214E-07,2.664009E-07,2.485608E-07,
2.319155E-07,2.163848E-07,2.018941E-07,1.883739E-07,1.757591E-07,1.639890E-07,1.530071E-07,
1.427607E-07,1.332005E-07,1.242804E-07,1.159577E-07,1.081924E-07,1.009471E-07,9.418694E-08,
8.787953E-08,8.199450E-08,7.650357E-08,7.138036E-08,6.660023E-08,6.214021E-08,5.797886E-08,
5.409619E-08,5.047353E-08,4.709347E-08,4.393976E-08,4.099725E-08,3.825179E-08,3.569018E-08,
3.330011E-08,3.107010E-08,2.898943E-08,2.704810E-08,2.523677E-08,2.354674E-08,2.196988E-08,
2.049862E-08,1.912589E-08,1.784509E-08,1.665006E-08,1.553505E-08,1.449472E-08,1.352405E-08,
1.261838E-08,1.177337E-08,1.098494E-08,1.024931E-08,9.562946E-09,8.922544E-09,8.325028E-09,
7.767526E-09,7.247358E-09,6.762024E-09,6.309192E-09,5.886684E-09,5.492470E-09,5.124656E-09,
4.781473E-09,4.461272E-09,4.162514E-09,3.883763E-09,3.623679E-09,3.381012E-09,3.154596E-09,
2.943342E-09,2.746235E-09,2.562328E-09,2.390737E-09,2.230636E-09,2.081257E-09,1.941882E-09,
1.811840E-09,1.690506E-09]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.soil_foc = 0.015
# input variables that change per simulation
ted_empty.koc = pd.Series([1000., 1500., 2000.], dtype='float')
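            # worked spot check of the assumed partitioning (pore-water conc * koc * soil_foc) for
            # simulation 0, day 0:
            #     2.347878E-01 * 1000. * 0.015 ~= 3.5218, matching expected_results[0][0]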
# internally calculated variables
pore_h2o_conc = pd.Series([[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[1.575188E-01,1.469702E-01,1.371280E-01,1.279450E-01,1.193769E-01,1.113826E-01,
1.039236E-01,2.544829E-01,2.374410E-01,2.215403E-01,2.067044E-01,1.928620E-01,
1.799466E-01,1.678961E-01,3.141714E-01,2.931323E-01,2.735021E-01,2.551865E-01,
2.380974E-01,2.221527E-01,2.072758E-01,3.509139E-01,3.274143E-01,3.054883E-01,
2.850307E-01,2.659430E-01,2.481336E-01,2.315169E-01,3.735316E-01,3.485173E-01,
3.251782E-01,3.034020E-01,2.830840E-01,2.641267E-01,2.464390E-01,3.874544E-01,
3.615078E-01,3.372987E-01,3.147108E-01,2.936356E-01,2.739717E-01,2.556246E-01,
3.960250E-01,3.695043E-01,3.447597E-01,3.216722E-01,3.001308E-01,2.800319E-01,
2.612790E-01,2.437820E-01,2.274566E-01,2.122245E-01,1.980125E-01,1.847522E-01,
1.723799E-01,1.608361E-01,1.500654E-01,1.400160E-01,1.306395E-01,1.218910E-01,
1.137283E-01,1.061123E-01,9.900624E-02,9.237609E-02,8.618994E-02,8.041805E-02,
7.503270E-02,7.000798E-02,6.531976E-02,6.094549E-02,5.686415E-02,5.305613E-02,
4.950312E-02,4.618804E-02,4.309497E-02,4.020903E-02,3.751635E-02,3.500399E-02,
3.265988E-02,3.047274E-02,2.843208E-02,2.652806E-02,2.475156E-02,2.309402E-02,
2.154748E-02,2.010451E-02,1.875817E-02,1.750200E-02,1.632994E-02,1.523637E-02,
1.421604E-02,1.326403E-02,1.237578E-02,1.154701E-02,1.077374E-02,1.005226E-02,
9.379087E-03,8.750998E-03,8.164970E-03,7.618186E-03,7.108019E-03,6.632016E-03,
6.187890E-03,5.773505E-03,5.386871E-03,5.026128E-03,4.689544E-03,4.375499E-03,
4.082485E-03,3.809093E-03,3.554009E-03,3.316008E-03,3.093945E-03,2.886753E-03,
2.693435E-03,2.513064E-03,2.344772E-03,2.187749E-03,2.041242E-03,1.904547E-03,
1.777005E-03,1.658004E-03,1.546972E-03,1.443376E-03,1.346718E-03,1.256532E-03,
1.172386E-03,1.093875E-03,1.020621E-03,9.522733E-04,8.885024E-04,8.290020E-04,
7.734862E-04,7.216882E-04,6.733589E-04,6.282660E-04,5.861929E-04,5.469374E-04,
5.103106E-04,4.761366E-04,4.442512E-04,4.145010E-04,3.867431E-04,3.608441E-04,
3.366794E-04,3.141330E-04,2.930965E-04,2.734687E-04,2.551553E-04,2.380683E-04,
2.221256E-04,2.072505E-04,1.933716E-04,1.804220E-04,1.683397E-04,1.570665E-04,
1.465482E-04,1.367343E-04,1.275777E-04,1.190342E-04,1.110628E-04,1.036253E-04,
9.668578E-05,9.021102E-05,8.416986E-05,7.853326E-05,7.327412E-05,6.836717E-05,
6.378883E-05,5.951708E-05,5.553140E-05,5.181263E-05,4.834289E-05,4.510551E-05,
4.208493E-05,3.926663E-05,3.663706E-05,3.418358E-05,3.189441E-05,2.975854E-05,
2.776570E-05,2.590631E-05,2.417144E-05,2.255276E-05,2.104246E-05,1.963331E-05,
1.831853E-05,1.709179E-05,1.594721E-05,1.487927E-05,1.388285E-05,1.295316E-05,
1.208572E-05,1.127638E-05,1.052123E-05,9.816657E-06,9.159265E-06,8.545896E-06,
7.973603E-06,7.439635E-06,6.941425E-06,6.476578E-06,6.042861E-06,5.638189E-06,
5.260616E-06,4.908328E-06,4.579632E-06,4.272948E-06,3.986802E-06,3.719817E-06,
3.470712E-06,3.238289E-06,3.021431E-06,2.819094E-06,2.630308E-06,2.454164E-06,
2.289816E-06,2.136474E-06,1.993401E-06,1.859909E-06,1.735356E-06,1.619145E-06,
1.510715E-06,1.409547E-06,1.315154E-06,1.227082E-06,1.144908E-06,1.068237E-06,
9.967004E-07,9.299543E-07,8.676781E-07,8.095723E-07,7.553576E-07,7.047736E-07,
6.575770E-07,6.135411E-07,5.724540E-07,5.341185E-07,4.983502E-07,4.649772E-07,
4.338390E-07,4.047861E-07,3.776788E-07,3.523868E-07,3.287885E-07,3.067705E-07,
2.862270E-07,2.670593E-07,2.491751E-07,2.324886E-07,2.169195E-07,2.023931E-07,
1.888394E-07,1.761934E-07,1.643943E-07,1.533853E-07,1.431135E-07,1.335296E-07,
1.245875E-07,1.162443E-07,1.084598E-07,1.011965E-07,9.441971E-08,8.809670E-08,
8.219713E-08,7.669263E-08,7.155676E-08,6.676481E-08,6.229377E-08,5.812215E-08,
5.422988E-08,5.059827E-08,4.720985E-08,4.404835E-08,4.109856E-08,3.834632E-08,
3.577838E-08,3.338241E-08,3.114689E-08,2.906107E-08,2.711494E-08,2.529913E-08,
2.360493E-08,2.202418E-08,2.054928E-08,1.917316E-08,1.788919E-08,1.669120E-08,
1.557344E-08,1.453054E-08,1.355747E-08,1.264957E-08,1.180246E-08,1.101209E-08,
1.027464E-08,9.586579E-09,8.944595E-09,8.345602E-09,7.786722E-09,7.265268E-09,
6.778735E-09,6.324783E-09,5.901232E-09,5.506044E-09,5.137321E-09,4.793290E-09,
4.472297E-09,4.172801E-09,3.893361E-09,3.632634E-09,3.389368E-09,3.162392E-09,
2.950616E-09,2.753022E-09,2.568660E-09,2.396645E-09,2.236149E-09,2.086400E-09,
1.946680E-09,1.816317E-09,1.694684E-09,1.581196E-09,1.475308E-09,1.376511E-09,
1.284330E-09,1.198322E-09,1.118074E-09,1.043200E-09,9.733402E-10,9.081585E-10,
8.473419E-10,7.905979E-10,7.376540E-10,6.882555E-10,6.421651E-10,5.991612E-10,
5.590372E-10,5.216001E-10,4.866701E-10,4.540793E-10,4.236709E-10,3.952990E-10,
3.688270E-10,3.441277E-10,3.210825E-10,2.995806E-10,2.795186E-10,2.608001E-10,
2.433351E-10,2.270396E-10,2.118355E-10,1.976495E-10,1.844135E-10,1.720639E-10,
1.605413E-10,1.497903E-10,1.397593E-10,1.304000E-10,1.216675E-10,1.135198E-10,
1.059177E-10,9.882474E-11,9.220674E-11,8.603193E-11,8.027063E-11,7.489515E-11],
[1.185152E-01,1.105786E-01,1.031735E-01,9.626426E-02,8.981773E-02,8.380291E-02,
7.819088E-02,1.914699E-01,1.786477E-01,1.666842E-01,1.555219E-01,1.451070E-01,
1.353896E-01,1.263230E-01,2.363787E-01,2.205492E-01,2.057796E-01,1.919992E-01,
1.791416E-01,1.671450E-01,1.559518E-01,2.640234E-01,2.463425E-01,2.298457E-01,
2.144536E-01,2.000923E-01,1.866927E-01,1.741905E-01,2.810407E-01,2.622202E-01,
2.446601E-01,2.282760E-01,2.129890E-01,1.987258E-01,1.854177E-01,2.915160E-01,
2.719941E-01,2.537794E-01,2.367846E-01,2.209278E-01,2.061330E-01,1.923289E-01,
2.979644E-01,2.780106E-01,2.593931E-01,2.420223E-01,2.258148E-01,2.106926E-01,
1.965832E-01,1.834186E-01,1.711356E-01,1.596752E-01,1.489822E-01,1.390053E-01,
1.296965E-01,1.210111E-01,1.129074E-01,1.053463E-01,9.829159E-02,9.170929E-02,
8.556780E-02,7.983758E-02,7.449109E-02,6.950265E-02,6.484826E-02,6.050557E-02,
5.645369E-02,5.267316E-02,4.914579E-02,4.585465E-02,4.278390E-02,3.991879E-02,
3.724555E-02,3.475132E-02,3.242413E-02,3.025278E-02,2.822685E-02,2.633658E-02,
2.457290E-02,2.292732E-02,2.139195E-02,1.995939E-02,1.862277E-02,1.737566E-02,
1.621207E-02,1.512639E-02,1.411342E-02,1.316829E-02,1.228645E-02,1.146366E-02,
1.069597E-02,9.979697E-03,9.311387E-03,8.687831E-03,8.106033E-03,7.563196E-03,
7.056711E-03,6.584145E-03,6.143224E-03,5.731831E-03,5.347987E-03,4.989849E-03,
4.655693E-03,4.343915E-03,4.053016E-03,3.781598E-03,3.528356E-03,3.292072E-03,
3.071612E-03,2.865915E-03,2.673994E-03,2.494924E-03,2.327847E-03,2.171958E-03,
2.026508E-03,1.890799E-03,1.764178E-03,1.646036E-03,1.535806E-03,1.432958E-03,
1.336997E-03,1.247462E-03,1.163923E-03,1.085979E-03,1.013254E-03,9.453995E-04,
8.820889E-04,8.230181E-04,7.679030E-04,7.164788E-04,6.684984E-04,6.237311E-04,
5.819617E-04,5.429894E-04,5.066271E-04,4.726998E-04,4.410445E-04,4.115090E-04,
3.839515E-04,3.582394E-04,3.342492E-04,3.118655E-04,2.909808E-04,2.714947E-04,
2.533135E-04,2.363499E-04,2.205222E-04,2.057545E-04,1.919758E-04,1.791197E-04,
1.671246E-04,1.559328E-04,1.454904E-04,1.357474E-04,1.266568E-04,1.181749E-04,
1.102611E-04,1.028773E-04,9.598788E-05,8.955986E-05,8.356230E-05,7.796638E-05,
7.274521E-05,6.787368E-05,6.332838E-05,5.908747E-05,5.513056E-05,5.143863E-05,
4.799394E-05,4.477993E-05,4.178115E-05,3.898319E-05,3.637260E-05,3.393684E-05,
3.166419E-05,2.954373E-05,2.756528E-05,2.571931E-05,2.399697E-05,2.238996E-05,
2.089058E-05,1.949160E-05,1.818630E-05,1.696842E-05,1.583210E-05,1.477187E-05,
1.378264E-05,1.285966E-05,1.199848E-05,1.119498E-05,1.044529E-05,9.745798E-06,
9.093151E-06,8.484210E-06,7.916048E-06,7.385934E-06,6.891320E-06,6.429829E-06,
5.999242E-06,5.597491E-06,5.222644E-06,4.872899E-06,4.546575E-06,4.242105E-06,
3.958024E-06,3.692967E-06,3.445660E-06,3.214914E-06,2.999621E-06,2.798745E-06,
2.611322E-06,2.436449E-06,2.273288E-06,2.121052E-06,1.979012E-06,1.846483E-06,
1.722830E-06,1.607457E-06,1.499811E-06,1.399373E-06,1.305661E-06,1.218225E-06,
1.136644E-06,1.060526E-06,9.895060E-07,9.232417E-07,8.614150E-07,8.037286E-07,
7.499053E-07,6.996864E-07,6.528305E-07,6.091124E-07,5.683219E-07,5.302631E-07,
4.947530E-07,4.616209E-07,4.307075E-07,4.018643E-07,3.749526E-07,3.498432E-07,
3.264152E-07,3.045562E-07,2.841610E-07,2.651316E-07,2.473765E-07,2.308104E-07,
2.153537E-07,2.009321E-07,1.874763E-07,1.749216E-07,1.632076E-07,1.522781E-07,
1.420805E-07,1.325658E-07,1.236882E-07,1.154052E-07,1.076769E-07,1.004661E-07,
9.373816E-08,8.746080E-08,8.160381E-08,7.613905E-08,7.104024E-08,6.628289E-08,
6.184412E-08,5.770261E-08,5.383844E-08,5.023304E-08,4.686908E-08,4.373040E-08,
4.080190E-08,3.806952E-08,3.552012E-08,3.314144E-08,3.092206E-08,2.885130E-08,
2.691922E-08,2.511652E-08,2.343454E-08,2.186520E-08,2.040095E-08,1.903476E-08,
1.776006E-08,1.657072E-08,1.546103E-08,1.442565E-08,1.345961E-08,1.255826E-08,
1.171727E-08,1.093260E-08,1.020048E-08,9.517381E-09,8.880030E-09,8.285361E-09,
7.730515E-09,7.212826E-09,6.729804E-09,6.279130E-09,5.858635E-09,5.466300E-09,
5.100238E-09,4.758690E-09,4.440015E-09,4.142681E-09,3.865258E-09,3.606413E-09,
3.364902E-09,3.139565E-09,2.929318E-09,2.733150E-09,2.550119E-09,2.379345E-09,
2.220008E-09,2.071340E-09,1.932629E-09,1.803206E-09,1.682451E-09,1.569782E-09,
1.464659E-09,1.366575E-09,1.275060E-09,1.189673E-09,1.110004E-09,1.035670E-09,
9.663144E-10,9.016032E-10,8.412256E-10,7.848912E-10,7.323294E-10,6.832875E-10,
6.375298E-10,5.948363E-10,5.550019E-10,5.178351E-10,4.831572E-10,4.508016E-10,
4.206128E-10,3.924456E-10,3.661647E-10,3.416437E-10,3.187649E-10,2.974181E-10,
2.775009E-10,2.589175E-10,2.415786E-10,2.254008E-10,2.103064E-10,1.962228E-10,
1.830823E-10,1.708219E-10,1.593824E-10,1.487091E-10,1.387505E-10,1.294588E-10,
1.207893E-10,1.127004E-10,1.051532E-10,9.811140E-11,9.154117E-11,8.541093E-11,
7.969122E-11,7.435454E-11,6.937524E-11,6.472938E-11,6.039465E-11,5.635020E-11]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_soil_timeseries(i, pore_h2o_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_inv_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil invertebrates (earthworms)
:param i; simulation number/index
:param pore_h2o_conc; daily values of pesticide concentration in soil pore water
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
        # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
        #       association, rather it is one year from the day of the 1st pesticide application
# this represents Eq 2 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
        # create an empty TED object (with empty pandas dataframes) for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[2.347878E+02,2.190648E+02,2.043947E+02,1.907070E+02,1.779359E+02,1.660201E+02,
1.549022E+02,3.793167E+02,3.539150E+02,3.302144E+02,3.081009E+02,2.874683E+02,
2.682174E+02,2.502557E+02,4.682847E+02,4.369250E+02,4.076655E+02,3.803653E+02,
3.548934E+02,3.311273E+02,3.089527E+02,5.230509E+02,4.880237E+02,4.553422E+02,
4.248493E+02,3.963984E+02,3.698528E+02,3.450849E+02,5.567634E+02,5.194786E+02,
4.846907E+02,4.522324E+02,4.219478E+02,3.936912E+02,3.673269E+02,5.775159E+02,
5.388414E+02,5.027568E+02,4.690887E+02,4.376752E+02,4.083654E+02,3.810184E+02,
5.902906E+02,5.507606E+02,5.138778E+02,4.794649E+02,4.473566E+02,4.173985E+02,
3.894465E+02,3.633665E+02,3.390329E+02,3.163289E+02,2.951453E+02,2.753803E+02,
2.569389E+02,2.397325E+02,2.236783E+02,2.086992E+02,1.947233E+02,1.816832E+02,
1.695165E+02,1.581644E+02,1.475726E+02,1.376901E+02,1.284694E+02,1.198662E+02,
1.118392E+02,1.043496E+02,9.736164E+01,9.084162E+01,8.475823E+01,7.908222E+01,
7.378632E+01,6.884507E+01,6.423472E+01,5.993312E+01,5.591958E+01,5.217481E+01,
4.868082E+01,4.542081E+01,4.237911E+01,3.954111E+01,3.689316E+01,3.442254E+01,
3.211736E+01,2.996656E+01,2.795979E+01,2.608740E+01,2.434041E+01,2.271040E+01,
2.118956E+01,1.977056E+01,1.844658E+01,1.721127E+01,1.605868E+01,1.498328E+01,
1.397989E+01,1.304370E+01,1.217020E+01,1.135520E+01,1.059478E+01,9.885278E+00,
9.223290E+00,8.605634E+00,8.029341E+00,7.491640E+00,6.989947E+00,6.521851E+00,
6.085102E+00,5.677601E+00,5.297389E+00,4.942639E+00,4.611645E+00,4.302817E+00,
4.014670E+00,3.745820E+00,3.494973E+00,3.260926E+00,3.042551E+00,2.838801E+00,
2.648695E+00,2.471319E+00,2.305823E+00,2.151409E+00,2.007335E+00,1.872910E+00,
1.747487E+00,1.630463E+00,1.521276E+00,1.419400E+00,1.324347E+00,1.235660E+00,
1.152911E+00,1.075704E+00,1.003668E+00,9.364550E-01,8.737434E-01,8.152314E-01,
7.606378E-01,7.097001E-01,6.621737E-01,6.178299E-01,5.764556E-01,5.378521E-01,
5.018338E-01,4.682275E-01,4.368717E-01,4.076157E-01,3.803189E-01,3.548501E-01,
3.310868E-01,3.089149E-01,2.882278E-01,2.689261E-01,2.509169E-01,2.341137E-01,
2.184358E-01,2.038078E-01,1.901594E-01,1.774250E-01,1.655434E-01,1.544575E-01,
1.441139E-01,1.344630E-01,1.254584E-01,1.170569E-01,1.092179E-01,1.019039E-01,
9.507972E-02,8.871252E-02,8.277171E-02,7.722873E-02,7.205696E-02,6.723152E-02,
6.272922E-02,5.852844E-02,5.460896E-02,5.095196E-02,4.753986E-02,4.435626E-02,
4.138585E-02,3.861437E-02,3.602848E-02,3.361576E-02,3.136461E-02,2.926422E-02,
2.730448E-02,2.547598E-02,2.376993E-02,2.217813E-02,2.069293E-02,1.930718E-02,
1.801424E-02,1.680788E-02,1.568231E-02,1.463211E-02,1.365224E-02,1.273799E-02,
1.188497E-02,1.108906E-02,1.034646E-02,9.653592E-03,9.007119E-03,8.403940E-03,
7.841153E-03,7.316054E-03,6.826120E-03,6.368995E-03,5.942483E-03,5.544532E-03,
5.173232E-03,4.826796E-03,4.503560E-03,4.201970E-03,3.920576E-03,3.658027E-03,
3.413060E-03,3.184498E-03,2.971241E-03,2.772266E-03,2.586616E-03,2.413398E-03,
2.251780E-03,2.100985E-03,1.960288E-03,1.829014E-03,1.706530E-03,1.592249E-03,
1.485621E-03,1.386133E-03,1.293308E-03,1.206699E-03,1.125890E-03,1.050492E-03,
9.801441E-04,9.145068E-04,8.532650E-04,7.961244E-04,7.428103E-04,6.930666E-04,
6.466540E-04,6.033495E-04,5.629450E-04,5.252462E-04,4.900721E-04,4.572534E-04,
4.266325E-04,3.980622E-04,3.714052E-04,3.465333E-04,3.233270E-04,3.016747E-04,
2.814725E-04,2.626231E-04,2.450360E-04,2.286267E-04,2.133163E-04,1.990311E-04,
1.857026E-04,1.732666E-04,1.616635E-04,1.508374E-04,1.407362E-04,1.313116E-04,
1.225180E-04,1.143133E-04,1.066581E-04,9.951555E-05,9.285129E-05,8.663332E-05,
8.083174E-05,7.541868E-05,7.036812E-05,6.565578E-05,6.125901E-05,5.715667E-05,
5.332906E-05,4.975778E-05,4.642565E-05,4.331666E-05,4.041587E-05,3.770934E-05,
3.518406E-05,3.282789E-05,3.062950E-05,2.857834E-05,2.666453E-05,2.487889E-05,
2.321282E-05,2.165833E-05,2.020794E-05,1.885467E-05,1.759203E-05,1.641394E-05,
1.531475E-05,1.428917E-05,1.333227E-05,1.243944E-05,1.160641E-05,1.082916E-05,
1.010397E-05,9.427336E-06,8.796015E-06,8.206972E-06,7.657376E-06,7.144584E-06,
6.666133E-06,6.219722E-06,5.803206E-06,5.414582E-06,5.051984E-06,4.713668E-06,
4.398008E-06,4.103486E-06,3.828688E-06,3.572292E-06,3.333066E-06,3.109861E-06,
2.901603E-06,2.707291E-06,2.525992E-06,2.356834E-06,2.199004E-06,2.051743E-06,
1.914344E-06,1.786146E-06,1.666533E-06,1.554930E-06,1.450801E-06,1.353646E-06,
1.262996E-06,1.178417E-06,1.099502E-06,1.025872E-06,9.571720E-07,8.930730E-07,
8.332666E-07,7.774652E-07,7.254007E-07,6.768228E-07,6.314980E-07,5.892085E-07,
5.497509E-07,5.129358E-07,4.785860E-07,4.465365E-07,4.166333E-07,3.887326E-07,
3.627004E-07,3.384114E-07,3.157490E-07,2.946042E-07,2.748755E-07,2.564679E-07,
2.392930E-07,2.232683E-07,2.083167E-07,1.943663E-07,1.813502E-07,1.692057E-07,
1.578745E-07,1.473021E-07,1.374377E-07,1.282339E-07,1.196465E-07,1.116341E-07],
[2.347878E+01,2.190648E+01,2.043947E+01,1.907070E+01,1.779359E+01,1.660201E+01,
1.549022E+01,3.793167E+01,3.539150E+01,3.302144E+01,3.081009E+01,2.874683E+01,
2.682174E+01,2.502557E+01,4.682847E+01,4.369250E+01,4.076655E+01,3.803653E+01,
3.548934E+01,3.311273E+01,3.089527E+01,5.230509E+01,4.880237E+01,4.553422E+01,
4.248493E+01,3.963984E+01,3.698528E+01,3.450849E+01,5.567634E+01,5.194786E+01,
4.846907E+01,4.522324E+01,4.219478E+01,3.936912E+01,3.673269E+01,5.775159E+01,
5.388414E+01,5.027568E+01,4.690887E+01,4.376752E+01,4.083654E+01,3.810184E+01,
5.902906E+01,5.507606E+01,5.138778E+01,4.794649E+01,4.473566E+01,4.173985E+01,
3.894465E+01,3.633665E+01,3.390329E+01,3.163289E+01,2.951453E+01,2.753803E+01,
2.569389E+01,2.397325E+01,2.236783E+01,2.086992E+01,1.947233E+01,1.816832E+01,
1.695165E+01,1.581644E+01,1.475726E+01,1.376901E+01,1.284694E+01,1.198662E+01,
1.118392E+01,1.043496E+01,9.736164E+00,9.084162E+00,8.475823E+00,7.908222E+00,
7.378632E+00,6.884507E+00,6.423472E+00,5.993312E+00,5.591958E+00,5.217481E+00,
4.868082E+00,4.542081E+00,4.237911E+00,3.954111E+00,3.689316E+00,3.442254E+00,
3.211736E+00,2.996656E+00,2.795979E+00,2.608740E+00,2.434041E+00,2.271040E+00,
2.118956E+00,1.977056E+00,1.844658E+00,1.721127E+00,1.605868E+00,1.498328E+00,
1.397989E+00,1.304370E+00,1.217020E+00,1.135520E+00,1.059478E+00,9.885278E-01,
9.223290E-01,8.605634E-01,8.029341E-01,7.491640E-01,6.989947E-01,6.521851E-01,
6.085102E-01,5.677601E-01,5.297389E-01,4.942639E-01,4.611645E-01,4.302817E-01,
4.014670E-01,3.745820E-01,3.494973E-01,3.260926E-01,3.042551E-01,2.838801E-01,
2.648695E-01,2.471319E-01,2.305823E-01,2.151409E-01,2.007335E-01,1.872910E-01,
1.747487E-01,1.630463E-01,1.521276E-01,1.419400E-01,1.324347E-01,1.235660E-01,
1.152911E-01,1.075704E-01,1.003668E-01,9.364550E-02,8.737434E-02,8.152314E-02,
7.606378E-02,7.097001E-02,6.621737E-02,6.178299E-02,5.764556E-02,5.378521E-02,
5.018338E-02,4.682275E-02,4.368717E-02,4.076157E-02,3.803189E-02,3.548501E-02,
3.310868E-02,3.089149E-02,2.882278E-02,2.689261E-02,2.509169E-02,2.341137E-02,
2.184358E-02,2.038078E-02,1.901594E-02,1.774250E-02,1.655434E-02,1.544575E-02,
1.441139E-02,1.344630E-02,1.254584E-02,1.170569E-02,1.092179E-02,1.019039E-02,
9.507972E-03,8.871252E-03,8.277171E-03,7.722873E-03,7.205696E-03,6.723152E-03,
6.272922E-03,5.852844E-03,5.460896E-03,5.095196E-03,4.753986E-03,4.435626E-03,
4.138585E-03,3.861437E-03,3.602848E-03,3.361576E-03,3.136461E-03,2.926422E-03,
2.730448E-03,2.547598E-03,2.376993E-03,2.217813E-03,2.069293E-03,1.930718E-03,
1.801424E-03,1.680788E-03,1.568231E-03,1.463211E-03,1.365224E-03,1.273799E-03,
1.188497E-03,1.108906E-03,1.034646E-03,9.653592E-04,9.007119E-04,8.403940E-04,
7.841153E-04,7.316054E-04,6.826120E-04,6.368995E-04,5.942483E-04,5.544532E-04,
5.173232E-04,4.826796E-04,4.503560E-04,4.201970E-04,3.920576E-04,3.658027E-04,
3.413060E-04,3.184498E-04,2.971241E-04,2.772266E-04,2.586616E-04,2.413398E-04,
2.251780E-04,2.100985E-04,1.960288E-04,1.829014E-04,1.706530E-04,1.592249E-04,
1.485621E-04,1.386133E-04,1.293308E-04,1.206699E-04,1.125890E-04,1.050492E-04,
9.801441E-05,9.145068E-05,8.532650E-05,7.961244E-05,7.428103E-05,6.930666E-05,
6.466540E-05,6.033495E-05,5.629450E-05,5.252462E-05,4.900721E-05,4.572534E-05,
4.266325E-05,3.980622E-05,3.714052E-05,3.465333E-05,3.233270E-05,3.016747E-05,
2.814725E-05,2.626231E-05,2.450360E-05,2.286267E-05,2.133163E-05,1.990311E-05,
1.857026E-05,1.732666E-05,1.616635E-05,1.508374E-05,1.407362E-05,1.313116E-05,
1.225180E-05,1.143133E-05,1.066581E-05,9.951555E-06,9.285129E-06,8.663332E-06,
8.083174E-06,7.541868E-06,7.036812E-06,6.565578E-06,6.125901E-06,5.715667E-06,
5.332906E-06,4.975778E-06,4.642565E-06,4.331666E-06,4.041587E-06,3.770934E-06,
3.518406E-06,3.282789E-06,3.062950E-06,2.857834E-06,2.666453E-06,2.487889E-06,
2.321282E-06,2.165833E-06,2.020794E-06,1.885467E-06,1.759203E-06,1.641394E-06,
1.531475E-06,1.428917E-06,1.333227E-06,1.243944E-06,1.160641E-06,1.082916E-06,
1.010397E-06,9.427336E-07,8.796015E-07,8.206972E-07,7.657376E-07,7.144584E-07,
6.666133E-07,6.219722E-07,5.803206E-07,5.414582E-07,5.051984E-07,4.713668E-07,
4.398008E-07,4.103486E-07,3.828688E-07,3.572292E-07,3.333066E-07,3.109861E-07,
2.901603E-07,2.707291E-07,2.525992E-07,2.356834E-07,2.199004E-07,2.051743E-07,
1.914344E-07,1.786146E-07,1.666533E-07,1.554930E-07,1.450801E-07,1.353646E-07,
1.262996E-07,1.178417E-07,1.099502E-07,1.025872E-07,9.571720E-08,8.930730E-08,
8.332666E-08,7.774652E-08,7.254007E-08,6.768228E-08,6.314980E-08,5.892085E-08,
5.497509E-08,5.129358E-08,4.785860E-08,4.465365E-08,4.166333E-08,3.887326E-08,
3.627004E-08,3.384114E-08,3.157490E-08,2.946042E-08,2.748755E-08,2.564679E-08,
2.392930E-08,2.232683E-08,2.083167E-08,1.943663E-08,1.813502E-08,1.692057E-08,
1.578745E-08,1.473021E-08,1.374377E-08,1.282339E-08,1.196465E-08,1.116341E-08],
[6.664600E-01,6.218291E-01,5.801871E-01,5.413337E-01,5.050822E-01,4.712584E-01,
4.396996E-01,1.076714E+00,1.004610E+00,9.373342E-01,8.745637E-01,8.159968E-01,
7.613519E-01,7.103665E-01,1.329255E+00,1.240239E+00,1.157184E+00,1.079691E+00,
1.007387E+00,9.399255E-01,8.769815E-01,1.484713E+00,1.385286E+00,1.292517E+00,
1.205961E+00,1.125202E+00,1.049850E+00,9.795450E-01,1.580408E+00,1.474573E+00,
1.375825E+00,1.283690E+00,1.197725E+00,1.117517E+00,1.042680E+00,1.639315E+00,
1.529535E+00,1.427107E+00,1.331538E+00,1.242369E+00,1.159171E+00,1.081545E+00,
1.675577E+00,1.563368E+00,1.458674E+00,1.360991E+00,1.269850E+00,1.184812E+00,
1.105468E+00,1.031439E+00,9.623662E-01,8.979194E-01,8.377884E-01,7.816842E-01,
7.293372E-01,6.804956E-01,6.349249E-01,5.924059E-01,5.527342E-01,5.157193E-01,
4.811831E-01,4.489597E-01,4.188942E-01,3.908421E-01,3.646686E-01,3.402478E-01,
3.174624E-01,2.962029E-01,2.763671E-01,2.578596E-01,2.405915E-01,2.244798E-01,
2.094471E-01,1.954211E-01,1.823343E-01,1.701239E-01,1.587312E-01,1.481015E-01,
1.381836E-01,1.289298E-01,1.202958E-01,1.122399E-01,1.047235E-01,9.771053E-02,
9.116714E-02,8.506195E-02,7.936561E-02,7.405073E-02,6.909178E-02,6.446491E-02,
6.014788E-02,5.611996E-02,5.236177E-02,4.885526E-02,4.558357E-02,4.253098E-02,
3.968280E-02,3.702537E-02,3.454589E-02,3.223245E-02,3.007394E-02,2.805998E-02,
2.618089E-02,2.442763E-02,2.279179E-02,2.126549E-02,1.984140E-02,1.851268E-02,
1.727294E-02,1.611623E-02,1.503697E-02,1.402999E-02,1.309044E-02,1.221382E-02,
1.139589E-02,1.063274E-02,9.920701E-03,9.256341E-03,8.636472E-03,8.058113E-03,
7.518486E-03,7.014995E-03,6.545222E-03,6.106908E-03,5.697947E-03,5.316372E-03,
4.960351E-03,4.628171E-03,4.318236E-03,4.029057E-03,3.759243E-03,3.507498E-03,
3.272611E-03,3.053454E-03,2.848973E-03,2.658186E-03,2.480175E-03,2.314085E-03,
2.159118E-03,2.014528E-03,1.879621E-03,1.753749E-03,1.636305E-03,1.526727E-03,
1.424487E-03,1.329093E-03,1.240088E-03,1.157043E-03,1.079559E-03,1.007264E-03,
9.398107E-04,8.768744E-04,8.181527E-04,7.633635E-04,7.122433E-04,6.645465E-04,
6.200438E-04,5.785213E-04,5.397795E-04,5.036321E-04,4.699053E-04,4.384372E-04,
4.090764E-04,3.816817E-04,3.561217E-04,3.322733E-04,3.100219E-04,2.892607E-04,
2.698897E-04,2.518160E-04,2.349527E-04,2.192186E-04,2.045382E-04,1.908409E-04,
1.780608E-04,1.661366E-04,1.550110E-04,1.446303E-04,1.349449E-04,1.259080E-04,
1.174763E-04,1.096093E-04,1.022691E-04,9.542044E-05,8.903041E-05,8.306831E-05,
7.750548E-05,7.231517E-05,6.747244E-05,6.295401E-05,5.873817E-05,5.480465E-05,
5.113455E-05,4.771022E-05,4.451521E-05,4.153416E-05,3.875274E-05,3.615758E-05,
3.373622E-05,3.147701E-05,2.936908E-05,2.740232E-05,2.556727E-05,2.385511E-05,
2.225760E-05,2.076708E-05,1.937637E-05,1.807879E-05,1.686811E-05,1.573850E-05,
1.468454E-05,1.370116E-05,1.278364E-05,1.192755E-05,1.112880E-05,1.038354E-05,
9.688185E-06,9.039396E-06,8.434055E-06,7.869251E-06,7.342271E-06,6.850581E-06,
6.391818E-06,5.963777E-06,5.564401E-06,5.191770E-06,4.844092E-06,4.519698E-06,
4.217027E-06,3.934626E-06,3.671136E-06,3.425291E-06,3.195909E-06,2.981889E-06,
2.782200E-06,2.595885E-06,2.422046E-06,2.259849E-06,2.108514E-06,1.967313E-06,
1.835568E-06,1.712645E-06,1.597955E-06,1.490944E-06,1.391100E-06,1.297942E-06,
1.211023E-06,1.129924E-06,1.054257E-06,9.836564E-07,9.177839E-07,8.563226E-07,
7.989773E-07,7.454722E-07,6.955501E-07,6.489712E-07,6.055115E-07,5.649622E-07,
5.271284E-07,4.918282E-07,4.588919E-07,4.281613E-07,3.994886E-07,3.727361E-07,
3.477751E-07,3.244856E-07,3.027558E-07,2.824811E-07,2.635642E-07,2.459141E-07,
2.294460E-07,2.140807E-07,1.997443E-07,1.863680E-07,1.738875E-07,1.622428E-07,
1.513779E-07,1.412406E-07,1.317821E-07,1.229571E-07,1.147230E-07,1.070403E-07,
9.987216E-08,9.318402E-08,8.694376E-08,8.112140E-08,7.568894E-08,7.062028E-08,
6.589105E-08,6.147853E-08,5.736149E-08,5.352016E-08,4.993608E-08,4.659201E-08,
4.347188E-08,4.056070E-08,3.784447E-08,3.531014E-08,3.294553E-08,3.073926E-08,
2.868075E-08,2.676008E-08,2.496804E-08,2.329600E-08,2.173594E-08,2.028035E-08,
1.892224E-08,1.765507E-08,1.647276E-08,1.536963E-08,1.434037E-08,1.338004E-08,
1.248402E-08,1.164800E-08,1.086797E-08,1.014018E-08,9.461118E-09,8.827535E-09,
8.236382E-09,7.684816E-09,7.170187E-09,6.690021E-09,6.242010E-09,5.824001E-09,
5.433985E-09,5.070088E-09,4.730559E-09,4.413768E-09,4.118191E-09,3.842408E-09,
3.585093E-09,3.345010E-09,3.121005E-09,2.912001E-09,2.716993E-09,2.535044E-09,
2.365279E-09,2.206884E-09,2.059095E-09,1.921204E-09,1.792547E-09,1.672505E-09,
1.560502E-09,1.456000E-09,1.358496E-09,1.267522E-09,1.182640E-09,1.103442E-09,
1.029548E-09,9.606020E-10,8.962733E-10,8.362526E-10,7.802512E-10,7.280002E-10,
6.792482E-10,6.337609E-10,5.913199E-10,5.517209E-10,5.147738E-10,4.803010E-10,
4.481367E-10,4.181263E-10,3.901256E-10,3.640001E-10,3.396241E-10,3.168805E-10]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.lipid_earthworm = 0.01
ted_empty.density_earthworm = 1.0
# input variables that change per simulation
ted_empty.log_kow = pd.Series([5.0, 4.0, 2.75], dtype='float')
# internally calculated variables
pore_h2o_conc = pd.Series([[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[1.185152E-01,1.105786E-01,1.031735E-01,9.626426E-02,8.981773E-02,8.380291E-02,
7.819088E-02,1.914699E-01,1.786477E-01,1.666842E-01,1.555219E-01,1.451070E-01,
1.353896E-01,1.263230E-01,2.363787E-01,2.205492E-01,2.057796E-01,1.919992E-01,
1.791416E-01,1.671450E-01,1.559518E-01,2.640234E-01,2.463425E-01,2.298457E-01,
2.144536E-01,2.000923E-01,1.866927E-01,1.741905E-01,2.810407E-01,2.622202E-01,
2.446601E-01,2.282760E-01,2.129890E-01,1.987258E-01,1.854177E-01,2.915160E-01,
2.719941E-01,2.537794E-01,2.367846E-01,2.209278E-01,2.061330E-01,1.923289E-01,
2.979644E-01,2.780106E-01,2.593931E-01,2.420223E-01,2.258148E-01,2.106926E-01,
1.965832E-01,1.834186E-01,1.711356E-01,1.596752E-01,1.489822E-01,1.390053E-01,
1.296965E-01,1.210111E-01,1.129074E-01,1.053463E-01,9.829159E-02,9.170929E-02,
8.556780E-02,7.983758E-02,7.449109E-02,6.950265E-02,6.484826E-02,6.050557E-02,
5.645369E-02,5.267316E-02,4.914579E-02,4.585465E-02,4.278390E-02,3.991879E-02,
3.724555E-02,3.475132E-02,3.242413E-02,3.025278E-02,2.822685E-02,2.633658E-02,
2.457290E-02,2.292732E-02,2.139195E-02,1.995939E-02,1.862277E-02,1.737566E-02,
1.621207E-02,1.512639E-02,1.411342E-02,1.316829E-02,1.228645E-02,1.146366E-02,
1.069597E-02,9.979697E-03,9.311387E-03,8.687831E-03,8.106033E-03,7.563196E-03,
7.056711E-03,6.584145E-03,6.143224E-03,5.731831E-03,5.347987E-03,4.989849E-03,
4.655693E-03,4.343915E-03,4.053016E-03,3.781598E-03,3.528356E-03,3.292072E-03,
3.071612E-03,2.865915E-03,2.673994E-03,2.494924E-03,2.327847E-03,2.171958E-03,
2.026508E-03,1.890799E-03,1.764178E-03,1.646036E-03,1.535806E-03,1.432958E-03,
1.336997E-03,1.247462E-03,1.163923E-03,1.085979E-03,1.013254E-03,9.453995E-04,
8.820889E-04,8.230181E-04,7.679030E-04,7.164788E-04,6.684984E-04,6.237311E-04,
5.819617E-04,5.429894E-04,5.066271E-04,4.726998E-04,4.410445E-04,4.115090E-04,
3.839515E-04,3.582394E-04,3.342492E-04,3.118655E-04,2.909808E-04,2.714947E-04,
2.533135E-04,2.363499E-04,2.205222E-04,2.057545E-04,1.919758E-04,1.791197E-04,
1.671246E-04,1.559328E-04,1.454904E-04,1.357474E-04,1.266568E-04,1.181749E-04,
1.102611E-04,1.028773E-04,9.598788E-05,8.955986E-05,8.356230E-05,7.796638E-05,
7.274521E-05,6.787368E-05,6.332838E-05,5.908747E-05,5.513056E-05,5.143863E-05,
4.799394E-05,4.477993E-05,4.178115E-05,3.898319E-05,3.637260E-05,3.393684E-05,
3.166419E-05,2.954373E-05,2.756528E-05,2.571931E-05,2.399697E-05,2.238996E-05,
2.089058E-05,1.949160E-05,1.818630E-05,1.696842E-05,1.583210E-05,1.477187E-05,
1.378264E-05,1.285966E-05,1.199848E-05,1.119498E-05,1.044529E-05,9.745798E-06,
9.093151E-06,8.484210E-06,7.916048E-06,7.385934E-06,6.891320E-06,6.429829E-06,
5.999242E-06,5.597491E-06,5.222644E-06,4.872899E-06,4.546575E-06,4.242105E-06,
3.958024E-06,3.692967E-06,3.445660E-06,3.214914E-06,2.999621E-06,2.798745E-06,
2.611322E-06,2.436449E-06,2.273288E-06,2.121052E-06,1.979012E-06,1.846483E-06,
1.722830E-06,1.607457E-06,1.499811E-06,1.399373E-06,1.305661E-06,1.218225E-06,
1.136644E-06,1.060526E-06,9.895060E-07,9.232417E-07,8.614150E-07,8.037286E-07,
7.499053E-07,6.996864E-07,6.528305E-07,6.091124E-07,5.683219E-07,5.302631E-07,
4.947530E-07,4.616209E-07,4.307075E-07,4.018643E-07,3.749526E-07,3.498432E-07,
3.264152E-07,3.045562E-07,2.841610E-07,2.651316E-07,2.473765E-07,2.308104E-07,
2.153537E-07,2.009321E-07,1.874763E-07,1.749216E-07,1.632076E-07,1.522781E-07,
1.420805E-07,1.325658E-07,1.236882E-07,1.154052E-07,1.076769E-07,1.004661E-07,
9.373816E-08,8.746080E-08,8.160381E-08,7.613905E-08,7.104024E-08,6.628289E-08,
6.184412E-08,5.770261E-08,5.383844E-08,5.023304E-08,4.686908E-08,4.373040E-08,
4.080190E-08,3.806952E-08,3.552012E-08,3.314144E-08,3.092206E-08,2.885130E-08,
2.691922E-08,2.511652E-08,2.343454E-08,2.186520E-08,2.040095E-08,1.903476E-08,
1.776006E-08,1.657072E-08,1.546103E-08,1.442565E-08,1.345961E-08,1.255826E-08,
1.171727E-08,1.093260E-08,1.020048E-08,9.517381E-09,8.880030E-09,8.285361E-09,
7.730515E-09,7.212826E-09,6.729804E-09,6.279130E-09,5.858635E-09,5.466300E-09,
5.100238E-09,4.758690E-09,4.440015E-09,4.142681E-09,3.865258E-09,3.606413E-09,
3.364902E-09,3.139565E-09,2.929318E-09,2.733150E-09,2.550119E-09,2.379345E-09,
2.220008E-09,2.071340E-09,1.932629E-09,1.803206E-09,1.682451E-09,1.569782E-09,
1.464659E-09,1.366575E-09,1.275060E-09,1.189673E-09,1.110004E-09,1.035670E-09,
9.663144E-10,9.016032E-10,8.412256E-10,7.848912E-10,7.323294E-10,6.832875E-10,
6.375298E-10,5.948363E-10,5.550019E-10,5.178351E-10,4.831572E-10,4.508016E-10,
4.206128E-10,3.924456E-10,3.661647E-10,3.416437E-10,3.187649E-10,2.974181E-10,
2.775009E-10,2.589175E-10,2.415786E-10,2.254008E-10,2.103064E-10,1.962228E-10,
1.830823E-10,1.708219E-10,1.593824E-10,1.487091E-10,1.387505E-10,1.294588E-10,
1.207893E-10,1.127004E-10,1.051532E-10,9.811140E-11,9.154117E-11,8.541093E-11,
7.969122E-11,7.435454E-11,6.937524E-11,6.472938E-11,6.039465E-11,5.635020E-11]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_soil_inv_timeseries(i, pore_h2o_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_animal_dose_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
        :param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param intake_food_conc; pesticide concentration in food item (daily mg a.i./kg)
:param frac_retained; fraction of ingested food retained by animal (mammals, birds, reptiles/amphibians)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
        # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
        #       association, rather it is one year from the day of the 1st pesticide application
# this represents Eqs 5&6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
        # create an empty TED object (with empty pandas dataframes) for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[2.860270E+02,3.090209E+02,3.058215E+02,3.001105E+02,2.942541E+02,2.884869E+02,2.828301E+02,
5.633110E+02,5.808675E+02,5.723374E+02,5.614002E+02,5.504201E+02,5.396295E+02,5.290480E+02,
8.047008E+02,8.175238E+02,8.043529E+02,7.888661E+02,7.734255E+02,7.582619E+02,7.433932E+02,
1.014843E+03,1.023545E+03,1.006334E+03,9.868866E+02,9.675630E+02,9.485925E+02,9.299915E+02,
1.197782E+03,1.202897E+03,1.182169E+03,1.159274E+03,1.136569E+03,1.114285E+03,1.092435E+03,
1.357040E+03,1.359032E+03,1.335242E+03,1.309345E+03,1.283698E+03,1.258528E+03,1.233850E+03,
1.495682E+03,1.494955E+03,1.468500E+03,1.439990E+03,1.411781E+03,1.384100E+03,1.356959E+03,
1.330350E+03,1.304262E+03,1.278687E+03,1.253612E+03,1.229030E+03,1.204929E+03,1.181301E+03,
1.158137E+03,1.135426E+03,1.113161E+03,1.091333E+03,1.069932E+03,1.048952E+03,1.028382E+03,
1.008217E+03,9.884460E+02,9.690632E+02,9.500604E+02,9.314303E+02,9.131655E+02,8.952589E+02,
8.777034E+02,8.604922E+02,8.436185E+02,8.270756E+02,8.108572E+02,7.949568E+02,7.793682E+02,
7.640852E+02,7.491020E+02,7.344125E+02,7.200112E+02,7.058922E+02,6.920501E+02,6.784794E+02,
6.651748E+02,6.521312E+02,6.393433E+02,6.268061E+02,6.145148E+02,6.024646E+02,5.906506E+02,
5.790683E+02,5.677131E+02,5.565806E+02,5.456664E+02,5.349662E+02,5.244759E+02,5.141912E+02,
5.041083E+02,4.942230E+02,4.845316E+02,4.750302E+02,4.657152E+02,4.565828E+02,4.476295E+02,
4.388517E+02,4.302461E+02,4.218092E+02,4.135378E+02,4.054286E+02,3.974784E+02,3.896841E+02,
3.820426E+02,3.745510E+02,3.672063E+02,3.600056E+02,3.529461E+02,3.460250E+02,3.392397E+02,
3.325874E+02,3.260656E+02,3.196716E+02,3.134031E+02,3.072574E+02,3.012323E+02,2.953253E+02,
2.895342E+02,2.838566E+02,2.782903E+02,2.728332E+02,2.674831E+02,2.622379E+02,2.570956E+02,
2.520541E+02,2.471115E+02,2.422658E+02,2.375151E+02,2.328576E+02,2.282914E+02,2.238147E+02,
2.194259E+02,2.151231E+02,2.109046E+02,2.067689E+02,2.027143E+02,1.987392E+02,1.948420E+02,
1.910213E+02,1.872755E+02,1.836031E+02,1.800028E+02,1.764730E+02,1.730125E+02,1.696198E+02,
1.662937E+02,1.630328E+02,1.598358E+02,1.567015E+02,1.536287E+02,1.506161E+02,1.476627E+02,
1.447671E+02,1.419283E+02,1.391452E+02,1.364166E+02,1.337416E+02,1.311190E+02,1.285478E+02,
1.260271E+02,1.235557E+02,1.211329E+02,1.187576E+02,1.164288E+02,1.141457E+02,1.119074E+02,
1.097129E+02,1.075615E+02,1.054523E+02,1.033845E+02,1.013571E+02,9.936960E+01,9.742102E+01,
9.551065E+01,9.363775E+01,9.180157E+01,9.000140E+01,8.823652E+01,8.650626E+01,8.480992E+01,
8.314685E+01,8.151639E+01,7.991791E+01,7.835077E+01,7.681436E+01,7.530807E+01,7.383133E+01,
7.238354E+01,7.096414E+01,6.957258E+01,6.820830E+01,6.687078E+01,6.555948E+01,6.427390E+01,
6.301353E+01,6.177787E+01,6.056645E+01,5.937878E+01,5.821440E+01,5.707285E+01,5.595368E+01,
5.485647E+01,5.378076E+01,5.272616E+01,5.169223E+01,5.067857E+01,4.968480E+01,4.871051E+01,
4.775533E+01,4.681887E+01,4.590078E+01,4.500070E+01,4.411826E+01,4.325313E+01,4.240496E+01,
4.157343E+01,4.075820E+01,3.995895E+01,3.917538E+01,3.840718E+01,3.765404E+01,3.691566E+01,
3.619177E+01,3.548207E+01,3.478629E+01,3.410415E+01,3.343539E+01,3.277974E+01,3.213695E+01,
3.150677E+01,3.088894E+01,3.028322E+01,2.968939E+01,2.910720E+01,2.853642E+01,2.797684E+01,
2.742823E+01,2.689038E+01,2.636308E+01,2.584611E+01,2.533929E+01,2.484240E+01,2.435526E+01,
2.387766E+01,2.340944E+01,2.295039E+01,2.250035E+01,2.205913E+01,2.162656E+01,2.120248E+01,
2.078671E+01,2.037910E+01,1.997948E+01,1.958769E+01,1.920359E+01,1.882702E+01,1.845783E+01,
1.809588E+01,1.774104E+01,1.739314E+01,1.705208E+01,1.671770E+01,1.638987E+01,1.606848E+01,
1.575338E+01,1.544447E+01,1.514161E+01,1.484469E+01,1.455360E+01,1.426821E+01,1.398842E+01,
1.371412E+01,1.344519E+01,1.318154E+01,1.292306E+01,1.266964E+01,1.242120E+01,1.217763E+01,
1.193883E+01,1.170472E+01,1.147520E+01,1.125017E+01,1.102957E+01,1.081328E+01,1.060124E+01,
1.039336E+01,1.018955E+01,9.989738E+00,9.793846E+00,9.601794E+00,9.413509E+00,9.228916E+00,
9.047942E+00,8.870518E+00,8.696572E+00,8.526038E+00,8.358848E+00,8.194936E+00,8.034238E+00,
7.876691E+00,7.722234E+00,7.570806E+00,7.422347E+00,7.276799E+00,7.134106E+00,6.994210E+00,
6.857058E+00,6.722595E+00,6.590769E+00,6.461528E+00,6.334822E+00,6.210600E+00,6.088814E+00,
5.969416E+00,5.852359E+00,5.737598E+00,5.625087E+00,5.514783E+00,5.406641E+00,5.300620E+00,
5.196678E+00,5.094775E+00,4.994869E+00,4.896923E+00,4.800897E+00,4.706755E+00,4.614458E+00,
4.523971E+00,4.435259E+00,4.348286E+00,4.263019E+00,4.179424E+00,4.097468E+00,4.017119E+00,
3.938346E+00,3.861117E+00,3.785403E+00,3.711174E+00,3.638400E+00,3.567053E+00,3.497105E+00,
3.428529E+00,3.361298E+00,3.295385E+00,3.230764E+00,3.167411E+00,3.105300E+00,3.044407E+00,
2.984708E+00,2.926180E+00,2.868799E+00,2.812544E+00,2.757391E+00,2.703321E+00,2.650310E+00,
2.598339E+00,2.547387E+00],
[4.583348E+01,4.951806E+01,4.900538E+01,4.809025E+01,4.715181E+01,4.622765E+01,4.532120E+01,
9.026597E+01,9.307926E+01,9.171236E+01,8.995977E+01,8.820030E+01,8.647120E+01,8.477560E+01,
1.289467E+02,1.310015E+02,1.288910E+02,1.264093E+02,1.239351E+02,1.215053E+02,1.191227E+02,
1.626202E+02,1.640147E+02,1.612568E+02,1.581405E+02,1.550440E+02,1.520042E+02,1.490235E+02,
1.919347E+02,1.927544E+02,1.894329E+02,1.857641E+02,1.821259E+02,1.785550E+02,1.750537E+02,
2.174545E+02,2.177737E+02,2.139616E+02,2.098118E+02,2.057021E+02,2.016689E+02,1.977143E+02,
2.396707E+02,2.395543E+02,2.353151E+02,2.307466E+02,2.262263E+02,2.217906E+02,2.174415E+02,
2.131776E+02,2.089973E+02,2.048990E+02,2.008811E+02,1.969419E+02,1.930800E+02,1.892938E+02,
1.855819E+02,1.819427E+02,1.783750E+02,1.748771E+02,1.714479E+02,1.680859E+02,1.647898E+02,
1.615584E+02,1.583904E+02,1.552844E+02,1.522394E+02,1.492541E+02,1.463273E+02,1.434579E+02,
1.406448E+02,1.378868E+02,1.351829E+02,1.325321E+02,1.299332E+02,1.273853E+02,1.248873E+02,
1.224384E+02,1.200374E+02,1.176836E+02,1.153759E+02,1.131134E+02,1.108953E+02,1.087208E+02,
1.065888E+02,1.044987E+02,1.024495E+02,1.004405E+02,9.847096E+01,9.654000E+01,9.464691E+01,
9.279094E+01,9.097137E+01,8.918748E+01,8.743857E+01,8.572395E+01,8.404295E+01,8.239492E+01,
8.077921E+01,7.919518E+01,7.764221E+01,7.611969E+01,7.462703E+01,7.316364E+01,7.172895E+01,
7.032239E+01,6.894341E+01,6.759147E+01,6.626604E+01,6.496660E+01,6.369265E+01,6.244367E+01,
6.121919E+01,6.001872E+01,5.884179E+01,5.768794E+01,5.655671E+01,5.544767E+01,5.436038E+01,
5.329440E+01,5.224933E+01,5.122475E+01,5.022027E+01,4.923548E+01,4.827000E+01,4.732346E+01,
4.639547E+01,4.548569E+01,4.459374E+01,4.371928E+01,4.286197E+01,4.202148E+01,4.119746E+01,
4.038960E+01,3.959759E+01,3.882110E+01,3.805985E+01,3.731352E+01,3.658182E+01,3.586447E+01,
3.516119E+01,3.447170E+01,3.379573E+01,3.313302E+01,3.248330E+01,3.184632E+01,3.122184E+01,
3.060960E+01,3.000936E+01,2.942090E+01,2.884397E+01,2.827836E+01,2.772384E+01,2.718019E+01,
2.664720E+01,2.612467E+01,2.561238E+01,2.511013E+01,2.461774E+01,2.413500E+01,2.366173E+01,
2.319774E+01,2.274284E+01,2.229687E+01,2.185964E+01,2.143099E+01,2.101074E+01,2.059873E+01,
2.019480E+01,1.979879E+01,1.941055E+01,1.902992E+01,1.865676E+01,1.829091E+01,1.793224E+01,
1.758060E+01,1.723585E+01,1.689787E+01,1.656651E+01,1.624165E+01,1.592316E+01,1.561092E+01,
1.530480E+01,1.500468E+01,1.471045E+01,1.442198E+01,1.413918E+01,1.386192E+01,1.359009E+01,
1.332360E+01,1.306233E+01,1.280619E+01,1.255507E+01,1.230887E+01,1.206750E+01,1.183086E+01,
1.159887E+01,1.137142E+01,1.114843E+01,1.092982E+01,1.071549E+01,1.050537E+01,1.029937E+01,
1.009740E+01,9.899397E+00,9.705276E+00,9.514962E+00,9.328379E+00,9.145455E+00,8.966118E+00,
8.790298E+00,8.617926E+00,8.448934E+00,8.283255E+00,8.120826E+00,7.961581E+00,7.805459E+00,
7.652399E+00,7.502340E+00,7.355224E+00,7.210992E+00,7.069589E+00,6.930959E+00,6.795047E+00,
6.661800E+00,6.531166E+00,6.403094E+00,6.277533E+00,6.154435E+00,6.033750E+00,5.915432E+00,
5.799434E+00,5.685711E+00,5.574217E+00,5.464910E+00,5.357747E+00,5.252685E+00,5.149683E+00,
5.048701E+00,4.949699E+00,4.852638E+00,4.757481E+00,4.664189E+00,4.572728E+00,4.483059E+00,
4.395149E+00,4.308963E+00,4.224467E+00,4.141628E+00,4.060413E+00,3.980791E+00,3.902730E+00,
3.826200E+00,3.751170E+00,3.677612E+00,3.605496E+00,3.534795E+00,3.465479E+00,3.397524E+00,
3.330900E+00,3.265583E+00,3.201547E+00,3.138767E+00,3.077217E+00,3.016875E+00,2.957716E+00,
2.899717E+00,2.842855E+00,2.787109E+00,2.732455E+00,2.678873E+00,2.626342E+00,2.574841E+00,
2.524350E+00,2.474849E+00,2.426319E+00,2.378740E+00,2.332095E+00,2.286364E+00,2.241530E+00,
2.197575E+00,2.154481E+00,2.112233E+00,2.070814E+00,2.030206E+00,1.990395E+00,1.951365E+00,
1.913100E+00,1.875585E+00,1.838806E+00,1.802748E+00,1.767397E+00,1.732740E+00,1.698762E+00,
1.665450E+00,1.632792E+00,1.600774E+00,1.569383E+00,1.538609E+00,1.508438E+00,1.478858E+00,
1.449859E+00,1.421428E+00,1.393554E+00,1.366228E+00,1.339437E+00,1.313171E+00,1.287421E+00,
1.262175E+00,1.237425E+00,1.213160E+00,1.189370E+00,1.166047E+00,1.143182E+00,1.120765E+00,
1.098787E+00,1.077241E+00,1.056117E+00,1.035407E+00,1.015103E+00,9.951976E-01,9.756824E-01,
9.565499E-01,9.377925E-01,9.194030E-01,9.013741E-01,8.836987E-01,8.663699E-01,8.493809E-01,
8.327250E-01,8.163958E-01,8.003868E-01,7.846917E-01,7.693044E-01,7.542188E-01,7.394290E-01,
7.249293E-01,7.107138E-01,6.967772E-01,6.831138E-01,6.697183E-01,6.565856E-01,6.437103E-01,
6.310876E-01,6.187123E-01,6.065798E-01,5.946851E-01,5.830237E-01,5.715909E-01,5.603824E-01,
5.493936E-01,5.386204E-01,5.280583E-01,5.177034E-01,5.075516E-01,4.975988E-01,4.878412E-01,
4.782749E-01,4.688963E-01,4.597015E-01,4.506870E-01,4.418493E-01,4.331849E-01,4.246904E-01,
4.163625E-01,4.081979E-01],
[1.338207E+02,1.378876E+02,1.355183E+02,1.328776E+02,1.302728E+02,1.277182E+02,1.252138E+02,
2.565791E+02,2.582388E+02,2.535095E+02,2.485550E+02,2.436818E+02,2.389034E+02,2.342187E+02,
3.634465E+02,3.630106E+02,3.562267E+02,3.492581E+02,3.424102E+02,3.356958E+02,3.291130E+02,
4.564800E+02,4.542198E+02,4.456473E+02,4.369252E+02,4.283582E+02,4.199584E+02,4.117233E+02,
5.374704E+02,5.336219E+02,5.234925E+02,5.132438E+02,5.031803E+02,4.933133E+02,4.836397E+02,
6.079765E+02,6.027455E+02,5.912606E+02,5.796831E+02,5.683167E+02,5.571724E+02,5.462466E+02,
6.693557E+02,6.629211E+02,6.502562E+02,6.375218E+02,6.250212E+02,6.127650E+02,6.007490E+02,
5.889687E+02,5.774194E+02,5.660965E+02,5.549957E+02,5.441126E+02,5.334429E+02,5.229824E+02,
5.127270E+02,5.026728E+02,4.928157E+02,4.831519E+02,4.736775E+02,4.643890E+02,4.552826E+02,
4.463548E+02,4.376021E+02,4.290210E+02,4.206081E+02,4.123602E+02,4.042741E+02,3.963465E+02,
3.885744E+02,3.809547E+02,3.734844E+02,3.661606E+02,3.589804E+02,3.519411E+02,3.450397E+02,
3.382737E+02,3.316404E+02,3.251371E+02,3.187613E+02,3.125106E+02,3.063825E+02,3.003745E+02,
2.944844E+02,2.887097E+02,2.830483E+02,2.774979E+02,2.720563E+02,2.667214E+02,2.614912E+02,
2.563635E+02,2.513364E+02,2.464078E+02,2.415759E+02,2.368388E+02,2.321945E+02,2.276413E+02,
2.231774E+02,2.188010E+02,2.145105E+02,2.103041E+02,2.061801E+02,2.021371E+02,1.981733E+02,
1.942872E+02,1.904774E+02,1.867422E+02,1.830803E+02,1.794902E+02,1.759705E+02,1.725199E+02,
1.691368E+02,1.658202E+02,1.625685E+02,1.593807E+02,1.562553E+02,1.531912E+02,1.501873E+02,
1.472422E+02,1.443548E+02,1.415241E+02,1.387489E+02,1.360282E+02,1.333607E+02,1.307456E+02,
1.281818E+02,1.256682E+02,1.232039E+02,1.207880E+02,1.184194E+02,1.160973E+02,1.138207E+02,
1.115887E+02,1.094005E+02,1.072552E+02,1.051520E+02,1.030901E+02,1.010685E+02,9.908664E+01,
9.714361E+01,9.523868E+01,9.337111E+01,9.154016E+01,8.974511E+01,8.798527E+01,8.625993E+01,
8.456842E+01,8.291009E+01,8.128427E+01,7.969034E+01,7.812766E+01,7.659562E+01,7.509363E+01,
7.362109E+01,7.217742E+01,7.076207E+01,6.937447E+01,6.801408E+01,6.668036E+01,6.537280E+01,
6.409088E+01,6.283410E+01,6.160196E+01,6.039398E+01,5.920969E+01,5.804863E+01,5.691033E+01,
5.579435E+01,5.470026E+01,5.362762E+01,5.257601E+01,5.154503E+01,5.053426E+01,4.954332E+01,
4.857180E+01,4.761934E+01,4.668555E+01,4.577008E+01,4.487256E+01,4.399263E+01,4.312996E+01,
4.228421E+01,4.145504E+01,4.064214E+01,3.984517E+01,3.906383E+01,3.829781E+01,3.754681E+01,
3.681054E+01,3.608871E+01,3.538103E+01,3.468723E+01,3.400704E+01,3.334018E+01,3.268640E+01,
3.204544E+01,3.141705E+01,3.080098E+01,3.019699E+01,2.960485E+01,2.902431E+01,2.845516E+01,
2.789718E+01,2.735013E+01,2.681381E+01,2.628801E+01,2.577252E+01,2.526713E+01,2.477166E+01,
2.428590E+01,2.380967E+01,2.334278E+01,2.288504E+01,2.243628E+01,2.199632E+01,2.156498E+01,
2.114211E+01,2.072752E+01,2.032107E+01,1.992258E+01,1.953191E+01,1.914891E+01,1.877341E+01,
1.840527E+01,1.804436E+01,1.769052E+01,1.734362E+01,1.700352E+01,1.667009E+01,1.634320E+01,
1.602272E+01,1.570852E+01,1.540049E+01,1.509850E+01,1.480242E+01,1.451216E+01,1.422758E+01,
1.394859E+01,1.367506E+01,1.340690E+01,1.314400E+01,1.288626E+01,1.263357E+01,1.238583E+01,
1.214295E+01,1.190484E+01,1.167139E+01,1.144252E+01,1.121814E+01,1.099816E+01,1.078249E+01,
1.057105E+01,1.036376E+01,1.016053E+01,9.961292E+00,9.765957E+00,9.574453E+00,9.386704E+00,
9.202636E+00,9.022178E+00,8.845259E+00,8.671808E+00,8.501760E+00,8.335045E+00,8.171600E+00,
8.011360E+00,7.854262E+00,7.700245E+00,7.549248E+00,7.401212E+00,7.256078E+00,7.113791E+00,
6.974294E+00,6.837532E+00,6.703452E+00,6.572002E+00,6.443129E+00,6.316783E+00,6.192915E+00,
6.071476E+00,5.952418E+00,5.835694E+00,5.721260E+00,5.609069E+00,5.499079E+00,5.391245E+00,
5.285526E+00,5.181880E+00,5.080267E+00,4.980646E+00,4.882979E+00,4.787226E+00,4.693352E+00,
4.601318E+00,4.511089E+00,4.422629E+00,4.335904E+00,4.250880E+00,4.167523E+00,4.085800E+00,
4.005680E+00,3.927131E+00,3.850122E+00,3.774624E+00,3.700606E+00,3.628039E+00,3.556896E+00,
3.487147E+00,3.418766E+00,3.351726E+00,3.286001E+00,3.221564E+00,3.158392E+00,3.096457E+00,
3.035738E+00,2.976209E+00,2.917847E+00,2.860630E+00,2.804535E+00,2.749540E+00,2.695623E+00,
2.642763E+00,2.590940E+00,2.540133E+00,2.490323E+00,2.441489E+00,2.393613E+00,2.346676E+00,
2.300659E+00,2.255544E+00,2.211315E+00,2.167952E+00,2.125440E+00,2.083761E+00,2.042900E+00,
2.002840E+00,1.963566E+00,1.925061E+00,1.887312E+00,1.850303E+00,1.814020E+00,1.778448E+00,
1.743573E+00,1.709383E+00,1.675863E+00,1.643000E+00,1.610782E+00,1.579196E+00,1.548229E+00,
1.517869E+00,1.488104E+00,1.458924E+00,1.430315E+00,1.402267E+00,1.374770E+00,1.347811E+00,
1.321382E+00,1.295470E+00,1.270067E+00,1.245162E+00,1.220745E+00,1.196807E+00,1.173338E+00,
1.150330E+00,1.127772E+00]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variables
a1 = pd.Series([.621, .621, .648], dtype='float')
b1 = pd.Series([.564, .564, .651], dtype='float')
# internally specified variables from external database
body_wgt = pd.Series([15., 1000., 20.], dtype='float')
frac_h2o = pd.Series([0.8, 0.8, 0.8], dtype='float')
# input variables that change per simulation
ted_empty.frac_retained_mamm = | pd.Series([0.1, 0.1, 0.05], dtype='float') | pandas.Series |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with | HDFStore(path, mode=mode) | pandas.io.pytables.HDFStore |
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from copy import deepcopy
from qsm import LogProfile, NormalisedWindTable1D, KiteKinematics, SteadyState, TractionPhaseHybrid, \
TractionConstantElevation, SteadyStateError, TractionPhase
from kitepower_kites import sys_props_v3
from cycle_optimizer import OptimizerCycle
from power_curve_constructor import PowerCurveConstructor
# Assumptions representative reel-out state at cut-in wind speed.
theta_ro_ci = 25 * np.pi / 180.
phi_ro = 13 * np.pi / 180.
chi_ro = 100 * np.pi / 180.
l0 = 200 # Tether length at start of reel-out.
l1_lb = 350 # Lower bound of tether length at end of reel-out.
l1_ub = 450 # Upper bound of tether length at end of reel-out.
def calc_tether_force_traction(env_state, straight_tether_length):
""""Calculate tether force for the minimum allowable reel-out speed and given wind conditions and tether length."""
kinematics = KiteKinematics(straight_tether_length, phi_ro, theta_ro_ci, chi_ro)
env_state.calculate(kinematics.z)
sys_props_v3.update(kinematics.straight_tether_length, True)
ss = SteadyState({'enable_steady_state_errors': True})
ss.control_settings = ('reeling_speed', sys_props_v3.reeling_speed_min_limit)
ss.find_state(sys_props_v3, env_state, kinematics)
return ss.tether_force_ground
def get_cut_in_wind_speed(env):
"""Iteratively determine lowest wind speed for which, along the entire reel-out path, feasible steady flight states
with the minimum allowable reel-out speed are found."""
dv = 1e-2 # Step size [m/s].
v0 = 5.6 # Lowest wind speed [m/s] with which the iteration is started.
v = v0
while True:
env.set_reference_wind_speed(v)
try:
# Setting tether force as setpoint in qsm yields infeasible region
tether_force_start = calc_tether_force_traction(env, l0)
tether_force_end = calc_tether_force_traction(env, l1_lb)
start_critical = tether_force_end > tether_force_start
if start_critical:
critical_force = tether_force_start
else:
critical_force = tether_force_end
if tether_force_start > sys_props_v3.tether_force_min_limit and \
tether_force_end > sys_props_v3.tether_force_min_limit:
if v == v0:
raise ValueError("Starting speed is too high.")
return v, start_critical, critical_force
except SteadyStateError:
pass
v += dv
def calc_n_cw_patterns(env, theta=60. * np.pi / 180.):
"""Calculate the number of cross-wind manoeuvres flown."""
trac = TractionPhaseHybrid({
'control': ('tether_force_ground', sys_props_v3.tether_force_max_limit),
'azimuth_angle': phi_ro,
'course_angle': chi_ro,
})
trac.enable_limit_violation_error = True
# Start and stop conditions of traction phase. Note that the traction phase uses an azimuth angle in contrast to
# the other phases, which results in jumps of the kite position.
trac.tether_length_start = l0
trac.tether_length_start_aim = l0
trac.elevation_angle = TractionConstantElevation(theta)
trac.tether_length_end = l1_ub
trac.finalize_start_and_end_kite_obj()
trac.run_simulation(sys_props_v3, env, {'enable_steady_state_errors': True})
return trac.n_crosswind_patterns
def get_max_wind_speed_at_elevation(env=LogProfile(), theta=60. * np.pi / 180.):
"""Iteratively determine maximum wind speed allowing at least one cross-wind manoeuvre during the reel-out phase for
provided elevation angle."""
dv = 1e-1 # Step size [m/s].
v0 = 18. # Lowest wind speed [m/s] with which the iteration is started.
# Check if the starting wind speed gives a feasible solution.
env.set_reference_wind_speed(v0)
try:
n_cw_patterns = calc_n_cw_patterns(env, theta)
except SteadyStateError as e:
if e.code != 8:
raise ValueError("No feasible solution found for first assessed cut out wind speed.")
    # Increase wind speed until the number of cross-wind manoeuvres drops below one.
v = v0 + dv
while True:
env.set_reference_wind_speed(v)
try:
n_cw_patterns = calc_n_cw_patterns(env, theta)
if n_cw_patterns < 1.:
return v
except SteadyStateError as e:
if e.code != 8: # Speed is too low to yield a solution when e.code == 8.
raise
# return None
if v > 30.:
raise ValueError("Iteration did not find feasible cut-out speed.")
v += dv
def get_cut_out_wind_speed(env=LogProfile()):
"""In general, the elevation angle is increased with wind speed as a last means of de-powering the kite. In that
case, the wind speed at which the elevation angle reaches its upper limit is the cut-out wind speed. This
    procedure verifies whether this is indeed the case: the elevation angle yielding the highest wind speed that still
    allows at least one cross-wind manoeuvre during the reel-out phase is determined iteratively."""
beta = 60*np.pi/180.
dbeta = 1*np.pi/180.
vw_last = 0.
while True:
vw = get_max_wind_speed_at_elevation(env, beta)
if vw is not None:
if vw <= vw_last:
return vw_last, beta+dbeta
vw_last = vw
beta -= dbeta
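# Hedged usage sketch (added for illustration; not part of the original script). It shows how the
# two iterative procedures above can be combined to bracket the operational wind-speed window for
# a single wind profile. The helper name below is invented here; all other names (LogProfile,
# get_cut_in_wind_speed, get_cut_out_wind_speed) are defined in this module.
def example_operational_window(env=None):
    """Illustrative only: return (cut-in speed, cut-out speed, elevation angle at cut-out)."""
    if env is None:
        env = LogProfile()
    vw_cut_in, _, _ = get_cut_in_wind_speed(env)
    vw_cut_out, elevation_at_cut_out = get_cut_out_wind_speed(env)
    return vw_cut_in, vw_cut_out, elevation_at_cut_out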
def export_to_csv(v, v_cut_out, p, x_opts, n_cwp, i_profile, suffix):
df = {
'v_100m [m/s]': v,
'v/v_cut-out [-]': v/v_cut_out,
'P [W]': p,
'F_out [N]': [x[0] for x in x_opts],
'F_in [N]': [x[1] for x in x_opts],
'theta_out [rad]': [x[2] for x in x_opts],
'dl_tether [m]': [x[3] for x in x_opts],
'l0_tether [m]': [x[4] for x in x_opts],
'n_crosswind_patterns [-]': n_cwp,
}
df = pd.DataFrame(df)
df.to_csv('output/power_curve{}{}.csv'.format(suffix, i_profile), index=False, sep=";")
def create_environment(suffix, i_profile):
"""Flatten wind profile shapes resulting from the clustering and use to create the environment object."""
df = pd.read_csv('wind_resource/'+'profile{}{}.csv'.format(suffix, i_profile), sep=";")
env = NormalisedWindTable1D()
env.heights = list(df['h [m]'])
env.normalised_wind_speeds = list((df['u1 [-]']**2 + df['v1 [-]']**2)**.5)
return env
def estimate_wind_speed_operational_limits(loc='mmc', n_clusters=8):
"""Estimate the cut-in and cut-out wind speeds for each wind profile shape. These wind speeds are refined when
determining the power curves."""
suffix = '_{}{}'.format(n_clusters, loc)
fig, ax = plt.subplots(1, 2, figsize=(5.5, 3), sharey=True)
plt.subplots_adjust(top=0.92, bottom=0.164, left=0.11, right=0.788, wspace=0.13)
res = {'vw_100m_cut_in': [], 'vw_100m_cut_out': [], 'tether_force_cut_in': []}
for i_profile in range(1, n_clusters+1):
env = create_environment(suffix, i_profile)
# Get cut-in wind speed.
vw_cut_in, _, tether_force_cut_in = get_cut_in_wind_speed(env)
res['vw_100m_cut_in'].append(vw_cut_in)
res['tether_force_cut_in'].append(tether_force_cut_in)
# Get cut-out wind speed, which proved to work better when using 250m reference height.
env.set_reference_height(250)
vw_cut_out250m, elev = get_cut_out_wind_speed(env)
env.set_reference_wind_speed(vw_cut_out250m)
vw_cut_out = env.calculate_wind(100.)
res['vw_100m_cut_out'].append(vw_cut_out)
# Plot the wind profiles corresponding to the wind speed operational limits and the profile shapes.
env.set_reference_height(100.)
env.set_reference_wind_speed(vw_cut_in)
plt.sca(ax[0])
env.plot_wind_profile()
env.set_reference_wind_speed(vw_cut_out)
plt.sca(ax[1])
env.plot_wind_profile("{}-{}".format(loc.upper(), i_profile))
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.ylabel('')
df = pd.DataFrame(res)
print(df)
if not os.path.exists('output/wind_limits_estimate{}.csv'.format(suffix)):
df.to_csv('output/wind_limits_estimate{}.csv'.format(suffix))
else:
print("Skipping exporting operational limits.")
ax[0].set_title("Cut-in")
ax[0].set_xlim([0, None])
ax[0].set_ylim([0, 400])
ax[1].set_title("Cut-out")
ax[1].set_xlim([0, None])
ax[1].set_ylim([0, 400])
def generate_power_curves(loc='mmc', n_clusters=8):
"""Determine power curves - requires estimates of the cut-in and cut-out wind speed to be available."""
suffix = '_{}{}'.format(n_clusters, loc)
limit_estimates = pd.read_csv('output/wind_limits_estimate{}.csv'.format(suffix))
# Cycle simulation settings for different phases of the power curves.
cycle_sim_settings_pc_phase1 = {
'cycle': {
'traction_phase': TractionPhase,
'include_transition_energy': False,
},
'retraction': {},
'transition': {
'time_step': 0.25,
},
'traction': {
'azimuth_angle': phi_ro,
'course_angle': chi_ro,
},
}
cycle_sim_settings_pc_phase2 = deepcopy(cycle_sim_settings_pc_phase1)
cycle_sim_settings_pc_phase2['cycle']['traction_phase'] = TractionPhaseHybrid
ax_pcs = plt.subplots(2, 1)[1]
for a in ax_pcs: a.grid()
limits_refined = {'vw_100m_cut_in': [], 'vw_100m_cut_out': []}
res_pcs = []
for i_profile in range(1, n_clusters+1):
# Pre-configure environment object for optimizations by setting normalized wind profile.
env = create_environment(suffix, i_profile)
        # Optimizations are performed sequentially with increasing wind speed. The solution of the previous optimization
        # is used to initialise the next. With trial and error, the configuration below proved to give a reasonably
        # robust approach. The power curves may however still exhibit discontinuities and therefore need to be checked
        # and possibly post-processed.
        # The optimization consistently fails for the estimated cut-out wind speed. Therefore, the highest wind speed
        # for which the optimization is performed is set somewhat lower than the estimated cut-out wind speed.
vw_cut_in = limit_estimates.iloc[i_profile-1]['vw_100m_cut_in']
vw_cut_out = limit_estimates.iloc[i_profile-1]['vw_100m_cut_out']
wind_speeds = np.linspace(vw_cut_in, vw_cut_out-1, 50)
wind_speeds = np.concatenate((wind_speeds, np.linspace(vw_cut_out-1, vw_cut_out-0.05, 15)))
# For the first phase of the power curve, the constraint on the number of cross-wind patterns flown is not
# active. It is assumed that sufficient cross-wind patterns are flown up to vw_100m = 7 m/s (verify this).
# Therefore, the number of cross-wind patterns is not calculated for this phase. Also the upper elevation bound
# is set to 30 degrees.
op_cycle_pc_phase1 = OptimizerCycle(cycle_sim_settings_pc_phase1, sys_props_v3, env, reduce_x=np.array([0, 1, 2, 3]))
op_cycle_pc_phase1.bounds_real_scale[2][1] = 30*np.pi/180.
op_cycle_pc_phase2 = OptimizerCycle(cycle_sim_settings_pc_phase2, sys_props_v3, env, reduce_x=np.array([0, 1, 2, 3]))
# Configuration of the sequential optimizations for which is differentiated between the wind speed ranges
# bounded above by the wind speed of the dictionary key. If dx0 does not contain only zeros, the starting point
# of the new optimization is not the solution of the preceding optimization.
op_seq = {
7.: {'power_optimizer': op_cycle_pc_phase1, 'dx0': np.array([0., 0., 0., 0., 0.])},
17.: {'power_optimizer': op_cycle_pc_phase2, 'dx0': np.array([0., 0., 0., 0., 0.])},
np.inf: {'power_optimizer': op_cycle_pc_phase2, 'dx0': np.array([0., 0., 0.1, 0., 0.])}, # Convergence for
            # profiles 2 and 6 is sensitive to the starting elevation. The number of patterns constraint exhibits a
# minimum along the feasible elevation angle range. When the elevation angle of the starting point is lower
# than that of the minimum, the optimizer is driven towards lower elevation angles which do not yield a
# feasible solution.
}
# Define starting point for the very first optimization at the cut-in wind speed.
critical_force = limit_estimates.iloc[i_profile-1]['tether_force_cut_in']
x0 = np.array([critical_force, 300., theta_ro_ci, 150., 200.0])
# Start optimizations.
pc = PowerCurveConstructor(wind_speeds)
pc.run_predefined_sequence(op_seq, x0)
pc.export_results('output/power_curve{}{}.pickle'.format(suffix, i_profile))
res_pcs.append(pc)
# Refine the wind speed operational limits to wind speeds for which optimal solutions are found.
limits_refined['vw_100m_cut_in'].append(pc.wind_speeds[0])
limits_refined['vw_100m_cut_out'].append(pc.wind_speeds[-1])
print("Cut-in and -out speeds changed from [{:.3f}, {:.3f}] to "
"[{:.3f}, {:.3f}].".format(vw_cut_in, vw_cut_out, pc.wind_speeds[0], pc.wind_speeds[-1]))
# Plot power curve together with that of the other wind profile shapes.
p_cycle = [kpis['average_power']['cycle'] for kpis in pc.performance_indicators]
ax_pcs[0].plot(pc.wind_speeds, p_cycle, label=i_profile)
ax_pcs[1].plot(pc.wind_speeds/vw_cut_out, p_cycle, label=i_profile)
pc.plot_optimal_trajectories()
pc.plot_optimization_results(op_cycle_pc_phase2.OPT_VARIABLE_LABELS, op_cycle_pc_phase2.bounds_real_scale,
[sys_props_v3.tether_force_min_limit, sys_props_v3.tether_force_max_limit],
[sys_props_v3.reeling_speed_min_limit, sys_props_v3.reeling_speed_max_limit])
n_cwp = [kpis['n_crosswind_patterns'] for kpis in pc.performance_indicators]
export_to_csv(pc.wind_speeds, vw_cut_out, p_cycle, pc.x_opts, n_cwp, i_profile, suffix)
ax_pcs[1].legend()
df = | pd.DataFrame(limits_refined) | pandas.DataFrame |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, | pd.Series([0, 0, 0, 0, 4]) | pandas.Series |
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime
import logging
import numpy as np
from tashares.cfg import config
from tashares.stockmeta import Stockmeta
rng = np.random.default_rng(seed=0)
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
class Stockjob(Stockmeta):
"""Generate ML features
Args:
symbol (string, required): stock symbol. Default: ''
        forecast_days (int, optional): how many days ahead to forecast. Default: 1, i.e. tomorrow
max_training_date (string, optional): the stopping date for training, to control training/test split. Default: '2021-01-01'
stack (int, optional): stacking features of multiple days. Default: 1, i.e. today's feature only
"""
stack: int = config.getint('DEFAULT', 'FeatureStacks')
forecast_jobs = config['DEFAULT']['ForecastJobs']
max_training_date = config['DEFAULT']['MaxTrainingDate']
features = pd.DataFrame()
max_shift = 0
def __init__(self, *args, **kwargs):
super(Stockjob, self).__init__(*args, **kwargs)
self.max_training_date = datetime.strptime(
kwargs.get('max_training_date', self.max_training_date), '%Y-%m-%d')
self.stack = kwargs.get('stack_features', self.stack)
self.update_features()
def update_features(self):
self.features = self.ta # reference only
if self.features.empty:
return
# stacking
original = self.ta[['open', 'high', 'low', 'close', 'volume']]
for stack in range(1, self.stack+1):
content = original.shift(stack)
content.columns = [
str(col) + f'_{stack}' for col in original.columns]
self.features = | pd.concat([self.features, content], axis=1) | pandas.concat |
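# Hedged note (added for clarity, not part of the original class): with stack=2 the loop above
# places the previous two days of OHLCV data next to the current row, producing columns such as
#   open, high, low, close, volume, ..., open_1, ..., volume_1, open_2, ..., volume_2
# so that every row carries a short rolling history of raw prices as additional features.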
import warnings
import pydot
import graphviz
# Take a look at the raw data :
import pandas as pd
from pandas import Series
from pandas import DataFrame
from pandas import read_csv
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
import matplotlib
# be able to save images on server
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from math import sqrt
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
import sys
import errno
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
import keras
from keras.layers import Input, Convolution1D, Dense, MaxPooling1D, Flatten, Conv2D
from keras.layers import LSTM
from keras.callbacks import Callback
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
# be able to save images on server
# matplotlib.use('Agg')
import time
import datetime
from keras.models import load_model
import multiprocessing
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Hide messy TensorFlow warnings
warnings.filterwarnings("ignore") # Hide messy Numpy warnings
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
class EarlyStoppingByLossVal(Callback):
def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
super(Callback, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
if current is None:
warnings.warn("Early stopping requires %s available!" %
self.monitor, RuntimeWarning)
if current < self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping THR" % epoch)
self.model.stop_training = True
class RData:
def __init__(self, path, n_weeks=26):
self.path = path
self.data = {}
# load dataset
self.data['raw'] = self.load_data()
# config
self.n_weeks = n_weeks
self.n_features = int(len(self.data['raw'][0].columns))
print("number of features: {}".format(self.n_features))
# scale data
self.scaler = preprocessing.MinMaxScaler()
self.scale()
# reframe data
self.reframe()
# self.state_list_name = self.data.state.unique()
self.split_data()
# print(self.n_features)
# Return specific data
def __getitem__(self, index):
return self.data[index]
# load dataset
def load_data(self):
raw = read_csv(self.path)
raw = raw.fillna(0)
# print(raw['0'].head())
# raw = raw.drop(["0"], axis = 1)
# print(raw.head())
# transform column names
raw.columns = map(str.lower, raw.columns)
# raw.rename(columns={'weekend': 'date'}, inplace=True)
latitudeList = raw.latitude.unique()
longitudeList = raw.longitude.unique()
data_list = list()
cell_label = list()
for la in latitudeList:
for lo in longitudeList:
data = raw[(raw.latitude == la) & (raw.longitude == lo)]
if(len(data) == 260):
select = [
#'date',
#'year',
#'month',
#'week',
#'week_temp',
#'week_prcp',
#'latitude',
#'longitude',
'mean_ili',
#'ili_activity_label',
#'ili_activity_group'
]
# One Hot Encoding
data = pd.get_dummies(data[select])
# print(data.head(1))
data_list.append(data)
cell_label.append('lat {} - long {}'.format(la, lo))
#print("The data for latitude {} and longitude {} contains {} rows".format(
# la, lo, len(data)))
self.data['cell_labels'] = cell_label
print("The are {} cell in the data".format(len(data_list)))
return data_list
# convert series to supervised learning
@staticmethod
def series_to_supervised(df, n_in=26, n_out=26, dropnan=True):
from pandas import concat
data = | DataFrame(df) | pandas.DataFrame |
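# Hedged sketch (added for illustration): the usual shift-and-concat pattern behind a
# "series to supervised" transformation, in the spirit of the series_to_supervised() method
# above. It is a standalone stand-in, not the original completion of that method.
def series_to_supervised_sketch(df, n_in=1, n_out=1, dropnan=True):
    from pandas import DataFrame, concat
    data = DataFrame(df)
    cols, names = [], []
    # input sequence (t-n_in, ..., t-1)
    for i in range(n_in, 0, -1):
        cols.append(data.shift(i))
        names += ["var{}(t-{})".format(j + 1, i) for j in range(data.shape[1])]
    # forecast sequence (t, t+1, ..., t+n_out-1)
    for i in range(n_out):
        cols.append(data.shift(-i))
        suffix = "(t)" if i == 0 else "(t+{})".format(i)
        names += ["var{}{}".format(j + 1, suffix) for j in range(data.shape[1])]
    framed = concat(cols, axis=1)
    framed.columns = names
    # rows at the edges contain NaNs introduced by the shifting
    if dropnan:
        framed.dropna(inplace=True)
    return framed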
import pandas as pd
import csv
from itertools import zip_longest
import os
import math
def readFiles(tpath):
txtLists = os.listdir(tpath)
return txtLists
def atan(x):
b = []
for i in x:
bb = ( math.atan(i) * 2 / math.pi)
b.append(bb)
b = pd.Series(b)
return b
def log(x,maxx):
b = []
for i in x:
bb = math.log10(i+1)/math.log10(maxx)
b.append(bb)
b = pd.Series(b)
return b
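# Hedged illustration (added for clarity, not in the original script): both helpers above squash
# non-negative values into [0, 1] - atan() through a fixed arctangent curve, log() relative to a
# caller-supplied maximum. The expected values in the comments are approximate.
def example_normalisation():
    x = pd.Series([0.0, 1.0, 10.0])
    squashed_atan = atan(x)       # ~[0.00, 0.50, 0.94]
    squashed_log = log(x, 11.0)   # log10(x+1)/log10(11) -> [0.0, ~0.29, 1.0]
    return squashed_atan, squashed_log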
def SaveFCAs(path):
errorlist = []
count = 0
FreqC, FreqCR, FreqE, FreqER = find_max_freq()
print(FreqC, FreqCR, FreqE, FreqER)
CueC, CueCR, CueE, CueER, mCueE, mCueER, mCueC, mCueCR = find_max_cue()
print(CueC, CueCR, CueE, CueER)
print( mCueC, mCueCR, mCueE, mCueER)
f = 1
sta_schemas = pd.DataFrame(
columns=["Schema", "nClass", "CueE_n", "CueER_n", "FreqE_n", "FreqER_n", 'search', 'share', 'overall'])
for i in readFiles(path):
if 'csv' not in i:
continue
count += 1
name = i[:-8]
add1 = 'Cue/%s_Cue.csv' %name
add2 = 'Freq/%s_Freq.csv' %name
cue = | pd.read_csv(add1) | pandas.read_csv |
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = | pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}]) | pandas.DataFrame |
from config_stat import twitter_app_auth # config file
from metric.const_config import NUM_FRIENDS_TO_SAVE, NUM_FOLLOWERS_TO_SAVE
from metric.db_for_analysis import SaveTweetstoDB, GetAllConns,UpdateConn
from metric.db_for_analysis import GetTweetsWithoutLowCredScore, UpdateLowCredScore
from metric.db_for_analysis import GetTweetsWithoutHashtagScore, GetTweetsWithoutURLScore
from metric.metrics import URLBased, HashtagBased, process_low_cred_urls
import numpy as np
import tweepy
import random
import networkx as nx
import pandas as pd
import time
import tldextract
from tweepy.error import TweepError
class Analyzer(object):
def __init__(self, init_tweepy=False):
self.tweepy_api = None
self.auth = None
self.low_credibility_df = | pd.read_csv("../data/fake_source_list.csv") | pandas.read_csv |
"""
Created on 3/9/2020
By <NAME>
"""
import pickle
import numpy as np
import pandas as pd
from collections import defaultdict
import os
import pickle
import numpy as np
import pandas as pd
from collections import defaultdict
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, roc_auc_score, f1_score
from functools import partial
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from sklearn.svm import SVC
from scipy.stats import gaussian_kde
# from sklearn.metrics import det_curve
from Analytics import IdentificationTasks as Identify
import warnings
import time
from Analytics.Experiment import TrainedModel
warnings.filterwarnings('ignore', category=FutureWarning)
np.seterr(divide='ignore', invalid='ignore')
class FuseRule:
def __init__(self, list_o_rules, score_data, modalities, fusion_settings=None, experiment='', tasks=[], thinking=None):
self.list_o_rules = sorted(list(set(list_o_rules)))
self.score_data = score_data
self.modalities = modalities
self.fused_modalities = []
self.fusion_settings = fusion_settings
self.results = None
self.tasks = tasks
self.experiment_name = experiment
self.title = ''
self.models = []
self.thinking = thinking
self.cmc_accuracies = None
def make_rocs(self):
experiment_dir = self.experiment_name
if not os.path.exists('./generated/experiments/ROC/' + experiment_dir):
os.makedirs('./generated/experiments/ROC/' + experiment_dir)
# TODO: handle ROC/CMC settings better
if not os.path.exists('./generated/experiments/CMC/' + experiment_dir):
os.makedirs('./generated/experiments/CMC/' + experiment_dir)
print('Making ROCs... ')
plt.figure()
analytic_beans = []
base_modals = [x for x in self.modalities if ':' not in x]
fused_modals = [x for x in self.fused_modalities]
## Baseline
if not self.results:
for modality in base_modals:
metrics = {'FPRS': [], 'TPRS': [], 'EER': [], 'AUC': [], 'thresholds': [], 'Experiment': []}
Y_test = self.score_data['Label']
scores = self.score_data[modality]
fprs, tprs, thresh = roc_curve(Y_test, scores)
metrics['Experiment'] = 'BASELINE'
metrics['FPRS'] = fprs
metrics['TPRS'] = tprs
metrics['thresholds'] = thresh
metrics['EER'] = brentq(lambda x : 1. - x - interp1d(fprs, tprs)(x), 0., 1.)
metrics['AUC'] = roc_auc_score(Y_test, scores)
analytic_beans.append(metrics)
## Fused
for modality in fused_modals:
metrics = {'FPRS': [], 'TPRS': [], 'EER': [], 'AUC': [], 'thresholds': [], 'Experiment': []}
Y_test = self.score_data['Label']
scores = self.score_data[modality]
fprs, tprs, thresh = roc_curve(Y_test, scores)
metrics['Experiment'] = self.experiment_name
metrics['FPRS'] = fprs
metrics['TPRS'] = tprs
metrics['thresholds'] = thresh
metrics['EER'] = brentq(lambda x : 1. - x - interp1d(fprs, tprs)(x), 0., 1.)
metrics['AUC'] = roc_auc_score(Y_test, scores)
analytic_beans.append(metrics)
self.results = pd.DataFrame(analytic_beans)
self.results.index = [x for x in base_modals] + [x for x in fused_modals]
############## Baseline Plots
for baseline in base_modals:
fprs = self.results.loc[baseline]['FPRS']
tprs = self.results.loc[baseline]['TPRS']
plt.semilogx(fprs, tprs, label=baseline, marker='+')
plt.legend(bbox_to_anchor=(0.5, -0.02), loc="lower left", borderaxespad=0)
plt.xlabel('False Match Rate (FMR)', fontsize=15)
plt.ylabel('True Match Rate (TMR)', fontsize=15)
plt.title('Baseline Modalities', fontsize=15)
plot_name = './generated/experiments/ROC/' + experiment_dir + '/baseline.png'
plt.savefig(plot_name, bbox_inches='tight', pad_inches=0.5)
plt.clf()
############## Fused Plots
for fused in fused_modals:
for baseline in base_modals:
fprs = self.results.loc[baseline]['FPRS']
tprs = self.results.loc[baseline]['TPRS']
plt.semilogx(fprs, tprs, label=baseline, marker='+')
fused_fprs = self.results.loc[fused]['FPRS']
fused_tprs = self.results.loc[fused]['TPRS']
plt.semilogx(fused_fprs, fused_tprs, label=fused, marker='X')
plt.legend(bbox_to_anchor=(0.5, -0.02), loc="lower left", borderaxespad=0)
plt.xlabel('False Match Rate (FMR)', fontsize=15)
plt.ylabel('True Match Rate (TMR)', fontsize=15)
plt.title(fused + ' Fusion', fontsize=15)
plot_name = './generated/experiments/ROC/' + experiment_dir + '/' + fused.replace(':', '') + '.png'
plt.savefig(plot_name, bbox_inches='tight', pad_inches=0.5)
plt.clf()
############## ALL Plots
for baseline in base_modals:
fprs = self.results.loc[baseline]['FPRS']
tprs = self.results.loc[baseline]['TPRS']
plt.semilogx(fprs, tprs, label=baseline, marker='+')
for fused in fused_modals:
fused_fprs = self.results.loc[fused]['FPRS']
fused_tprs = self.results.loc[fused]['TPRS']
plt.semilogx(fused_fprs, fused_tprs, label=fused, marker='X')
plt.legend(bbox_to_anchor=(0.5, -0.02), loc="lower left", borderaxespad=0)
plt.xlabel('False Match Rate (FMR)', fontsize=15)
plt.ylabel('True Match Rate (TMR)', fontsize=15)
fusion_rules = [x.replace(':', '') for x in self.score_data.columns if x.isupper()]
plt.title('All Fusion Rules::' + ' '.join(fusion_rules), fontsize=15)
plot_name = './generated/experiments/ROC/' + experiment_dir + '/' + 'all.png'
plt.savefig(plot_name, bbox_inches='tight', pad_inches=0.5)
plt.clf()
print('Finished ROC')
################################################################################################################
################################################################################################################
def extract_alpha_beta(self, gen, imp):
        # Quartile-based bounds on the genuine/impostor overlap region:
        # the first quartile of the genuine scores and the third quartile of
        # the impostor scores delimit where the two distributions overlap.
        gen_Q1 = np.quantile(gen, 0.25)
        imp_Q3 = np.quantile(imp, 0.75)
        ran = np.array([imp_Q3, gen_Q1])
        # return in ascending order: (alpha, beta)
        return min(ran), max(ran)
def sequential_rule(self):
t0 = time.time()
seq_title='SequentialRule'
train = self.score_data[self.score_data['Train_Test'] == 'TRAIN']
baseline = self.fusion_settings['baseline']
if self.fusion_settings['auto']:
seq_title = seq_title+'(AUTO):'
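            # Auto mode: derive alpha/beta from the training scores of the
            # baseline modality (genuine vs. impostor quartiles, see extract_alpha_beta)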
gen_scores = train[train['Label'] == 1.0][baseline]
imp_scores = train[train['Label'] == 0.0][baseline]
alpha, beta = self.extract_alpha_beta(gen_scores, imp_scores)
else:
alpha = self.fusion_settings['alpha']
beta = self.fusion_settings['beta']
            seq_title = seq_title+'('+str(round(alpha, 2))+'-'+str(round(beta, 2))+'):'
if seq_title not in self.fused_modalities:
self.fused_modalities.append(seq_title)
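        # Fuse only scores in the "uncertain" band [alpha, beta): those rows get
        # the mean across all modalities, while every other row keeps the
        # baseline modality score (restored by the fillna below).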
self.score_data[seq_title] = \
self.score_data[(self.score_data[baseline]>= alpha) & (self.score_data[baseline]< beta)][self.modalities].mean(axis=1)
self.score_data[seq_title].fillna(self.score_data[baseline], inplace=True)
t1 = time.time()
self.models.append(TrainedModel(title=seq_title, train_time=t1 - t0, model=None))
def sum_rule(self):
t0 = time.time()
sum_title = 'SIMPLE_SUM:'
if sum_title not in self.fused_modalities:
self.fused_modalities.append(sum_title)
self.score_data[sum_title] = self.score_data[self.modalities].mean(axis=1)
t1 = time.time()
self.models.append(TrainedModel(title=sum_title, train_time=t1-t0, model=None))
def svm_rule(self):
t0 = time.time()
svm_title = 'SVM:'
if svm_title not in self.fused_modalities:
self.fused_modalities.append(svm_title)
train_indices = self.score_data.index[self.score_data['Train_Test'] == 'TRAIN']
test_indices = self.score_data.index[self.score_data['Train_Test'] == 'TEST']
train = self.score_data.iloc[train_indices]
train_y = self.score_data.iloc[train_indices]['Label']
train_x = train[self.modalities]
test = self.score_data.iloc[test_indices]
test_x = test[self.modalities]
clf = SVC(gamma='auto', probability=True)
clf.fit(train_x, train_y)
t1 = time.time()
self.models.append(TrainedModel(title=svm_title, train_time=t1-t0, model=clf))
## Get the scores for the predicted test labels
preds = clf.predict(test_x).astype(int)
test_scores_all = clf.predict_proba(test_x)
rows = [i for i in range(len(preds))]
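        # for each test row, keep the probability the SVM assigned to its predicted class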
test_scores = | pd.DataFrame(test_scores_all[rows, preds], index=test_indices) | pandas.DataFrame |
import pandas as pd
d0 = pd.read_table('test20151110a.csv.gz', sep=',')
d = d0[['sim_id', 'y', 'category']]
g = d.groupby('sim_id')
# for each sim_id find the most frequent category
freq = g['category'].agg(lambda x: x.value_counts().idxmax())
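# split each sim_id into two components: sim_id = mm * 2e7 + ebint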
mm = (freq.index.values.astype(int) // 2e7).astype(int)
ebint = (freq.index.values.astype(int) - mm * 2e7).astype(int)
d1 = | pd.DataFrame({'mm': mm, 'ebint': ebint, 'category': freq.values}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that is works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# called with a non-MultiIndex array of tuples: no exception, result is empty
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# errors='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / errors='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate NaN insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)]
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
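# bits 0, 2, 3 and 5 of 0b00101101 are set, so rows 0, 2, 3 and 5 are valid and rows 1 and 4 are null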
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
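# constrain the detected terminal width so the wide frame is printed with elided ("...") columns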
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
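# slicing to zero rows keeps the object dtype, matching the empty pandas frame above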
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
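# apply a random null mask to each column and record the expected values, with nulls replaced by the sentinel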
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# cudf.DataFrame.to_pandas() casts nulls in non-float numerical columns to 0,
# so cast those columns to float64 and fill with NaN to match pandas' float upcast
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
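# map numpy datetime64 units onto the equivalent Arrow timestamp type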
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's string hashing is randomized between runs, which can make
    # enc_with_name_arr coincide with enc_arr. There is no reliable way to pin
    # the string hash, so an integer name is used to get a constant hash value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
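# CuPy arrays expose __cuda_array_interface__, so cudf should be able to build
# Series and DataFrame columns from them directly.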
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
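# A pyarrow ChunkedArray is concatenated into a single contiguous column when
# converted to a cudf Series or DataFrame.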
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
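    # After boolean masking, the frame and each of its columns should share the
    # same index buffer (identical device pointers) rather than holding copies.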
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
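# Indexing a DataFrame with a boolean DataFrame applies the mask elementwise,
# as in pandas; unmasked positions come back as missing values, hence the
# fillna(-1) before comparing below.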
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
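# Series.digitize should match np.digitize: for each value it returns the index
# of the bin the value falls into, with `right` controlling edge inclusion.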
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas Series explicitly to float64, since a list of None values
    # would otherwise give pandas an object dtype.
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
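    # head(-n) drops the last n rows and tail(-n) drops the first n rows,
    # mirroring the equivalent slices on the right-hand side of each check.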
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only supports ignore_index from v1.0, so it is applied to
    # `expected` manually below.
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
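# NumPy ufuncs such as np.sqrt applied to cudf objects should produce the same
# values as applying them to the pandas equivalents.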
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
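# DataFrame.round accepts a scalar, a Series, or a dict of per-column decimal
# places, as in pandas; columns not mentioned are left unrounded.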
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
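# __sizeof__ of a DataFrame should equal the sum of its column sizes plus the
# size of its index.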
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = | pd.Series([1, 2, 4], dtype="int64") | pandas.Series |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
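    # Columns of the frame can be passed directly as the index/columns
    # arguments instead of their labels.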
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
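    # A list of aggfuncs yields a column MultiIndex keyed by aggregation name,
    # equivalent to concatenating the single-function results with keys.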
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import argparse
from augur.utils import write_json, read_node_data
import Bio.SeqIO
from collections import OrderedDict
import hdbscan
import matplotlib.pyplot as plt
import math
import matplotlib as mpl
import numpy as np
import pandas as pd
import re
from scipy.spatial.distance import pdist
import seaborn as sns
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.model_selection import RepeatedKFold
import sys
from umap import UMAP
from Helpers import get_euclidean_data_frame, get_PCA_feature_matrix
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--distance-matrix", help="csv file with the distance matrix")
parser.add_argument("--alignment", help="FASTA file with the alignment")
parser.add_argument("--clades", help="json file containing information about clade membership")
parser.add_argument("--column-metadata", default="clade_membership", help="the column which contains the clade information")
parser.add_argument("--threshold-information", nargs="+", help="the distance threshold values to be used on HDBSCAN. if not provided, it will run without.")
parser.add_argument("--output", help="the csv path where the best label of the data and strain name per method will be saved.")
parser.add_argument("--output-full", help="the csv path where the full list of accuracy data per threshold will be saved")
parser.add_argument("--output-figure", help="PNG with the MCC values displayed graphically per method")
args = parser.parse_args()
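# Illustrative invocation (a sketch, not from the original repository; the script and file
# names below are placeholders, only the flags come from the parser defined above):
#
#   python embed_clusters.py \
#       --distance-matrix distances.csv \
#       --alignment aligned.fasta \
#       --clades clades.json \
#       --output best_cluster_labels.csv \
#       --output-full accuracy_per_threshold.csv \
#       --output-figure mcc_per_method.png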
if args.output is None and args.output_cluster_labels is not None:
print("You must select output in order to get cluster labels", file=sys.stderr)
sys.exit(1)
def _get_embedding_columns_by_method(method):
if method in ("pca"):
return list(f"{method}1 {method}2 {method}3 {method}4 {method}5 {method}6 {method}7 {method}8 {method}9 {method}10".split())
if method in ("mds"):
return list(f"{method}1 {method}2".split())
if method in ("t-sne"):
return list("tsne_x tsne_y".split())
else:
return list(f"{method}_x {method}_y".split())
if(args.threshold_information is not None):
#threshold_df = pd.read_csv(args.threshold_information) threshold_df.loc[threshold_df['embedding'] == args.method][args.column_threshold].values.tolist()[0]
distance_thresholds = args.threshold_information
else:
distance_thresholds = np.arange(0,20,2)
default_tuned_values = []
list_of_embedding_strings = ["t-sne", "umap", "mds", "pca"] #["t-SNE","UMAP","MDS", "PCA"]
embedding_class = [TSNE, UMAP, MDS, PCA]
# reading in the distance matrix and node data
distance_matrix = pd.read_csv(args.distance_matrix, index_col=0)
sequence_names = distance_matrix.index.values.tolist()
# parameters for the methods taken from the exhaustive grid search
embedding_parameters = {
"metric": "precomputed",
"perplexity": 30,
"learning_rate": 500.0,
"square_distances" : True
}
default_tuned_values.append(embedding_parameters)
embedding_parameters = {
"init": "spectral",
'n_neighbors': 25,
"min_dist": 0.05
}
default_tuned_values.append(embedding_parameters)
embedding_parameters = {
"dissimilarity": "precomputed",
"n_components" : 2,
"n_jobs": 1,
"n_init": 2,
}
default_tuned_values.append(embedding_parameters)
embedding_parameters = {
"n_components" : 10,
"svd_solver" : "full"
}
default_tuned_values.append(embedding_parameters)
# creating dataframe of clade information
node_data = read_node_data(args.clades)
clade_annotations = pd.DataFrame([
{"strain": sequence_name, "clade_membership": node_data["nodes"][sequence_name][args.column_metadata]}
for sequence_name in sequence_names if sequence_name in node_data["nodes"]
])
strains_df = pd.DataFrame(distance_matrix.index.values.tolist(), columns=["strain"])
clade_annotations = clade_annotations.merge(strains_df, on="strain")
distance_matrix.columns = distance_matrix.index
indices_to_drop = distance_matrix[~distance_matrix.index.isin(clade_annotations["strain"])]
distance_matrix = distance_matrix[distance_matrix.index.isin(clade_annotations["strain"])].dropna(how = 'all')
distance_matrix = distance_matrix.drop(indices_to_drop.index, axis=1)
distance_matrix = distance_matrix.to_numpy()
numbers = get_PCA_feature_matrix(args.alignment, sequence_names)
# k fold analysis with thresholds
random_state = 12883823
rkf = RepeatedKFold(n_splits=2, n_repeats=3, random_state=random_state)
k = 0
total_list_methods = []
for training_index, validation_index in rkf.split(clade_annotations["strain"].values.tolist()):
print(len(training_index))
i = 0
print("here " + str(k))
for embed in default_tuned_values:
print(i)
for distance_threshold in distance_thresholds:
if(list_of_embedding_strings[i] == "pca"):
#performing PCA on my pandas dataframe
numbers_subset = numbers[training_index]
pca = PCA(**embed) #can specify n, since with no prior knowledge, I use None
training_embedding = pca.fit_transform(numbers_subset)
else:
# Subset distance matrix to training indices.
training_distance_matrix = distance_matrix[training_index][:, training_index]
# Embed training distance matrix.
embedder = embedding_class[i](**embed)
training_embedding = embedder.fit_transform(training_distance_matrix)
list_columns_val = _get_embedding_columns_by_method(list_of_embedding_strings[i])
val_df = pd.DataFrame(training_embedding, columns=list_columns_val)
val_df[["strain", "clade_membership"]] = pd.DataFrame(clade_annotations[["strain", "clade_membership"]].values[training_index].tolist())
KDE_df_normal = get_euclidean_data_frame(sampled_df=val_df, column_for_analysis="clade_membership", embedding="method", column_list=_get_embedding_columns_by_method(list_of_embedding_strings[i]))
distance_threshold = float(distance_threshold)
clusterer = hdbscan.HDBSCAN(cluster_selection_epsilon=distance_threshold)
clusterer.fit(val_df[_get_embedding_columns_by_method(list_of_embedding_strings[i])])
val_df[f"{list_of_embedding_strings[i]}_label_{k}"] = clusterer.labels_.astype(str)
list_columns = _get_embedding_columns_by_method(list_of_embedding_strings[i])
list_columns.extend(["strain", f"{list_of_embedding_strings[i]}_label_{k}"])
KDE_df_cluster = get_euclidean_data_frame(sampled_df=val_df[list_columns], column_for_analysis=f"{list_of_embedding_strings[i]}_label_{k}", embedding=list_of_embedding_strings[i], column_list=_get_embedding_columns_by_method(list_of_embedding_strings[i]))
confusion_matrix_val = confusion_matrix(KDE_df_normal["clade_status"], KDE_df_cluster["clade_status"])
matthews_cc_val = matthews_corrcoef(KDE_df_normal["clade_status"], KDE_df_cluster["clade_status"])
method_dict = {}
method_dict["method"] = list_of_embedding_strings[i]
method_dict["distance_threshold_number"] = f"{list_of_embedding_strings[i]}_label_{k}"
method_dict["confusion_matrix_training"] = confusion_matrix_val
method_dict["matthews_cc_training"] = matthews_cc_val
method_dict["num_undefined_training"] = (val_df[f"{list_of_embedding_strings[i]}_label_{k}"].values == '-1').sum()
if(list_of_embedding_strings[i] == "pca"):
#performing PCA on my pandas dataframe
numbers_subset = numbers[validation_index]
pca = PCA(**embed) #can specify n, since with no prior knowledge, I use None
validation_embedding = pca.fit_transform(numbers_subset)
else:
# Subset distance matrix to validation indices.
validation_distance_matrix = distance_matrix[validation_index][:, validation_index]
# Embed validation distance matrix.
validation_embedding = embedder.fit_transform(validation_distance_matrix)
val_df = | pd.DataFrame(validation_embedding, columns=list_columns_val) | pandas.DataFrame |
import numpy as np
import pandas as pd
import json
from pathlib import Path
from typing import Union, Sequence
from emukit.benchmarking.loop_benchmarking.benchmark_result import BenchmarkResult
from emukit.core import ParameterSpace
import logging
_log = logging.getLogger(__name__)
class BenchmarkData:
"""
Acts as a container, complete with all required interfaces, for the final results of performing a benchmarking
operation.
"""
model_names: Sequence[str]
metric_names: Sequence[str]
n_repeats: int
n_iters: int
metrics_df: pd.DataFrame
metrics_row_index_names: Sequence[str] = ("model", "metric", "rng_offset", "iteration")
metrics_col_index_names: Sequence[str] = ("metric_value",)
runhistory_df: pd.DataFrame
runhistory_col_labels: Sequence[str]
runhistory_base_col_name: str = "run_data"
runhistory_yname: str = "objective_value"
runhistory_row_index_names: Sequence[str] = ("model", "rng_offset", "iteration")
def __init__(self):
self.model_names = None
self.metric_names = None
self.n_repeats = None
self.n_iters = None
self.metrics_df = None
self.runhistory_df = None
self.runhistory_col_labels = None
@property
def n_models(self):
return len(self.model_names)
@property
def n_metrics(self):
return len(self.metric_names)
@classmethod
def from_emutkit_results(cls, results: BenchmarkResult, include_runhistories: bool = True,
emukit_space: ParameterSpace = None, outx: np.ndarray = None, outy: np.ndarray = None,
rng_offsets: Sequence[int] = None):
"""
Given the results of an emukit benchmarking operation stored within a BenchmarkResult object, reads the data
and prepares it for use within PyBNN's data analysis and visualization pipeline.
:param results: emukit.benchmarking.loop_benchmarking.benchmark_result.BenchmarkResult
:param include_runhistories: bool
Flag to indicate that the BenchmarkResults include a pseudo-metric for tracking the run history that should
be handled appropriately. This should be the last metric in the sequence of metrics.
:param emukit_space: emukit.core.ParameterSpace
An emukit ParameterSpace object used to extract some metadata for storing the runhistory. Not necessary
unless include_runhistories = True.
:param outx: np.ndarray
The run history of the configurations. Not necessary unless include_runhistories = True.
:param outy: np.ndarray
The run history of the function evaluations. Not necessary unless include_runhistories = True.
:param rng_offsets: np.ndarray
A sequence of integers, which functions as a mapping for the 'repetitions' index in BenchmarkResults. If
None (default), the repetition indices are used as is.
:return: None
"""
# Remember, no. of metric calculations per repeat = num loop iterations + 1 due to initial metric calculation
obj = cls()
obj.model_names = results.loop_names
obj.metric_names = results.metric_names
if include_runhistories:
# Exclude the pseudo-metric for tracking run histories
obj.metric_names = obj.metric_names[:-1]
assert outx is not None and outy is not None, "Expected run histories to be provided in parameters " \
"'outx' and 'outy', received %s and %s respectively." % \
(str(type(outx)), str(type(outy)))
obj.n_repeats = results.n_repeats
obj.n_iters = results.extract_metric_as_array(obj.model_names[0], obj.metric_names[0]).shape[-1]
_log.debug("Reading data for %d models and %d metrics, over %d repetitions of %d iterations each." %
(obj.n_models, obj.n_metrics, obj.n_repeats, obj.n_iters))
all_indices = [
obj.model_names,
obj.metric_names,
rng_offsets if rng_offsets is not None else np.arange(obj.n_repeats),
np.arange(obj.n_iters)
]
assert len(obj.metrics_row_index_names) == len(all_indices), \
"This is unexpected. The number of row index labels %d should be exactly equal to the number of index " \
"arrays %d." % (len(obj.metrics_row_index_names), len(all_indices))
indices = [ | pd.MultiIndex.from_product(all_indices[i:], names=obj.metrics_row_index_names[i:]) | pandas.MultiIndex.from_product |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
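# Illustrative precedence (a sketch, not part of the original test module): DataFrame wins
# over Series, which wins over Index, which wins over the passed box, e.g.
#   get_upcast_box(pd.Index, Series([1]))          -> Series
#   get_upcast_box(Series, DataFrame({"a": [1]}))  -> DataFrame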
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = | TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar') | pandas.TimedeltaIndex |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import logging
logger = logging.getLogger(__name__)
import traceback
import datetime
from abc import ABCMeta, abstractmethod
import requests
import pandas as pd
from pandas.tseries.frequencies import to_offset
from pandas.core.common import is_number
from arctic.exceptions import NoDataFoundException
from ..library import idx_min, idx_max
class Updater(object):
__metaclass__ = ABCMeta
def __init__(self, session=None, *args, **kwargs):
self.session = session
def start_default(self, freq):
if freq is None:
return datetime.datetime(2010, 1, 1)
else:
return self.end_default(freq) - freq * self.periods_default
@property
def periods_default(self):
return 1000
def end_default(self, freq):
if freq == to_offset('D'):
return (datetime.datetime.utcnow() - freq).replace(hour=0, minute=0, second=0, microsecond=0)
elif freq is None:
dt = datetime.datetime.utcnow()
year = dt.year
month = dt.month
if month == 1:
year -= 1
month = (month - 2) % 12 + 1
return datetime.datetime(year=year, month=month, day=1)
else:
return datetime.datetime.utcnow()
def _sanitize_dates(self, start, end, freq=None):
"""
Return (datetime_start, datetime_end) tuple
if start is None - default is 2010/01/01
if end is None - default is today
"""
if is_number(start):
# regard int as year
start = datetime.datetime(start, 1, 1)
start = pd.to_datetime(start)
if | is_number(end) | pandas.core.common.is_number |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import base64
import glob
from scipy.signal import medfilt
from scipy.integrate import trapz
import xml.etree.ElementTree as et
from datetime import date
today = date.today()
np.warnings.filterwarnings('ignore')
sns.set(style="darkgrid")
roots = []
root_names = []
for n in glob.glob('*.xml'):
roots.append(et.parse(n).getroot())
root_names.append(n)
def modified_z_score(intensity):
median_int = np.median(intensity)
mad_int = np.median([np.abs(intensity - median_int)])
if mad_int == 0:
mad_int = 1
modified_z_scores = 0.6745 * (intensity - median_int) / mad_int
return modified_z_scores
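# modified_z_score rescales deviations from the median by the median absolute deviation;
# the 0.6745 factor makes the MAD comparable to a standard deviation for normal data.
# Rough illustration (not from the original script):
#   modified_z_score(np.array([1, 1, 1, 10])) -> approximately [0, 0, 0, 6.1]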
def df_fixer(y,n):
threshold = 0
x = 0
while threshold == 0:
if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
x += 5
elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
x += 5
spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
y_out = y.copy()
for i in np.arange(len(spikes)):
if spikes[i] != 0:
y_out[i+y_out.index[0]] = None
return y_out
def half_df_fixer(y,n):
threshold = 0
x = 0
while threshold == 0:
if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
x += 2
elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
x += 2
spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
y_out = y.copy()
for i in np.arange(len(spikes)):
if spikes[i] != 0:
y_out[i+y_out.index[0]] = None
return y_out
def hanging_line(point1, point2):
a = (point2[1] - point1[1])/(np.cosh(point2[0] % 600) - np.cosh(point1[0] % 600))
b = point1[1] - a*np.cosh(point1[0] % 600)
x = np.linspace(point1[0], point2[0], (point2[0] - point1[0])+1)
y = a*np.cosh(x % 600) + b
return (x,y)
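# hanging_line fits y = a*cosh(x % 600) + b through the two given points and returns the
# curve sampled at every integer x between them (for integer endpoints), presumably to
# interpolate across samples blanked out by df_fixer (that usage is not shown in this excerpt).
# Sketch of a call (values are illustrative only):
#   x_fill, y_fill = hanging_line((100, 0.2), (160, 0.5))   # 61 points from x=100 to x=160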
Tags = {'tags':[]}
tags = {'tags':[]}
for root in roots:
if len(root.find('{http://www3.medical.philips.com}waveforms').getchildren()) == 2:
if int(root.find('{http://www3.medical.philips.com}waveforms')[1].attrib['samplespersec']) == 1000:
for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
tag = {}
tag['Lead'] = elem.attrib['leadname']
if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
tag['Ponset'] = 0
tag['Pdur'] = 0
tag['Print'] = 0
tag['Poffset'] = 0
else:
tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
tag['Pdur'] = 0
tag['Print'] = int(root[6][1][0][14].text)
tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Failed' or root[6][1][0][14].text == 'Failed' or (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid'):
tag['Ponset'] = 0
tag['Pdur'] = 0
tag['Print'] = 0
tag['Poffset'] = 0
else:
tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
tag['Pdur'] = int(elem[0].text)
tag['Print'] = int(root[6][1][0][14].text)
tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
tag['Qonset'] = np.nan
tag['Qrsdur'] = np.nan
tag['Qoffset'] = np.nan
tag['Tonset'] = np.nan
tag['Qtint'] = np.nan
tag['Toffset'] = np.nan
tag['Tdur'] = np.nan
else:
tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
tag['Qrsdur'] = int(root[6][0][29].text)
tag['Qoffset'] = tag['Qonset'] + tag['Qrsdur']
tag['Tonset'] = int(elem[4].text)
tag['Qtint'] = int(root[6][1][0][18].text)
tag['Toffset'] = tag['Qonset'] + tag['Qtint']
tag['Tdur'] = tag['Qoffset'] - tag['Qonset']
if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): tag['HeartRate'] = int(root[7][0][1][0].text)
if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
if root[6][1][0][9].text != None: tag['AtrialRate'] = int(root[6][1][0][9].text)
if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': tag['QRSFrontAxis'] = int(root[6][0][15].text)
if root[6][0][31].text != None and root[6][0][31].text != 'Failed': tag['QTC'] = int(root[6][0][31].text)
tag['Target'] = []
for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
else:
tag['HeartRate'] = np.nan
tag['RRint'] = np.nan
tag['AtrialRate'] = np.nan
tag['QRSFrontAxis'] = np.nan
tag['QTC'] = np.nan
tag['Target'] = []
if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
time = root[3].attrib
tag['Date'] = time['date']
tag['Time'] = time['time']
tag['Sex'] = root[5][0][6].text
tag['ID'] = root[5][0][0].text
tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
tag['Waveform'] = elem[6].text
# tag['LongWaveform'] = root[8][0].text
tags['tags'].append(tag)
else:
for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
Tag = {}
Tag['Lead'] = elem.attrib['leadname']
if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
Tag['Ponset'] = 0
Tag['Pdur'] = 0
Tag['Print'] = 0
Tag['Poffset'] = 0
else:
Tag['Ponset'] = float(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
Tag['Pdur'] = 0
Tag['Print'] = int(root[6][1][0][14].text)
Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == None or root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
Tag['Ponset'] = 0
Tag['Pdur'] = 0
Tag['Print'] = 0
Tag['Poffset'] = 0
else:
Tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
Tag['Pdur'] = int(elem[0].text)
Tag['Print'] = int(root[6][1][0][14].text)
Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][18].text == None or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
Tag['Qonset'] = np.nan
Tag['Qrsdur'] = np.nan
Tag['Qoffset'] = np.nan
Tag['Tonset'] = np.nan
Tag['Qtint'] = np.nan
Tag['Toffset'] = np.nan
Tag['Tdur'] = np.nan
else:
Tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
Tag['Qrsdur'] = int(root[6][0][29].text)
Tag['Qoffset'] = Tag['Qonset'] + Tag['Qrsdur']
Tag['Tonset'] = int(elem[4].text)
Tag['Qtint'] = int(root[6][1][0][18].text)
Tag['Toffset'] = Tag['Qonset'] + Tag['Qtint']
Tag['Tdur'] = Tag['Qoffset'] - Tag['Qonset']
if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): Tag['HeartRate'] = int(root[7][0][1][0].text)
if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: Tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
if root[6][1][0][9].text != None: Tag['AtrialRate'] = int(root[6][1][0][9].text)
if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': Tag['QRSFrontAxis'] = int(root[6][0][15].text)
if root[6][0][31].text != None: Tag['QTC'] = int(root[6][0][31].text)
Tag['Target'] = []
for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
Tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
else:
Tag['HeartRate'] = np.nan
Tag['RRint'] = np.nan
Tag['AtrialRate'] = np.nan
Tag['QRSFrontAxis'] = np.nan
Tag['QTC'] = np.nan
Tag['Target'] = []
if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
time = root[3].attrib
Tag['Date'] = time['date']
Tag['Time'] = time['time']
Tag['Sex'] = root[5][0][6].text
Tag['ID'] = root[5][0][0].text
Tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
if len(root[5][0].find('{http://www3.medical.philips.com}age')) > 0:
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
Tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
Tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
Tag['Waveform'] = elem[6].text
# Tag['LongWaveform'] = root[8][0].text
Tags['tags'].append(Tag)
half_data = pd.DataFrame(Tags['tags'])
data = pd.DataFrame(tags['tags'])
del roots
del root
del elem
count1000 = int(len(data)/12)
count500 = int(len(half_data)/12)
count = count1000 + count500
if len(data) > 0:
array = np.unique(data[data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
missing_data = data.loc[data['ID'].isin(array) & data['Date'].isin(array) & data['Time'].isin(array)]
data.drop(missing_data.index, axis=0,inplace=True)
missing_data = missing_data.reset_index(drop=True)
del tag
del tags
data = data.reset_index(drop=True)
for n in range(count1000):
data.Tonset[n*12:(n+1)*12] = np.repeat(int(data.Tonset[n*12:(n+1)*12].sum()/12), 12)
data.Pdur[n*12:(n+1)*12] = np.repeat(int(data.Pdur[n*12:(n+1)*12].sum()/12), 12)
x = 0
p = []
for x in range(len(data.Waveform)):
t = base64.b64decode(data.Waveform[x])
p.append(np.asarray(t))
x+=1
p = np.asarray(p)
a = []
for i in p:
o = []
for x in i:
o.append(x)
a.append(o)
df = pd.DataFrame(a)
df.insert(0, 'Lead', data['Lead'])
blank = []
for n in range(count1000):
blank.append(pd.pivot_table(df[(n*12):(n+1)*12], columns=df.Lead))
test = pd.concat(blank)
new = []
array = []
for n in range(13):
    for index, num in zip(test.iloc[:, n-1][::2], test.iloc[:, n-1][1::2]):
        if num > 128:
            new.append(index - (256 * (256 - num)))
        elif num < 128:
            new.append(index + (256 * num))
        else:
            new.append(index)
new = []
array.append(new)
array = np.asarray(array[:12])  # the first 12 entries hold the decoded leads; the trailing empty list is dropped
df = pd.DataFrame(array)
df = | pd.pivot_table(df, columns=test.columns) | pandas.pivot_table |
from logs import logDecorator as lD
import jsonref
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import load_files
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers.core import Activation
from keras.layers import Input, Embedding, Flatten, dot, Dense
from keras.optimizers import Adam
from psycopg2.sql import SQL, Identifier, Literal
from lib.databaseIO import pgIO
from collections import Counter
from textwrap import wrap
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.utils.utils'
t1_config = jsonref.load(open('../config/tejasT1.json'))
@lD.log(logBase + '.createDF_allRaces_anySUD')
def createDF_allRaces_anySUD(logger):
'''Creates dataframe for total sample, dependent variable = any sud
This function creates a dataframe for the total sample, where the
dependent variable is any sud and the independent variables are:
race, age, sex and setting.
Decorators:
lD.log
Arguments:
logger {logging.Logger} -- logs error information
'''
try:
query = '''
SELECT * from tejas.restofusers_t3_p1
'''
data = pgIO.getAllData(query)
sud_data = [d[0] for d in data]
race_data = [d[1] for d in data]
age_data = [d[2] for d in data]
sex_data = [d[3] for d in data]
setting_data = [d[4] for d in data]
d = {'sud': sud_data, 'race': race_data, 'age': age_data, 'sex': sex_data, 'setting': setting_data}
main = pd.DataFrame(data=d)
df = main.copy()
# Change sud column to binary, dummify the other columns
df.replace({False:0, True:1}, inplace=True)
dummy_races = pd.get_dummies(main['race'])
df = df[['sud']].join(dummy_races.ix[:, 'MR':])
main.replace(to_replace=list(range(12, 18)), value="12-17", inplace=True)
main.replace(to_replace=list(range(18, 35)), value="18-34", inplace=True)
main.replace(to_replace=list(range(35, 50)), value="35-49", inplace=True)
main.replace(to_replace=list(range(50, 100)), value="50+", inplace=True)
dummy_ages = pd.get_dummies(main['age'])
df = df[['sud', 'MR', 'NHPI']].join(dummy_ages.ix[:, :'50+'])
dummy_sexes = pd.get_dummies(main['sex'])
df = df[['sud', 'MR', 'NHPI', '12-17', '18-34', '35-49', '50+']].join(dummy_sexes.ix[:, 'M':])
dummy_setting = pd.get_dummies(main['setting'])
df = df[['sud', 'MR', 'NHPI', '12-17', '18-34', '35-49', 'M']].join(dummy_setting.ix[:, :'Inpatient'])
df['intercept'] = 1.0
except Exception as e:
logger.error('createDF_allRaces_anySUD failed because of {}'.format(e))
return df
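# Usage sketch (illustrative, left commented out so the module has no side effects on import):
# the @lD.log decorator injects the logger, so callers invoke the function without arguments
# and receive the dummified design matrix described in the docstring above.
# df_any_sud = createDF_allRaces_anySUD()
# print(df_any_sud[['sud', 'MR', 'NHPI', 'intercept']].head())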
@lD.log(logBase + '.make_dataset')
def make_dataset(logger, csvfile, query):
data = pgIO.getAllData(query)#returns list of tuples (T/F,.......)
with open(csvfile,'w+') as f:
csv_out=csv.writer(f)
csv_out.writerow(['sud','race','age','sex','setting'])
csv_out.writerows(data)
f.close()
dataset = pd.read_csv(csvfile)
return dataset
@lD.log(logBase + '.logRegress')
def nnClassify(logger, layers):
'''Performs classification with hidden layer NN
Decorators:
lD.log
Arguments:
logger {logging.Logger} -- logs error information
df {dataframe} -- input dataframe where first column is 'sud'
'''
try:
print("Performing classification with hidden layer NN...")
query = '''
SELECT * from tejas.first_thou
'''
csvfile = '../data/firstThouSUDorNah.csv'
dataset = make_dataset(csvfile, query)
# print(dataset)
X = dataset.iloc[:,1:].values #X now takes everything but sud
y = dataset.iloc[:,0].values #sud saved for y
lab_enc_race = LabelEncoder()
X[:,0] = lab_enc_race.fit_transform(X[:,0])
lab_enc_sex = LabelEncoder()
X[:,2] = lab_enc_sex.fit_transform(X[:,2])
lab_enc_setting = LabelEncoder()
X[:,3] = lab_enc_setting.fit_transform(X[:,3])
# sex and setting are binary variables
# must create dummy variable for race since race = 'aa', 'nhpi' or 'mr'
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
#80-20 train-test split
X_train, X_test, y_train, y_test = tts(X, y, test_size = 0.2)
#feature scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#Initializing Neural Network
classifier = Sequential()
buildClassifier(classifier, layers)
# # Adding the output layer
# classifier.add(Dense(output_dim = 1, init = 'uniform',
# activation = 'sigmoid'))
# Compiling Neural Network
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy',
metrics = ['accuracy'])
# Fitting our model
# Number of epochs and batch sizes are hyperparameters
history = classifier.fit(X_train, y_train, epochs = 40,
verbose = 0, batch_size = 100,
validation_data=(X_test,y_test))
trainL = history.history['loss']
testL = history.history['val_loss']
epoch_count = range(1,1+len(trainL))
plt.plot(epoch_count,trainL,'r--')
plt.plot(epoch_count,testL,'b-')
plt.legend(['Training Loss','Test Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# # Creating the Confusion Matrix
acc = accuracy_score(y_test, y_pred)
print('\n%.2f'%(100*acc))
# accuracy = (cm[0][0]+cm[1][1])/(cm[0][0]+cm[0][1]+cm[1][0]+cm[1][1])
# accuracy *=100
except Exception as e:
        logger.error('nnClassify failed because of {}'.format(e))
return
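# Usage sketch (illustrative): buildClassifier is defined elsewhere in this module and is expected
# to add hidden/output layers to the Sequential model according to `layers`; the decorator injects
# the logger. Left commented out because the call queries the database and trains the network.
# nnClassify(layers)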
@lD.log(logBase + '.createDF_allRaces_morethan2SUD')
def createDF_allRaces_morethan2SUD(logger):
'''Creates dataframe for total sample, dependent variable = at least 2 sud
This function creates a dataframe for the total sample, where the
dependent variable is >=2 sud and the independent variables are:
race, age, sex and setting.
Decorators:
lD.log
Arguments:
logger {logging.Logger} -- logs error information
'''
try:
query = '''
SELECT morethan2sud, race, age, sex, visit_type
FROM tejas.sud_race_age
WHERE age BETWEEN 12 AND 100
'''
data = pgIO.getAllData(query)
sud_data = [d[0] for d in data]
race_data = [d[1] for d in data]
age_data = [d[2] for d in data]
sex_data = [d[3] for d in data]
setting_data = [d[4] for d in data]
d = {'sud': sud_data, 'race': race_data, 'age': age_data, 'sex': sex_data, 'setting': setting_data}
main = | pd.DataFrame(data=d) | pandas.DataFrame |
import os
import sys
import logging
import sqlite3
import pyupbit
import pandas as pd
from PyQt5 import QtCore
from PyQt5.QtCore import QThread
from pyupbit import WebSocketManager
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import *
from utility.static import now, timedelta_sec, strf_time, telegram_msg, timedelta_hour, strp_time
class Trader(QThread):
data0 = QtCore.pyqtSignal(list)
data1 = QtCore.pyqtSignal(list)
data2 = QtCore.pyqtSignal(list)
def __init__(self, windowQ, workerQ, queryQ, soundQ, stg1Q, stg2Q, stg3Q, stg4Q):
super().__init__()
self.log = logging.getLogger('Worker')
self.log.setLevel(logging.INFO)
filehandler = logging.FileHandler(filename=f"{system_path}/log/S{strf_time('%Y%m%d')}.txt", encoding='utf-8')
self.log.addHandler(filehandler)
self.windowQ = windowQ
self.workerQ = workerQ
self.queryQ = queryQ
self.soundQ = soundQ
self.stg1Q = stg1Q
self.stg2Q = stg2Q
self.stg3Q = stg3Q
self.stg4Q = stg4Q
        self.upbit = None                               # object used to place buy/sell orders and check fills
        self.tickers1 = None                            # ticker list to send to strategy process 1
        self.tickers2 = None                            # ticker list to send to strategy process 2
        self.tickers3 = None                            # ticker list to send to strategy process 3
        self.tickers4 = None                            # ticker list to send to strategy process 4
        self.buy_uuid = None                            # buy order bookkeeping list: [ticker name, uuid]
        self.sell_uuid = None                           # sell order bookkeeping list: [ticker name, uuid]
        self.websocketQ = None                          # websocket queue for receiving real-time data
self.df_cj = | pd.DataFrame(columns=columns_cj) | pandas.DataFrame |
#!Python3
#Automated batch email program (Addresses.xlsx)
#by <NAME>
import os, glob, win32com.client, logging, smtplib
import pandas as pd
from email.utils import parseaddr
from email_validator import validate_email, EmailNotValidError
emailCount = 0
logging.basicConfig(filename = 'log.txt', level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
templateFolder = os.getcwd()+ '\\Email template\\' #set the run folder path
attachmentFolder = os.getcwd()+ '\\Attachments\\' #set attachment folder
# TODO get email template from folder
file = pd.ExcelFile('Email addresses.xlsx') #Establishes the excel file you wish to import into Pandas
logging.debug(file.sheet_names)
#--- Validate Email addresses excel file ---
print("Do you wish to:")
print("1.Validate spreadsheet 2.Test run/draft emails 3.Send emails")
testflag= input()
try:
testflag=int(testflag)
except:
print("Invalid input")
print("reading spreedsheet...")
for s in file.sheet_names:
df = file.parse(s) #Uploads Sheet1 from the Excel file into a dataframe
#--- Iterate through all sheets ---
if testflag >= 1:
for index, row in df.iterrows(): #Loops through each row in the dataframe
email = (row['Email Address']) #Sets dataframe variable, 'email' to cells in column 'Email Addresss'
subject = (row['Subject']) #Sets dataframe variable, 'subject' to cells in column 'Subject'
body = (row['Email HTML Body']) #Sets dataframe variable, 'body' to cells in column 'Email HTML Body'
#--- Print warnings ---
if pd.isnull(email): #Skips over rows where one of the cells in the three main columns is blank
print('Sheet %s row %s: - Warning - email is null.' % (s, (index +2)))
else:
try:
email2=(str(email))
validate_email(email2) # validate and get info
except EmailNotValidError as e:
print('Sheet %s row %s: - Warning - %s' % (s, (index +2), str(e) ))
if | pd.isnull(subject) | pandas.isnull |
# -*- coding: utf-8 -*-
""""""
# tools
import pandas as pd
def now():
"""
    Access the current date and time.
:return: str
'date time'
"""
response = pd.datetime.today()
response = f'{response.day}-{response.month}-{response.year} {response.hour}:{response.minute}'
    # WARNING: find a function that does this better
response = '25-10-2018 10:10'
return response
def now_date():
""""""
# today
date = pd.to_datetime(now(), format='%d-%m-%Y')
return f'{date.day}-{date.month}-{date.year}'
def add_margin_time(date_time, margin_time):
"""
    Add a margin (in minutes) to a time.
:param date_time: str
:param margin_time: int
{minutes}
:return: pd.to_datetime
"""
    print(f"date time: {date_time}")
# date_time
date_time = pd.to_datetime(date_time)
# add margin time
minute = date_time.minute + margin_time
date_time = | pd.to_datetime(f'{date_time.day}-{date_time.month}-{date_time.year} {date_time.hour}:{minute}') | pandas.to_datetime |
import os
import json
import pandas as pd
class Importer:
def extract(self, file_name):
name, extension = os.path.splitext(file_name)
df = | pd.DataFrame() | pandas.DataFrame |
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Files are downloaded and manually split at random into different folders.
The following code is repeated for each folder (with the same effect): it parses the files into
pandas DataFrames and stores all the data in a single HDF5 file.
"""
#%%
os.chdir('./files/train')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf = pd.HDFStore('/home/ubuntu/data/jiahao/files/train.hdf5', mode="w")
hdf.put(value=merged_df, key="df")
#%%
os.chdir('./train_1')
mzid_files_1=glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id'])
merged_df_1 = merged_df_1[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_1, key="df1")
# %%
os.chdir('./train_2')
mzid_files_2=glob.glob('*.mzid')
indexed_mzid_2 = mzid.chain.from_iterable(mzid_files_2, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_2 = []
for entry in(indexed_mzid_2):
all_mzid_2.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_2)
mzid_df_2 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_2 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_2.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_2)
spectra_df_2 = | pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities}) | pandas.DataFrame |
import networkx as nx
import numpy as np
import pandas as pd
from tmp.utils import cols_attr, cols_glove, cols_empath
# Gets mean and median between tweets
tweets = pd.read_csv("../data/preprocessing/tweets.csv")
tweets.sort_values(by=["user_id", "tweet_creation"], ascending=True, inplace=True)
tweets["time_diff"] = tweets.groupby("user_id", sort=False).tweet_creation.diff()
time_diff_series_mean = tweets.groupby("user_id", sort=False).time_diff.mean()
time_diff_series_median = tweets.groupby("user_id", sort=False).time_diff.median()
time_diff = time_diff_series_mean.to_frame()
time_diff["time_diff_median"] = time_diff_series_median
time_diff.to_csv("../data/features/time_diff.csv")
users_attributes = pd.read_csv("../data/features/users_attributes.csv")
users_content = pd.read_csv("../data/features/users_content.csv")
users_content2 = pd.read_csv("../data/features/users_content2.csv")
users_time = pd.read_csv("../data/features/time_diff.csv")
users_deleted = pd.read_csv("../data/extra/deleted_account_before_guideline.csv")
users_deleted_after_guideline = pd.read_csv("../data/extra/deleted_account_after_guideline.csv")
users_date = pd.read_csv("../data/extra/created_at.csv")
df = | pd.merge(left=users_attributes, right=users_content, on="user_id", how="left") | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import re
import numpy as np
import pandas as pd
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import nltk
from nltk.stem import WordNetLemmatizer
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
from sklearn.datasets import fetch_20newsgroups
newsgroups_train = fetch_20newsgroups(subset='train', shuffle = True)
newsgroups_test = fetch_20newsgroups(subset='test', shuffle = True)
# In[8]:
print(list(newsgroups_train.target_names))
# In[13]:
(newsgroups_train.data[:1])
# In[12]:
def preprocess(text):
return [w for w in gensim.utils.simple_preprocess(text) if w not in gensim.parsing.preprocessing.STOPWORDS and len(w)>3]
def lemmatize(text):
return [WordNetLemmatizer().lemmatize(w) for w in text]
# In[33]:
preproc_doc = []
for s in newsgroups_train.data:
preproc_doc.append(lemmatize(preprocess(s)))
# In[27]:
len(preproc_doc[0])
# In[34]:
dwords = gensim.corpora.Dictionary(preproc_doc)
# In[35]:
c = 0
for k,v in dwords.iteritems():
c+=1
print(k,v)
if c==10:
break
# In[36]:
dwords.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)
# In[38]:
bow = [dwords.doc2bow(s) for s in preproc_doc]
# In[52]:
from gensim import corpora, models
tfidf = models.TfidfModel(bow)
corpus_tfidf = tfidf[bow]
from pprint import pprint
for doc in corpus_tfidf:
pprint(doc)
break
# In[54]:
lda_model_tfidf = gensim.models.LdaMulticore(corpus_tfidf, num_topics=20, id2word=dwords, passes=2, workers=4)
for idx, topic in lda_model_tfidf.print_topics(-1):
print('Topic: {} Word: {}'.format(idx, topic))
# In[57]:
bow_test = dwords.doc2bow(lemmatize(preprocess(newsgroups_test.data[0])))
for index, score in sorted(lda_model_tfidf[bow_test], key=lambda tup: -1*tup[1]):
print("Score: {}\t Topic: {}".format(score, lda_model_tfidf.print_topic(index, 10)))
# In[58]:
print(newsgroups_test.target[0])
# In[61]:
def format_topics_sentences(ldamodel, corpus, texts):
# Init output
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row_list in enumerate(ldamodel[corpus]):
row = row_list[0] if ldamodel.per_word_topics else row_list
# print(row)
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
# Add original text to the end of the output
contents = pd.Series(texts)
sent_topics_df = | pd.concat([sent_topics_df, contents], axis=1) | pandas.concat |
"""
Prediction of GH7 subtypes (CBH/EG) with machine learning (ML)
"""
# Imports
#===========#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pydot_ng as pydot
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.externals.six import StringIO
import warnings
warnings.filterwarnings("ignore")
import bioinf
# Get lengths of loops from MSA
#================================================#
def get_gh7looplength(msafasta, trecel7a_pos=0):
''' Return a DataFrame of the number of residues in the 8 loops of GH7 sequences in
an MSA fasta file. TreCel7A is used as reference for determining the loop positions
in the MSA. The position of TreCel7A in the fasta file is trecel7a_pos (0 if first).
Loop lengths are in the order [A1, A2, A3, A4, B1, B2, B3, B4]. '''
# Loop residues in TreCel7A
loopres = ['QSAQK', 'TSSGVPAQVESQS', 'DYYAN', 'TNETSSTPGA',
'YDGNTW', 'PSSNNANT', 'GGTYSDNRYG', 'GGSS'] # Residues in the loops of TreCel7A
loopmore = ['NVGARLY', 'PNAKVTFSNIK', 'MLWLDST', 'VRGSCSTSSGVPA',
'SSTLCPD', 'GIGGHGSCCS', 'GTCDPDGCDWNP', 'FSDKGGL'] # Residues after the loops
# Get aligned sequences
[heads, sequences] = bioinf.split_fasta(msafasta) # Retrieve sequences from fasta file
trecel7a_seq_msa = sequences[trecel7a_pos]
trecel7a_nogaps = trecel7a_seq_msa.replace('-','')
trecel7a_list = list(trecel7a_seq_msa)
# Get loop positions in MSA (using TreCel7A as reference)
numb = -1
for k in range(len(trecel7a_list)):
if trecel7a_list[k].isalpha():
numb += 1
trecel7a_list[k] = str(numb)
startpos = [trecel7a_list.index(str(trecel7a_nogaps.index(loopres[i])))
for i in range(len(loopres))]
stoppos = [trecel7a_list.index(str(trecel7a_nogaps.index(loopmore[i])))
for i in range(len(loopmore))]
length = [stoppos[i] - startpos[i] for i in range(len(startpos))]
# Determine loop length
store = []
for i in range(len(sequences)):
seq = sequences[i]
loopregion = [seq[startpos[k]:stoppos[k]] for k in range(len(loopres))]
looplength = [length[k] - loopregion[k].count('-') for k in range(len(loopres))]
store.append(looplength)
# Save results as DataFrame
result = pd.DataFrame(store)
result.columns = ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4']
return result
# Calculate loop lengths
msafile = 'fasta/structure_based_alignment/cel7_nr99_structaln.fasta'
looplength = get_gh7looplength(msafile, trecel7a_pos=0)
# Write results to spreadhseet
looplength.index = range(1, len(looplength)+1)
looplength['accession'] = bioinf.get_accession(msafile)
looplength.to_csv('results_final/looplength.csv')
# Data preprocessing: prepare data for machine learning
#================================================================#
# Retreive data
looplength = pd.read_csv('results_final/looplength.csv', index_col=0)
subtype = pd.read_csv('results_final/cel7_subtypes.csv', index_col=0)
looplength.index = range(len(looplength))
subtype.index = range(len(subtype))
assert looplength.accession.equals(subtype.accession) # Ensure sequence positions are the same
# View the distribution to intuitively determine outliers
maxlength = [14, 20, 25, 16, 52, 141, 50, 14] # Values equal or greater than are outliers
topcode_vals = [] # Change the outlier values to top-coded values
for i in range(8):
sortedvals = sorted(looplength.iloc[:,i])
maxval = maxlength[i]
topcode_vals.append(sortedvals[sortedvals.index(maxval) - 1])
color = ['blue' if x<maxval else 'red' for x in sortedvals]
loop = looplength.columns[i]
plt.scatter(range(len(looplength)), sortedvals, color=color,
marker='.')
plt.xlabel('Index')
plt.ylabel('Length')
plt.title(loop)
plt.show()
plt.close()
# Deal with outliers
# Top-coding outliers before calculating Z-scores
X_grand = looplength.iloc[:,:-1]
for i in range(len(X_grand.columns)):
vals = list(X_grand.iloc[:,i])
vals = [x if x<maxlength[i] else topcode_vals[i] for x in vals]
X_grand.iloc[:,i] = pd.Series(vals)
# Standardize data (convert to Z-scores)
scx = StandardScaler()
X_grand = pd.DataFrame(scx.fit_transform(X_grand))
X_grand.columns = looplength.iloc[:,1:].columns
y_grand = pd.Series(list(subtype['ncbi_pred_class']), index=range(len(subtype)))
# Apply machine learning to predict subtypes from loop lengths
#==============================================================================#
def get_classifier(clf_type, depth=1):
'''Return an instance of the specified classifier.'''
if clf_type == 'dec':
classifier = DecisionTreeClassifier(criterion='entropy', max_depth=depth)
elif clf_type == 'svm':
classifier = SVC(kernel='rbf')
elif clf_type == 'knn':
classifier = KNeighborsClassifier(n_neighbors=10)
elif clf_type == 'log':
classifier = LogisticRegression()
return classifier
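# Usage sketch (illustrative): 'dec' returns an entropy-based decision tree capped at `depth`,
# while 'svm', 'knn' and 'log' select the RBF SVM, 10-nearest-neighbour and logistic regression
# classifiers defined above.
# clf = get_classifier('dec', depth=3)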
def apply_ML(X_grand, y_grand, clf_type, monte_count=100):
'''Apply ML to predict subtypes from loop lengths.
Return a tuple of dataframes of performance results,
(sensitivity, specificity, accuracy, MCC).'''
# Empty lists for storing final results
sens_final, spec_final, acc_final, mcc_final = [], [], [], []
# Monte Carlo loop
for i in range(monte_count):
RUS = RandomUnderSampler(random_state=None)
X_select, y_select = RUS.fit_resample(X_grand, y_grand)
X_select, y_select = pd.DataFrame(X_select), pd.Series(y_select)
# K-fold cross validation
kf = KFold(n_splits=5, shuffle=True, random_state=None)
kf_indices = kf.split(X_select)
for train_index, test_index in kf_indices:
X_train, y_train = X_select.iloc[train_index,:], y_select.iloc[train_index]
X_test, y_test = X_select.iloc[test_index,:], y_select.iloc[test_index]
# Empty lists for storing kfold cross validation results
sens_store, spec_store, acc_store, mcc_store = [], [], [], []
# Single-feature loop (Train classifier using single feature independently)
for j in range(8):
# Get classifier and fit to training data
classifier = get_classifier(clf_type)
classifier.fit(pd.DataFrame(X_train.iloc[:,j]), y_train)
# Test classifier on test set
y_pred = classifier.predict(pd.DataFrame(X_test.iloc[:,j]))
# Evaluate performance
cm = confusion_matrix(y_test, y_pred)
tn, tp, fn, fp = cm[0][0], cm[1][1], cm[1][0], cm[0][1]
n = tp + fp + tn + fn
accuracy = (tp + tn)/n * 100
                mcc = ((tp*tn) - (fp*fn))/np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
                sens = tp/(tp + fn) * 100 if tp + fn != 0 else 0
                spec = tn/(tn + fp) * 100 if tn + fp != 0 else 0
# Save results
sens_store.append(sens)
spec_store.append(spec)
acc_store.append(accuracy)
mcc_store.append(mcc)
# Multiple-features (Train classifier on all features)
classifier = get_classifier(clf_type, depth=8)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(pd.DataFrame(X_test))
# Evaluate performance
cm = confusion_matrix(y_test, y_pred)
tn, tp, fn, fp = cm[0][0], cm[1][1], cm[1][0], cm[0][1]
n = tp + fp + tn + fn
accuracy = (tp + tn)/n * 100
            mcc = ((tp*tn) - (fp*fn))/np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
            sens = tp/(tp + fn) * 100 if tp + fn != 0 else 0
            spec = tn/(tn + fp) * 100 if tn + fp != 0 else 0
# Save results
sens_store.append(sens)
spec_store.append(spec)
acc_store.append(accuracy)
mcc_store.append(mcc)
# Save all results to final store
sens_final.append(sens_store)
spec_final.append(spec_store)
acc_final.append(acc_store)
mcc_final.append(mcc_store)
sens_final = pd.DataFrame(sens_final)
spec_final = pd.DataFrame(spec_final)
acc_final = | pd.DataFrame(acc_final) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/3/5 12:08
Desc: Jin10 Data - Data Center - Major Institutions - Macroeconomics
https://datacenter.jin10.com/
"""
import json
import time
import math
import pandas as pd
import requests
from tqdm import tqdm
from akshare.economic.cons import (
JS_CONS_GOLD_ETF_URL,
JS_CONS_SLIVER_ETF_URL,
JS_CONS_OPEC_URL,
)
def macro_cons_gold_volume() -> pd.Series:
"""
    Holdings report of the world's largest gold ETF, SPDR Gold Trust; data available from 2004-11-18 to date
    https://datacenter.jin10.com/reportType/dc_etf_gold
    :return: holdings report
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总库存(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
for_times = math.ceil(int(str((temp_df.index[-1] - value_df.index[-1])).split(' ')[0]) / 20)
big_df = temp_df
big_df.columns = ['总库存']
for i in tqdm(range(for_times), leave=False):
params = {
"max_date": temp_df.index[-1],
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
r = requests.get(url, params=params, headers=headers)
temp_df = pd.DataFrame(r.json()["data"]["values"])
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0])
temp_df = temp_df.iloc[:, 1:]
temp_df.columns = [item["name"] for item in r.json()["data"]["keys"]][1:]
big_df = big_df.append(temp_df.iloc[:, [0]])
big_df.dropna(inplace=True)
big_df.sort_index(inplace=True)
big_df = big_df.reset_index()
big_df.drop_duplicates(subset="index", keep="last", inplace=True)
big_df.set_index("index", inplace=True)
big_df = big_df.squeeze()
big_df.index.name = None
big_df.name = "gold_volume"
big_df = big_df.astype(float)
return big_df
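# Usage sketch (illustrative, commented out because it hits the Jin10 API over the network):
# returns the full SPDR Gold Trust tonnage history as a float-typed pandas Series indexed by date.
# gold_volume = macro_cons_gold_volume()
# print(gold_volume.tail())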
def macro_cons_gold_change():
"""
    Holdings report of the world's largest gold ETF, SPDR Gold Trust; data available from 2004-11-18 to date
:return: pandas.Series
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["增持/减持(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
for_times = math.ceil(int(str((temp_df.index[-1] - value_df.index[-1])).split(' ')[0]) / 20)
big_df = temp_df
big_df.columns = ['增持/减持']
for i in tqdm(range(for_times), leave=False):
params = {
"max_date": temp_df.index[-1],
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
r = requests.get(url, params=params, headers=headers)
temp_df = pd.DataFrame(r.json()["data"]["values"])
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0])
temp_df = temp_df.iloc[:, 1:]
temp_df.columns = [item["name"] for item in r.json()["data"]["keys"]][1:]
big_df = big_df.append(temp_df.iloc[:, [1]])
big_df.dropna(inplace=True)
big_df.sort_index(inplace=True)
big_df = big_df.reset_index()
big_df.drop_duplicates(subset="index", keep="last", inplace=True)
big_df.set_index("index", inplace=True)
big_df = big_df.squeeze()
big_df.index.name = None
big_df.name = "gold_change"
big_df = big_df.astype(float)
return big_df
def macro_cons_gold_amount():
"""
    Holdings report of the world's largest gold ETF, SPDR Gold Trust; data available from 2004-11-18 to date
:return: pandas.Series
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
for_times = math.ceil(int(str((temp_df.index[-1] - value_df.index[-1])).split(' ')[0]) / 20)
big_df = temp_df
big_df.columns = ['总价值']
for i in tqdm(range(for_times), leave=False):
params = {
"max_date": temp_df.index[-1],
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
r = requests.get(url, params=params, headers=headers)
temp_df = pd.DataFrame(r.json()["data"]["values"])
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0])
temp_df = temp_df.iloc[:, 1:]
temp_df.columns = [item["name"] for item in r.json()["data"]["keys"]][1:]
big_df = big_df.append(temp_df.iloc[:, [2]])
big_df.dropna(inplace=True)
big_df.sort_index(inplace=True)
big_df = big_df.reset_index()
big_df.drop_duplicates(subset="index", keep="last", inplace=True)
big_df.set_index("index", inplace=True)
big_df = big_df.squeeze()
big_df.index.name = None
big_df.name = "gold_change"
big_df = big_df.astype(float)
return big_df
def macro_cons_silver_volume():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data available from 2006-04-29 to date
:return: pandas.Series
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总库存(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 1]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_volume"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总库存"]
temp_append_df.name = "silver_volume"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_change():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data available from 2006-04-29 to date
:return: pandas.Series
2006-04-29 0
2006-05-02 0.00
2006-05-03 342.11
2006-05-04 202.15
2006-05-05 108.86
...
2019-10-17 -58.16
2019-10-18 0.00
2019-10-21 -34.89
2019-10-22 -61.06
2019-10-23 0.00
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["增持/减持(吨)"]
temp_df.name = "silver_change"
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_change"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["增持/减持"]
temp_append_df.name = "silver_change"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_amount():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data available from 2006-04-29 to date
:return: pandas.Series
2006-04-29 263651152
2006-05-02 263651152
2006-05-03 445408550
2006-05-04 555123947
2006-05-05 574713264
...
2019-10-17 Show All
2019-10-18 Show All
2019-10-21 Show All
2019-10-22 Show All
2019-10-23 Show All
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = | pd.to_datetime(date_list) | pandas.to_datetime |
# Initialize Earth Engine Python API.
import ee
#ee.Authenticate()
ee.Initialize()
# For data manipulation and analysis.
import math
import pandas as pd
import numpy as np
np.set_printoptions(precision=4, suppress=True)
import scipy.signal
# For plotting
import matplotlib.pyplot as plt
from matplotlib import gridspec
def MakeGrid(geometry, scale):
"""Takes a polygon and creates a grid of polygons inside the shape.
Adapted from: https://developers.google.com/earth-engine/tutorials/community/drawing-tools
Keyword arguments:
geometry -- Earth Engine geometry object
scale -- desired spacing of grid
"""
# pixelLonLat returns an image with each pixel labeled with longitude and
# latitude values.
lonLat = ee.Image.pixelLonLat()
# Select the longitude and latitude bands, multiply by a large number then
# truncate them to integers.
lonGrid = lonLat.select('longitude').multiply(10000000).toInt()
latGrid = lonLat.select('latitude').multiply(10000000).toInt()
# To produce the grid, multiply the latitude and longitude images and then use
# reduce to vectors at the 10km resolution to group the grid into vectors.
return lonGrid.multiply(latGrid).reduceToVectors(geometry = geometry, scale = scale, geometryType = 'polygon',)
def Add_ID_to_Features(dataset):
"""Gives a unique ID number to each feature in a feature collection, as a new property.
Adapted from: https://gis.stackexchange.com/questions/374137/add-an-incremential-number-to-each-feature-in-a-featurecollection-in-gee
Keyword argument:
dataset -- Earth Engine feature collection
"""
indexes = ee.List(dataset.aggregate_array('system:index'))
feat_ids = ee.List.sequence(1, indexes.size())
idByIndex = ee.Dictionary.fromLists(indexes, feat_ids)
def function(feature):
"""Adds the ID number to the feature."""
return feature.set('ID', idByIndex.get(feature.get('system:index')))
# Map the function over the dataset.
return dataset.map(function)
def Get_BeforeAfter_Imagery(out_dir, ID, event_time, region, sizeWindows):
"""Obtain cloud-masked Sentinel-2 imagery before/after a given date.
Keyword arguments:
ID -- unique integer identifier of polygon representing landslide detection area
event_time -- mean date of detection window (input format)
region -- bounding box of area of interest (input format)
sizeWindows -- duration of the detection window in days
"""
def MaskS2clouds(image):
"""Filter and mask clouds for Sentinel-2 optical data
From: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2
"""
qa = image.select('QA60')
#Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = 1 << 10
cirrusBitMask = 1 << 11
# Both flags should be set to zero, indicating clear conditions.
mask = (qa.bitwiseAnd(cloudBitMask).eq(0)
.And(qa.bitwiseAnd(cirrusBitMask).eq(0))
)
return image.updateMask(mask).divide(10000)
str_pre = "{}_{}_pre".format(ID, str(event_time)[ 0 : 10 ])
str_post = "{}_{}_post".format(ID, str(event_time)[ 0 : 10 ])
event_time_ee = ee.Date(event_time)
# Pre-window time period for pre-event S2 image collection.
preWindow_T1 = event_time_ee.advance(-sizeWindows - 30, 'day')
preWindow_T2 = event_time_ee.advance(-sizeWindows, 'day')
# Post-window time period for post-event S2 image collection.
postWindow_T1 = event_time_ee.advance(sizeWindows, 'day')
postWindow_T2 = event_time_ee.advance(sizeWindows + 30, 'day')
optical_pre = (ee.ImageCollection('COPERNICUS/S2')
.filterDate(preWindow_T1, preWindow_T2)
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 10))
.filterBounds(region)
.map(MaskS2clouds)
.median()
)
optical_post = (ee.ImageCollection('COPERNICUS/S2')
.filterDate(postWindow_T1, postWindow_T2)
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 10))
.filterBounds(region)
.map(MaskS2clouds)
.median()
)
exportRegion = ee.Geometry.Polygon(region.filter(ee.Filter.eq('ID', ID)).first().getInfo()['geometry']['coordinates'])
exportImgPre = ee.batch.Export.image.toDrive(image = optical_pre,
folder = out_dir,
description = str_pre,
region = exportRegion,
scale = 10,
maxPixels = 1e9,
fileFormat = 'GeoTIFF')
exportImgPost = ee.batch.Export.image.toDrive(image = optical_post,
folder = out_dir,
description = str_post,
region = exportRegion,
scale = 10,
maxPixels = 1e9,
fileFormat = 'GeoTIFF')
exportImgPre.start()
exportImgPost.start()
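# Usage sketch (illustrative, commented out because it queues Earth Engine exports to Drive):
# export pre/post Sentinel-2 composites for grid cell 1 around a hypothetical event date;
# 'out_dir', 'grid' and 'sizeWindows' are defined later in this script.
# Get_BeforeAfter_Imagery(out_dir, 1, '2018-08-06', grid, sizeWindows)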
def Get_BeforeAfter_NDVI(ID, event_time, region, sizeWindows):
"""Obtain cloud-masked median NDVI before/after a given date.
Keyword arguments:
ID -- unique integer identifier of polygon representing landslide detection area
event_time -- mean date of detection window (input format)
region -- bounding box of area of interest (input format)
sizeWindows -- duration of the detection window in days
"""
def AddNDVI(image):
"""Adds an NDVI band to a Sentinel-2 image. NDVI is calculated as
the normalized difference between the near-infrared band and the red band,
which correspond to the 8th and 4th band in the Sentinel-2 imagery.
From: https://developers.google.com/earth-engine/tutorials/tutorial_api_06
"""
ndvi = image.normalizedDifference(['B8', 'B4']).rename('NDVI')
return image.addBands(ndvi)
# Function to filter and mask clouds for Sentinel-2 optical data
def MaskS2clouds(image):
"""Filter and mask clouds for Sentinel-2 optical data
From: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2
"""
qa = image.select('QA60')
#Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = 1 << 10
cirrusBitMask = 1 << 11
# Both flags should be set to zero, indicating clear conditions.
mask = (qa.bitwiseAnd(cloudBitMask).eq(0)
.And(qa.bitwiseAnd(cirrusBitMask).eq(0))
)
return image.updateMask(mask).divide(10000)
event_time_ee = ee.Date(event_time)
# Define pre-event time period, for pre-event NDVI image.
preWindow_T1 = event_time_ee.advance(-sizeWindows - 30, 'day')
preWindow_T2 = event_time_ee.advance(-sizeWindows, 'day')
# Define post-event time period, for post-event NDVI image
postWindow_T1 = event_time_ee.advance(sizeWindows, 'day')
postWindow_T2 = event_time_ee.advance(sizeWindows + 30, 'day')
# Get Sentinel 2 surface reflectance imagery before and after the window.
s2_sr_before = ee.ImageCollection('COPERNICUS/S2').filterDate(preWindow_T1, preWindow_T2).filterBounds(region)
s2_sr_after = ee.ImageCollection('COPERNICUS/S2').filterDate(postWindow_T1, postWindow_T2).filterBounds(region)
# Apply the cloud masking function to the before/after image collections.
s2_sr_before = s2_sr_before.map(MaskS2clouds)
s2_sr_after = s2_sr_after.map(MaskS2clouds)
# Apply the NDVI function to the before/after image collections.
s2_ndvi_before = s2_sr_before.map(AddNDVI)
s2_ndvi_after = s2_sr_after.map(AddNDVI)
# Find median of the images in the pre-event image collection to get a pre-event image.
pre_event_NDVI_img = s2_ndvi_before.select('NDVI').median()
# Find median of the images in the post-event image collection, to get a post-event image.
post_event_NDVI_img = s2_ndvi_after.select('NDVI').median()
# Get the average NDVI over the area
pre_NDVI = pre_event_NDVI_img.reduceRegion(
geometry = region.filter(ee.Filter.eq('ID', ID)),
reducer = ee.Reducer.mean(),
scale = 10,
bestEffort = True,
maxPixels = 1e9
)
post_NDVI = post_event_NDVI_img.reduceRegion(
geometry = region.filter(ee.Filter.eq('ID', ID)),
reducer = ee.Reducer.mean(),
scale = 10,
bestEffort = True,
maxPixels = 1e9
)
return pre_NDVI, post_NDVI
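# Hedged usage sketch (added for illustration, not part of the original workflow): one way the
# pre/post means returned by Get_BeforeAfter_NDVI could be compared for a single grid cell. The
# helper name and the 0.2 drop threshold are assumptions, not values taken from this script.
def Example_NDVI_Drop(ID, event_time, region, sizeWindows=7, drop_threshold=0.2):
    """Return True if mean NDVI over the cell dropped by more than drop_threshold across the event."""
    pre_NDVI, post_NDVI = Get_BeforeAfter_NDVI(ID, event_time, region, sizeWindows)
    # reduceRegion returns an ee.Dictionary keyed by band name ('NDVI'); getInfo() pulls it client-side.
    pre_val = pre_NDVI.getInfo().get('NDVI')
    post_val = post_NDVI.getInfo().get('NDVI')
    if pre_val is None or post_val is None:
        # Fully masked (e.g. persistent cloud) -> no decision possible.
        return False
    return (pre_val - post_val) > drop_threshold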
# Output Drive folder for CSVs and GeoTiffs.
out_dir = 'EE_SAR_MovingWindow_ChangeDetection_HolyFire'
# Define duration of window.
sizeWindows = 7 # days
# Area of interest to make a grid over.
ee_import = ee.FeatureCollection("users/bvoelker/Wildfires/Holy_Fire_2018_08_06_Boundary")
#ee_import = ee.FeatureCollection("users/bvoelker/hiroshima_landslide_subset2")
aoi_bounding_box = ee_import.geometry().bounds();
# The polygons to check for changes will be laid out on a regularly spaced grid.
grid_size = 500 # 500m scale
grid = MakeGrid(aoi_bounding_box, grid_size)
grid = Add_ID_to_Features(grid)
# Number of cells in grid.
numPolys = grid.size().getInfo()
# Load in the CSVs:
# Contains many irrelevant columns. The needed columns are the shapefile
# join keys and the intensity ratio data of each polygon.
data_sum_raw = pd.read_csv('HF_CA_99th_Ptile_sum.csv')
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: Sina Finance - stock options
https://stock.finance.sina.com.cn/option/quotes.html
Options - CFFEX - CSI 300 index
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
Options - SSE - 50 ETF
Options - SSE - 300 ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Options - CFFEX - CSI 300 index
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
    Sina Finance - CFFEX - CSI 300 index - all contracts; the first contract returned is the main (most active) contract
    currently Sina Finance only carries CFFEX data for the CSI 300 index
    :return: CFFEX - CSI 300 index - all contracts
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
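# Hedged usage sketch (not part of the original module): a small helper combining the two public
# functions -- list the CFFEX CSI 300 contracts, then fetch the quote table for the first (main)
# contract. The helper name is made up for illustration only.
def _demo_main_contract_quotes() -> pd.DataFrame:
    symbol_map = option_cffex_hs300_list_sina()
    contracts = next(iter(symbol_map.values()))
    return option_cffex_hs300_spot_sina(symbol=contracts[0])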
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> pd.DataFrame:
"""
    CFFEX - CSI 300 index - specified contract - real-time quotes
    https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
    :param symbol: contract code; use the option_cffex_hs300_list_sina function to look it up
    :type symbol: str
    :return: CFFEX - CSI 300 index - specified contract - real-time call/put quotes
:rtype: pd.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.getOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_call_df = pd.DataFrame(
data_json["result"]["data"]["up"],
columns=[
"看涨合约-买量",
"看涨合约-买价",
"看涨合约-最新价",
"看涨合约-卖价",
"看涨合约-卖量",
"看涨合约-持仓量",
"看涨合约-涨跌",
"行权价",
"看涨合约-标识",
],
)
option_put_df = pd.DataFrame(
data_json["result"]["data"]["down"],
columns=[
"看跌合约-买量",
"看跌合约-买价",
"看跌合约-最新价",
"看跌合约-卖价",
"看跌合约-卖量",
"看跌合约-持仓量",
"看跌合约-涨跌",
"看跌合约-标识",
],
)
data_df = pd.concat([option_call_df, option_put_df], axis=1)
data_df['看涨合约-买量'] = pd.to_numeric(data_df['看涨合约-买量'])
data_df['看涨合约-买价'] = pd.to_numeric(data_df['看涨合约-买价'])
data_df['看涨合约-最新价'] = pd.to_numeric(data_df['看涨合约-最新价'])
data_df['看涨合约-卖价'] = pd.to_numeric(data_df['看涨合约-卖价'])
    data_df['看涨合约-卖量'] = pd.to_numeric(data_df['看涨合约-卖量'])
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data as Data  # contains the DataLoader needed for the minibatch implementation
from torch.autograd import Variable
from sklearn.utils import shuffle
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
import sys
import numpy as np
from sklearn import preprocessing
from BiNE_graph_utils import GraphUtils
import random
import math
import os
import pandas as pd
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import average_precision_score, auc, precision_recall_fscore_support
from copy import deepcopy
from data import *
from prediction import *
from evaluation import *
from UBCF_implicit import *
from UBCF_explicit import *
from PhysRec import *
from FunkSVD import *
from PMF import *
from SAE import *
from NCF import *
from FM2 import *
from BiNE_graph import *
from BiNE_graph_utils import *
from BiNE_lsh import *
from TransE import *
from TryOne import *
# import surprise # a dedicated recommender-system package
# import xlearn # a dedicated package for the FM family of models
def OverallAverage_main():
total_rating = 0.0
count_train = 0.0
for row in train_dataset.iloc():
total_rating += row['rating']
count_train += 1.0
overall_avg = total_rating/count_train
total_MAE = 0.0
total_RMSE = 0.0
count_test = 0.0
for row in test_dataset.iloc():
total_MAE += abs(row['rating'] - overall_avg)
total_RMSE += (row['rating'] - overall_avg)**2
count_test += 1.0
MAE = total_MAE / count_test
    RMSE = math.sqrt(total_RMSE / count_test)
print('MAE:', MAE, 'RMSE:', RMSE)
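# Hedged note (added for illustration; assumes the same global train_dataset / test_dataset
# DataFrames with a 'rating' column): the overall-average baseline above can also be written
# without explicit Python loops using vectorized pandas operations.
def OverallAverage_vectorized_main():
    overall_avg = train_dataset['rating'].mean()
    err = test_dataset['rating'] - overall_avg
    MAE = err.abs().mean()
    RMSE = math.sqrt((err ** 2).mean())
    print('MAE:', MAE, 'RMSE:', RMSE)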
def UBCF_explicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
ubcf = UBCF_explicit("pearsonr" , train_dataset_rating_matrix, K)
similarity_matrix = ubcf.similarity_matrix()
    similarity_matrix[similarity_matrix < 0] = 0  # when computing ratings later, the denominator must be the sum of absolute similarities (the numerator keeps its sign!), so we either take absolute values here or treat negative similarities as 0 (the original code uses the latter)
    estimated_rating_matrix = ubcf.prediction(similarity_matrix)  # estimated_rating_matrix has not been masked with -9999 yet at this point
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset, valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def IBCF_explicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
    train_dataset_rating_matrix_T = pd.DataFrame(train_dataset_rating_matrix.values.T, index=train_dataset_rating_matrix.columns.values, columns=train_dataset_rating_matrix.index.values)
import os
import pickle
import time
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class ColumnSelector(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
assert isinstance(X, pd.DataFrame)
try:
return X[self.columns]
except KeyError:
cols_error = list(set(self.columns) - set(X.columns))
raise KeyError(
f'DataFrame does not contain the following columns: {cols_error}')
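# Hedged usage sketch (not part of the original module): ColumnSelector is intended to sit at the
# head of an sklearn Pipeline; the toy column names and values below are made up for illustration.
def _example_column_selector() -> pd.DataFrame:
    toy = pd.DataFrame({'age': [21, 34], 'income': [1000, 2000], 'noise': [0, 1]})
    return ColumnSelector(columns=['age', 'income']).fit_transform(toy)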
class AddFeatures(BaseEstimator, TransformerMixin):
def __init__(self, features, silent=True):
self.features = features
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.silent:
start_t = time.time()
print('Start adding features'.center(100, '*'))
assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'
X_features = self.features.loc[self.features.index.isin(
X.index.unique())]
X_features = X_features.sort_values('buy_time') \
.groupby('id').last()
X_merge = X.reset_index() \
.merge(X_features.reset_index(), on=X.index.name, how='left', suffixes=('_train', '_features')) \
.set_index(X.index.name)
assert X_merge.shape[0] == X.shape[
0], f'Shapes of dataframe don\'t match: {X_merge.shape[0]} and {X.shape[0]}'
assert (X_merge.index == X.index).all(), 'Index Sort Error'
if not self.silent:
print(
f'End adding features, run time: {time_format(time.time()-start_t)}'.center(100, '*'))
print()
return X_merge
class MemUseOptimizing(BaseEstimator, TransformerMixin):
def __init__(self, silent=True):
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
start_t = time.time()
assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'
if not self.silent:
print('Start of dataframe memory use optimizing'.center(100, '*'))
start_memory_usage = X.memory_usage(deep=True).sum() / 1024**2
X_dtype = pd.DataFrame(
X.dtypes, columns=['dtype'], index=X.columns)
X_dtype['min'] = X.select_dtypes(['int', 'float']).min()
X_dtype['max'] = X.select_dtypes(['int', 'float']).max()
X_dtype['is_int'] = ~(X.select_dtypes(['int', 'float']).astype(
int).sum() - X.select_dtypes(['int', 'float']).sum()).astype('bool_')
X_dtype.loc[(X_dtype['is_int'] == True), 'dtype'] = 'int64'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int32').min) & (X_dtype['max'] <= np.iinfo('int32').max), 'dtype'] = 'int32'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int16').min) & (X_dtype['max'] <= np.iinfo('int16').max), 'dtype'] = 'int16'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'int8').min) & (X_dtype['max'] <= np.iinfo('int8').max), 'dtype'] = 'int8'
X_dtype.loc[(X_dtype['is_int'] == True) & (
X_dtype['min'] >= np.iinfo('uint64').min), 'dtype'] = 'uint64'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint32').min) & (X_dtype['max'] <= np.iinfo('uint32').max), 'dtype'] = 'uint32'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint16').min) & (X_dtype['max'] <= np.iinfo('uint16').max), 'dtype'] = 'uint16'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(
'uint8').min) & (X_dtype['max'] <= np.iinfo('uint8').max), 'dtype'] = 'uint8'
X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] == 0) & (
X_dtype['max'] == 1), 'dtype'] = 'bool_'
X_dtype.loc[(X_dtype['is_int'] == False), 'dtype'] = 'float64'
X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
'float32').min) & (X_dtype['max'] <= np.finfo('float32').max), 'dtype'] = 'float32'
X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(
'float16').min) & (X_dtype['max'] <= np.finfo('float16').max), 'dtype'] = 'float16'
for col in X.select_dtypes('object').columns:
num_unique_values = len(X[col].unique())
num_total_values = len(X[col])
if num_unique_values / num_total_values < 0.5:
X_dtype.loc[col, 'dtype'] = 'category'
dtype = X_dtype['dtype'].to_dict()
X = X.astype(dtype)
if not self.silent:
memory_usage = X.memory_usage(deep=True).sum() / 1024**2
print('Memory use optimizing'.center(100, '*'))
print(
f'Memory usage of properties dataframe before optimizing: {start_memory_usage:.02f} MB')
print(
f'Memory usage of properties dataframe after optimizing: {memory_usage:.02f} MB')
print(
f'This is {100*memory_usage/start_memory_usage:.02f} % of the initial size')
print(
f'End of dataframe memory use optimizing, run time: {time_format(time.time()-start_t)}'.center(64, '*'))
print()
return X
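# Hedged usage sketch (not part of the original module): downcasting a small toy frame with
# MemUseOptimizing; column names and values are made up for illustration.
def _example_mem_use_optimizing() -> pd.DataFrame:
    toy = pd.DataFrame({'flag': [0, 1, 0], 'count': [3, 5, 7], 'ratio': [0.1, 0.2, 0.3]})
    # 'flag' becomes bool_, 'count' an unsigned integer type, 'ratio' a narrower float.
    return MemUseOptimizing(silent=True).fit_transform(toy)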
class GetDate(BaseEstimator, TransformerMixin):
def __init__(self, silent=True):
self.silent = silent
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.silent:
start_t = time.time()
print('Start geting date from timestamp'.center(100, '*'))
if isinstance(X, pd.Series):
X = pd.DataFrame(X)
assert isinstance(
X, pd.DataFrame), 'This is not a pandas dataframe or series'
        df = pd.DataFrame()
import pandas as pd
bad_rate_1 = pd.read_csv("E:/mojing/train_val/bad_rate_1.csv")
bad_rate_3 = pd.read_csv("E:/mojing/train_val/bad_rate_3.csv")
bad_rate_5 = pd.read_csv("E:/mojing/train_val/bad_rate_5.csv")
bad_rate_7 = pd.read_csv("E:/mojing/train_val/bad_rate_7.csv")
bad_rate_9 = pd.read_csv("E:/mojing/train_val/bad_rate_9.csv")
bad_rate_11 = pd.read_csv("E:/mojing/train_val/bad_rate_11.csv")
dis_f = pd.read_csv("E:/mojing/adding_feature.csv")
bad_rate = pd.merge(bad_rate_5, bad_rate_7, on="Idx")
bad_rate = pd.merge(bad_rate, bad_rate_9, on="Idx")
bad_rate = pd.merge(bad_rate, bad_rate_11, on="Idx")
bad_rate = pd.merge(bad_rate, bad_rate_1, on="Idx")
bad_rate = pd.merge(bad_rate, bad_rate_3, on="Idx")
import numpy as np
import pandas as pd
import pytest
import torch
from greattunes.data_format_mappings import pretty2tensor_covariate, tensor2pretty_covariate, \
pretty2tensor_response, tensor2pretty_response
@pytest.mark.parametrize(
"x_pandas, x_torch_return",
[
[pd.DataFrame({'a': [2], 'b': [3.2], 'c': ['blue']}), torch.tensor([[2.0, 3.2, 0.0, 0.0, 1.0]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))],
[pd.DataFrame({'a': [2, -1], 'b': [3.2, 1.7], 'c': ['blue', 'red']}), torch.tensor([[2.0, 3.2, 0.0, 0.0, 1.0], [-1.0, 1.7, 1.0, 0.0, 0.0]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))]
]
)
def test_pretty2tensor_covariate_works(covar_details_covar_mapped_names, x_pandas, x_torch_return):
"""
test that pretty2tensor_covariate works for single and multiple observations
"""
# get covariate details
covar_details = covar_details_covar_mapped_names[0]
covar_mapped_names = covar_details_covar_mapped_names[1]
# the device for the method (for torch-stuff)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# run the method
x_torch, x_numpy = pretty2tensor_covariate(x_pandas, covar_details, covar_mapped_names, device=device)
# check result by checking x_torch_return
x_torch_check = (x_torch == x_torch_return)
for j in range(x_torch_check.size()[0]):
for i in range(x_torch_check.size()[1]):
assert x_torch_check[j, i].item()
# check numpy results
x_numpy_check = (x_numpy == x_torch_return.numpy())
for j in range(x_numpy_check.shape[0]):
for i in range(x_numpy_check.shape[1]):
assert x_numpy_check[j, i]
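# Hedged note (not in the original tests): the elementwise loops above could equivalently be
# collapsed into single whole-tensor/array assertions, e.g.
#     assert torch.equal(x_torch, x_torch_return)
#     assert np.array_equal(x_numpy, x_torch_return.cpu().numpy())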
@pytest.mark.parametrize(
"x_pandas, x_torch_return",
[
[pd.DataFrame({'a': [2], 'b': [3.2], 'd': [12]}), torch.tensor([[2.0, 3.2, 0.0, 0.0, 0.0]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))],
[pd.DataFrame({'a': [2, -1], 'b': [3.2, 1.7], 'd': [12, 15]}), torch.tensor([[2.0, 3.2, 0.0, 0.0, 0.0], [-1.0, 1.7, 0.0, 0.0, 0.0]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))]
]
)
def test_pretty2tensor_covariate_adds_missing_columns_removes_extra_columns(covar_details_covar_mapped_names, x_pandas, x_torch_return):
"""
    checks that columns are added to the output (x_output) for variables even if a given variable is not present in the
    input x_data (pandas). In these examples a categorical variable with three elements (red, green, blue) is expected
    to be present in the data as well.
    also checks that additional columns not needed (as specified by covar_details) are removed
"""
# get covariate details
covar_details = covar_details_covar_mapped_names[0]
covar_mapped_names = covar_details_covar_mapped_names[1]
# the device for the method (for torch-stuff)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# run the method
x_torch, x_numpy = pretty2tensor_covariate(x_pandas, covar_details, covar_mapped_names, device=device)
# check result by checking x_torch_return
x_torch_check = (x_torch == x_torch_return)
for j in range(x_torch_check.size()[0]):
for i in range(x_torch_check.size()[1]):
assert x_torch_check[j, i].item()
# check numpy results
x_numpy_check = (x_numpy == x_torch_return.numpy())
for j in range(x_numpy_check.shape[0]):
for i in range(x_numpy_check.shape[1]):
assert x_numpy_check[j, i]
@pytest.mark.parametrize(
"train_X_sample, pandas_out",
[
[torch.tensor([[2.0, 3.2, 0.0, 0.0, 1.0]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({'a': [2], 'b': [3.2], 'c': ['blue']})],
        [torch.tensor([[2.0, 3.2, 0.0, 0.0, 1.0], [-1.0, 1.7, 1.0, 0.0, 0.0]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({'a': [2, -1], 'b': [3.2, 1.7], 'c': ['blue', 'red']})],
import pandas as pd
import great_expectations.expectations.metrics
from great_expectations.core import IDDict
from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.exceptions.metric_exceptions import MetricProviderError
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.core import ExpectColumnMaxToBeBetween
from great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than import (
ExpectColumnValueZScoresToBeLessThan,
)
from great_expectations.expectations.registry import get_expectation_impl
from great_expectations.validator.validation_graph import (
MetricConfiguration,
MetricEdge,
ValidationGraph,
)
from great_expectations.validator.validator import Validator
def test_parse_validation_graph():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = Batch(data=df)
graph = ValidationGraph()
engine = PandasExecutionEngine()
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(configuration, engine)
for metric_configuration in validation_dependencies["metrics"].values():
Validator(execution_engine=engine).build_metric_dependency_graph(
graph, metric_configuration, configuration, execution_engine=engine
)
ready_metrics, needed_metrics = Validator(engine)._parse_validation_graph(
validation_graph=graph, metrics=dict()
)
assert len(ready_metrics) == 4 and len(needed_metrics) == 5
# Should be passing tests even if given incorrect MetricProvider data
def test_parse_validation_graph_with_bad_metrics_args():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
graph = ValidationGraph()
engine = PandasExecutionEngine()
validator = Validator(execution_engine=engine)
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(
configuration,
execution_engine=engine,
)
for metric_configuration in validation_dependencies["metrics"].values():
validator.build_metric_dependency_graph(
graph, metric_configuration, configuration, execution_engine=engine
)
ready_metrics, needed_metrics = validator._parse_validation_graph(
validation_graph=graph, metrics=("nonexistent", "NONE")
)
assert len(ready_metrics) == 4 and len(needed_metrics) == 5
def test_populate_dependencies():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = Batch(data=df)
graph = ValidationGraph()
engine = PandasExecutionEngine()
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(
configuration,
engine,
)
for metric_configuration in validation_dependencies["metrics"].values():
Validator(execution_engine=engine).build_metric_dependency_graph(
graph, metric_configuration, configuration, execution_engine=engine
)
assert len(graph.edges) == 10
def test_populate_dependencies_with_incorrect_metric_name():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = Batch(data=df)
graph = ValidationGraph()
engine = PandasExecutionEngine()
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(
configuration,
engine,
)
try:
Validator(execution_engine=engine).build_metric_dependency_graph(
graph,
MetricConfiguration("column_values.not_a_metric", IDDict()),
configuration,
execution_engine=engine,
)
except MetricProviderError as e:
graph = e
assert isinstance(graph, MetricProviderError)
def test_graph_validate(basic_datasource):
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, None]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "b",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
batch = basic_datasource.get_single_batch_from_batch_request(
BatchRequest(
**{
"datasource_name": "my_datasource",
"data_connector_name": "test_runtime_data_connector",
"batch_data": df,
"partition_request": PartitionRequest(
**{
"partition_identifiers": {
"pipeline_stage_name": 0,
"airflow_run_id": 0,
"custom_key_0": 0,
}
}
),
}
)
)
result = Validator(
execution_engine=PandasExecutionEngine(), batches=[batch]
).graph_validate(configurations=[expectationConfiguration])
assert result == [
ExpectationValidationResult(
success=True,
expectation_config=None,
meta={},
result={
"element_count": 6,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 1,
"missing_percent": 16.666666666666664,
"unexpected_percent_nonmissing": 0.0,
},
exception_info=None,
)
]
# this might indicate that we need to validate configuration a little more strictly prior to actually validating
def test_graph_validate_with_bad_config(basic_datasource):
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, None]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_max_to_be_between",
kwargs={"column": "not_in_table", "min_value": 1, "max_value": 29},
)
expectation = ExpectColumnMaxToBeBetween(expectationConfiguration)
batch = basic_datasource.get_single_batch_from_batch_request(
BatchRequest(
**{
"datasource_name": "my_datasource",
"data_connector_name": "test_runtime_data_connector",
"batch_data": df,
"partition_request": PartitionRequest(
**{
"partition_identifiers": {
"pipeline_stage_name": 0,
"airflow_run_id": 0,
"custom_key_0": 0,
}
}
),
}
)
)
try:
result = Validator(
execution_engine=PandasExecutionEngine(), batches=[batch]
).graph_validate(configurations=[expectationConfiguration])
except KeyError as e:
result = e
assert isinstance(result, KeyError)
# Tests that runtime configuration actually works during graph validation
def test_graph_validate_with_runtime_config(basic_datasource):
df = pd.DataFrame(
{"a": [1, 5, 22, 3, 5, 10, 2, 3], "b": [97, 332, 3, 4, 5, 6, 7, None]}
)
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={"column": "b", "mostly": 1, "threshold": 2, "double_sided": True},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = basic_datasource.get_single_batch_from_batch_request(
BatchRequest(
**{
"datasource_name": "my_datasource",
"data_connector_name": "test_runtime_data_connector",
"batch_data": df,
"partition_request": PartitionRequest(
**{
"partition_identifiers": {
"pipeline_stage_name": 0,
"airflow_run_id": 0,
"custom_key_0": 0,
}
}
),
}
)
)
try:
result = Validator(
execution_engine=PandasExecutionEngine(), batches=(batch,)
).graph_validate(
configurations=[expectationConfiguration],
runtime_configuration={"result_format": "COMPLETE"},
)
except AssertionError as e:
result = e
assert result == [
ExpectationValidationResult(
success=False,
meta={},
result={
"element_count": 8,
"unexpected_count": 1,
"unexpected_percent": 12.5,
"partial_unexpected_list": [332.0],
"missing_count": 1,
"missing_percent": 12.5,
"unexpected_percent_nonmissing": 14.285714285714285,
"partial_unexpected_index_list": None,
"partial_unexpected_counts": [{"value": 332.0, "count": 1}],
"unexpected_list": [332.0],
"unexpected_index_list": None,
},
expectation_config=None,
exception_info=None,
)
]
def test_validator_default_expectation_args__pandas(basic_datasource):
    df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, None]})
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 17 23:28:15 2017
@author: konodera
Rate at which a given user buys a given item at that time of day
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
import utils
utils.start(__file__)
#==============================================================================
# load
#==============================================================================
col = ['order_id', 'user_id', 'product_id', 'order_dow', 'order_hour_of_day', 'order_number_rev']
log = utils.read_pickles('../input/mk/log', col).sort_values('user_id')
log = pd.merge(log, pd.read_pickle('../input/mk/timezone.p'))
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
            For __setitem__ we may have stricter compatibility restrictions than
for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if lib.is_scalar(val):
# i.e. self.ndim == 1
return self._box_func(val)
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
key = check_bool_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
is_period = is_period_dtype(self)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
elif key is Ellipsis:
# GH#21282 indexing with Ellipsis is similar to a full slice,
# should preserve `freq` attribute
freq = self.freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, dtype=self.dtype, freq=freq)
return result
return self._simple_new(result, dtype=self.dtype, freq=freq)
def __setitem__(
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
if lib.is_scalar(value) and not isna(value):
value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
if not is_slice:
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
f"shape mismatch: value array of length '{len(key)}' "
"does not match indexing result of length "
f"'{len(value)}'."
)
raise ValueError(msg)
elif not len(key):
return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
value = value.asi8
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
else:
msg = (
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()
def _maybe_clear_freq(self):
# inplace operations like __setitem__ may invalidate the freq of
# DatetimeArray and TimedeltaArray
pass
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
from pandas import Categorical
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype)
and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
def view(self, dtype=None):
if dtype is None or dtype is self.dtype:
return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(
self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(f"Unexpected type for 'value': {type(value)}")
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view("i8"), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(
cls(result.index.view("i8"), dtype=self.dtype), name=result.index.name
)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return self.asi8 == iNaT
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
result : ndarray with values replace by the fill_value
mask the result if needed, convert to the provided dtype if its not
None
This is an internal routine.
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def fillna(self, value=None, method=None, limit=None):
# TODO(GH-20300): remove this
# Just overriding to ensure that we avoid an astype(object).
# Either 20300 or a `_values_for_fillna` would avoid this duplication.
if isinstance(value, ABCSeries):
value = value.array
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
if method == "pad":
func = missing.pad_1d
else:
func = missing.backfill_1d
values = self._data
if not is_period_dtype(self):
# For PeriodArray self._data is i8, which gets copied
# by `func`. Otherwise we need to make a copy manually
# to avoid modifying `self` in-place.
values = values.copy()
new_values = func(values, limit=limit, mask=mask)
if is_datetime64tz_dtype(self):
# we need to pass int64 values to the constructor to avoid
# re-localizing incorrectly
new_values = new_values.view("i8")
new_values = type(self)(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------
# Frequency Properties/Methods
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._freq
@freq.setter
def freq(self, value):
if value is not None:
value = frequencies.to_offset(value)
self._validate_frequency(self, value)
self._freq = value
@property
def freqstr(self):
"""
Return the frequency object as a string if its set, otherwise None
"""
if self.freq is None:
return None
return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
        Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
if self.ndim != 1:
return None
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
@classmethod
def _validate_frequency(cls, index, freq, **kwargs):
"""
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
The index on which to determine if the given frequency is valid
freq : DateOffset
The frequency to validate
"""
if is_period_dtype(cls):
# Frequency validation is not meaningful for Period Array/Index
return None
inferred = index.inferred_freq
if index.size == 0 or inferred == freq.freqstr:
return None
try:
on_freq = cls._generate_range(
start=index[0], end=None, periods=len(index), freq=freq, **kwargs
)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError
except ValueError as e:
if "non-fixed" in str(e):
# non-fixed frequencies are not meaningful for timedelta64;
# we retain that error message
raise e
# GH#11587 the main way this is reached is if the `np.array_equal`
# check above is False. This can also be reached if index[0]
# is `NaT`, in which case the call to `cls._generate_range` will
# raise a ValueError, which we re-raise with a more targeted
# message.
raise ValueError(
f"Inferred frequency {inferred} from passed values "
f"does not conform to passed frequency {freq.freqstr}"
)
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
# see GH#23789
@property
def _is_monotonic_increasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[0]
@property
def _is_monotonic_decreasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[1]
@property
def _is_unique(self):
return len(unique1d(self.asi8)) == len(self)
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_datetimelike_array_cmp)
# pow is invalid for all three subclasses; TimedeltaArray will override
# the multiplication and division ops
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
def _add_datetimelike_scalar(self, other):
# Overridden by TimedeltaArray
raise TypeError(f"cannot add {type(self).__name__} and {type(other).__name__}")
_add_datetime_arraylike = _add_datetimelike_scalar
def _sub_datetimelike_scalar(self, other):
# Overridden by DatetimeArray
assert other is not NaT
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
_sub_datetime_arraylike = _sub_datetimelike_scalar
def _sub_period(self, other):
# Overridden by PeriodArray
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")
def _add_offset(self, offset):
raise AbstractMethodError(self)
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
        other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(self.shape, dtype="i8")
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view(
"i8"
)
new_values = self._maybe_mask_results(new_values)
return new_values.view("i8")
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas.core.arrays import TimedeltaArray
other = TimedeltaArray._from_sequence(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view("i8")
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
raise TypeError(
f"Cannot add {type(self).__name__} and {type(NaT).__name__}"
)
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
def _sub_nat(self):
"""
Subtract pd.NaT from self
"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = np.zeros(self.shape, dtype=np.int64)
result.fill(iNaT)
return result.view("timedelta64[ns]")
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT.
"""
if not is_period_dtype(self):
raise TypeError(
f"cannot subtract {other.dtype}-dtype from {type(self).__name__}"
)
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(
cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr
)
raise IncompatibleFrequency(msg)
new_values = checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = NaT
return new_values
def _addsub_object_array(self, other: np.ndarray, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : np.ndarray[object]
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn(
"Adding/subtracting array of DateOffsets to "
f"{type(self).__name__} not vectorized",
PerformanceWarning,
)
# For EA self.astype('O') returns a numpy array, not an Index
left = self.astype("O")
res_values = op(left, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs["freq"] = "infer"
try:
res = type(self)._from_sequence(res_values, **kwargs)
except ValueError:
# e.g. we've passed a Timestamp to TimedeltaArray
res = res_values
return res
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None and freq != self.freq:
if isinstance(freq, str):
freq = frequencies.to_offset(freq)
offset = periods * freq
result = self + offset
return result
if periods == 0:
# immutable so OK
return self.copy()
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + periods * self.freq
end = self[-1] + periods * self.freq
# Note: in the DatetimeTZ case, _generate_range will infer the
# appropriate timezone from `start` and `end`, so tz does not need
# to be passed explicitly.
return self._generate_range(start=start, end=end, periods=None, freq=self.freq)
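    # Illustrative note (not part of the original source; assumes the public
    # DatetimeIndex.shift dispatches to _time_shift): _time_shift moves *values*,
    # whereas Series.shift moves *positions*. A minimal sketch:
    #   >>> idx = pd.date_range("2020-01-01", periods=3, freq="D")
    #   >>> idx.shift(1)                 # values move forward one freq unit
    #   DatetimeIndex(['2020-01-02', '2020-01-03', '2020-01-04'], dtype='datetime64[ns]', freq='D')
    #   >>> pd.Series(idx).shift(1)      # positions move, padded with NaT
    #   0          NaT
    #   1   2020-01-01
    #   2   2020-01-02
    #   dtype: datetime64[ns]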
@unpack_zerodim_and_defer("__add__")
def __add__(self, other):
# scalar others
if other is NaT:
result = self._add_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._add_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.add)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
return self._add_datetime_arraylike(other)
elif is_integer_dtype(other):
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._addsub_int_array(other, operator.add)
else:
# Includes Categorical, other ExtensionArrays
# For PeriodDtype, if self is a TimedeltaArray and other is a
# PeriodArray with a timedelta-like (i.e. Tick) freq, this
# operation is valid. Defer to the PeriodArray implementation.
# In remaining cases, this will end up raising TypeError.
return NotImplemented
if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray(result)
return result
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
@unpack_zerodim_and_defer("__sub__")
def __sub__(self, other):
# scalar others
if other is NaT:
result = self._sub_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_delta(-other)
elif isinstance(other, DateOffset):
# specifically _not_ a Tick
result = self._add_offset(-other)
elif isinstance(other, (datetime, np.datetime64)):
result = self._sub_datetimelike_scalar(other)
elif lib.is_integer(other):
# This check must come after the check for np.timedelta64
# as is_integer returns True for these
if not is_period_dtype(self):
raise integer_op_not_supported(self)
result = self._time_shift(-other)
elif isinstance(other, Period):
result = self._sub_period(other)
# array-like others
elif is_timedelta64_dtype(other):
# TimedeltaIndex, ndarray[timedelta64]
result = self._add_delta(-other)
elif is_object_dtype(other):
# e.g. Array/Index of DateOffset objects
result = self._addsub_object_array(other, operator.sub)
elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
# DatetimeIndex, ndarray[datetime64]
result = self._sub_datetime_arraylike(other)
elif is_period_dtype(other):
# PeriodIndex
result = self._sub_period_array(other)
elif | is_integer_dtype(other) | pandas.core.dtypes.common.is_integer_dtype |
from app import app
import pandas as pd
qh_info = app.config["QH_INFO"]
def get_qh_sub(target, sub="all"):
if sub == "all": # 含所有小弟【信访绩效考核】
if target in qh_info["shej_02"].tolist():
shij = qh_info["shij_02"].tolist()
xj = qh_info["xj_02"].tolist()
return shij + xj
if target in qh_info["shij_02"].tolist():
xj = qh_info["xj_02"][qh_info["shij_02"]==target].tolist()
return xj
if target in qh_info["xj_02"].tolist():
return []
    else: # "go_down" mode: only descend one level
if target in qh_info["shej_02"].tolist():
shij = qh_info["shij_02"].tolist()
return shij
if target in qh_info["shij_02"].tolist():
xj = qh_info["xj_02"][qh_info["shij_02"]==target].tolist()
return xj
if target in qh_info["xj_02"].tolist():
return [target]
def get_qh_level(target):
if target in qh_info.get("shej_02", pd.Series()).tolist():
return "shej_02"
if target in qh_info.get("shij_02", pd.Series()).tolist():
return "shij_02"
if target in qh_info.get("xj_02", | pd.Series() | pandas.Series |
# import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
# import seaborn as sns
df = | pd.read_csv("Dataset/covid_19_india.csv") | pandas.read_csv |
import sys,os
import pandas as pd
import fnmatch
import pyarrow as pa
import pyarrow.parquet as pq
import datetime
defaultcols=['store_code_uc','upc','units','price','feature','display','dma_code','retailer_code','upc_ver_uc','week_end']
# This is the main interface
# This pre-processes files directly downloaded from Kilts and understands the Kilts-Nielsen directory structure
# The processed results are saved by DMA code in a parquet file for fast reading and processing
#
# Arguments:
# - read_dir: base directory of Kilts-Nielsen files to search through
# - outfile: stub of file name for processed output, creates two files:
# - .parquet: with price and quantity data
# - .hdf: with stores and product tables (named "stores", and "prods")
# Filtering the data
# Optional Arguments:
# - statelist (list of two letter state abbreviations: eg ['CT','NY','NJ'])
# - dmalist (list of dma codes eg: [603, 506])
# - module_code a single module code (eg. 1344 for Cereal, etc.)
# - channel_filter: a list of channels (e.g ['F','D'] for food and drug stores)
def read_all_data_new(read_dir,outfile,statelist=None,dmalist=None,module_code=None,channel_filter=['F','M'],cols=defaultcols):
# list of price-quantity files
fns=get_files(read_dir,'Movement')
# filter numeric part of module code out of filename
if module_code:
fns=[s for s in fns if module_code ==int(os.path.split(s)[1].split('_')[0])]
# this does all of the work
df=pd.concat([read_single_file_new(fn,read_dir,statelist,dmalist,channel_filter) for fn in fns],ignore_index=True)
# some cleaning up to reduce space
df.feature.fillna(-1,inplace=True)
df.display.fillna(-1,inplace=True)
df['display']=df['display'].astype('int8')
df['feature']=df['feature'].astype('int8')
df['panel_year']=df['panel_year'].astype('int16')
df['store_zip3']=df.store_zip3.astype('int16')
df['retailer_code']=df['retailer_code'].astype('int16')
df['dma_code']=df['dma_code'].astype('int16')
df['prmult']=df['prmult'].astype('int8')
    # convert multi-buy promotions (e.g. "2 for $5.00") to a per-unit price ($2.50)
df.loc[df.prmult>1,'price']=df.loc[df.prmult>1,'price']/df.loc[df.prmult>1,'prmult']
# Read the products (matching only)
prods=pd.merge(pd.read_table(get_files(read_dir,'products.tsv')[0]),df[['upc','upc_ver_uc']].drop_duplicates(),on=['upc','upc_ver_uc'])
print("Number of Products: ",str(len(prods)))
# Read the stores (matching only)
stores=pd.concat([get_stores(read_dir,statelist=None,my_year=my_year,dmalist=dmalist) for my_year in range(2006,2015+1)]).groupby(level=0).last()
# Use python dates not Nielsen dates
df=fix_dates(df)
# Write to an parquet file (too big for other formats!)
write_by_dma(df[cols],outfile+'.parquet')
# Write the rest as HDF5 tables
stores.to_hdf(outfile+'.hdf','stores',table=False,append=False)
prods.drop_duplicates().to_hdf(outfile+'.hdf','prods',table=False,append=False)
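# Hedged usage sketch (not part of the original module): the directory path and
# output stub below are illustrative assumptions; module 1344 (cereal) and the
# example DMA list [603, 506] are taken from the argument notes above.
def _example_read_all_data():
    read_all_data_new(read_dir='/data/kilts_nielsen',
                      outfile='cereal_extract',
                      dmalist=[603, 506],
                      module_code=1344,
                      channel_filter=['F', 'M'])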
# This reads a single movement file
def read_single_file_new(fn,read_dir,statelist=None,dmalist=None,channel_filter=None):
print ("Processing ",fn)
my_year=int(fn.split('_')[-1].split('.')[0])
rms=pd.read_table(filter_year(get_files(read_dir,'rms_version'),my_year))
all_stores = get_stores(read_dir,statelist,my_year,storelist=None,dmalist=dmalist)[['store_zip3','dma_code','channel_code','retailer_code']]
if channel_filter:
our_stores=all_stores[all_stores.channel_code.isin(list(channel_filter))]
else:
our_stores=all_stores
return pd.merge(pd.merge(pd.read_table(fn),our_stores,left_on='store_code_uc',right_index=True),rms,on='upc')
# This fixes nielsen dates to python dates
def fix_dates(df):
a=df.week_end.unique()
x= | pd.Series(a,index=a,name='week') | pandas.Series |
# libraries needed for crawling
import requests
import time
import pandas as pd
from bs4 import BeautifulSoup
import selenium
from selenium import webdriver as wd
from tqdm import tqdm
import lxml.html
import os
import re
import logging.config
def dart_crawling(my_driver_path, url):
"""
다트 사이트에서 html을 이용하여 공시 table 가지고 오는 함수
params:
driver : chromedriver가 있는 경로를 넣어주세요.
url : 공시 url을 넣어주세요
return:
DART_df : 공시 table 내용을 가지고 옴
"""
try :
        # set up the chromedriver path
        current_path = os.getcwd() # save the current working directory
        os.chdir(my_driver_path) # change into the directory containing chromedriver
        driver = wd.Chrome(r'chromedriver') # launch chromedriver
        os.chdir(current_path) # change back to the original directory
time.sleep(2)
driver.get(url)
        # switch into the iframe
        driver.switch_to.default_content() # reset to the top-level document first
        driver.switch_to.frame('ifrm')
        # extract the table HTML from the page source
html = driver.page_source
        # index where the table starts
p = re.compile("<table")
m = p.search(html)
start_idx= m.start()
        # index where the table ends
p = re.compile("</table>")
m = p.search(html)
end_idx= m.end()
        # slice out the table HTML
table_html = html[start_idx: end_idx]
html_df = | pd.read_html(table_html) | pandas.read_html |
"""
Number of chords by song for all songs
"""
from pymir import settings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def compute(db):
chords_freq = {}
for song in db:
freq = len(song['chord_vector_detected'])
if not freq:
continue
if freq in chords_freq:
chords_freq[freq] += 1
else:
chords_freq[freq] = 1
df = | pd.Series(chords_freq) | pandas.Series |
import requests
import json
import pandas as pd
from datetime import datetime
# gets data from redcap
def get_data(project, start=None, stop=None,variables=None):
"""
:param project: A project object
:param start: start date eg '2009-01-01'. leave None for beginning of study
:param stop: stop date eg '2009-01-02'. leave None for latest input
:param variables:
:return:
"""
data = {
'token': project.token,
'content': 'record',
'format': 'json',
'type': 'flat',
'fields[0]': project.id_var,
'fields[1]': project.date_var,
#'record[]': outputTwo(),
'rawOrLabel': 'raw',
'rawOrLabelHeaders': 'raw',
'exportCheckboxLabel': 'false',
'exportSurveyFields': 'false',
'exportDataAccessGroups': 'false',
'returnFormat': 'json'
}
request = requests.post(project.url, data=data, verify=False)
data = json.loads(request.text)
data2 = pd.DataFrame(data)
data2[project.date_var] = pd.to_datetime(data2[project.date_var])
if start is not None:
data2 = data2.loc[data2[project.date_var] >= pd.to_datetime(start), :]
if stop is not None:
data2 = data2.loc[data2[project.date_var] <= | pd.to_datetime(stop) | pandas.to_datetime |
# ---------------------------------- Imports --------------------------------- #
import logging
import discord
from discord.ext import commands
from discord_slash import SlashContext
from lavalink import DefaultPlayer
import spotipy
from spotipy import SpotifyClientCredentials
import pandas as pd
from . import config
from . import util
# ---------------------------------- Config ---------------------------------- #
cfg = config.load_config()
client_credentials_manager = SpotifyClientCredentials(
client_id=cfg['spotify']['id'],
client_secret=cfg['spotify']['secret']
)
spotify = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
# ---------------------------------------------------------------------------- #
# Methods / Commands #
# ---------------------------------------------------------------------------- #
# --------------------------- ANCHOR SPOTIFY self --------------------------- #
# Gets spotify playlist info and returns a pandas dataframe of all the songs in
# the playlist.
def sp_get_playlist(playlist_id: str) -> dict:
playlist_features = ['artist', 'track_name', 'album']
playlist_df = | pd.DataFrame(columns=playlist_features) | pandas.DataFrame |
import numpy as np
import pandas as pd
from io import StringIO
import re
import csv
from csv import reader, writer
import sys
import os
import glob
import fnmatch
from os import path
import matplotlib
from matplotlib import pyplot as plt
print("You are using Zorbit Analyzer v0.1")
directory_path = input("Please enter the path to the directory of your files. All files should be in the same location: ") #Asks users for path
os.chdir(directory_path)
x = input('Input your Interproscan output gff3 file(s):') #Asks users for gff3 input
if "*" in x: #Handles the case of *.gff3
gff3_input = glob.glob("*.gff3")
else:
    y = re.sub('[|; ]', ', ', x) #Substitutes possible gff3 file delimiters with commas
gff3_input = re.split(', ', y) #Splits gff3 input into a list
for i in gff3_input:
if os.path.exists(i): #Checks existence of gff3 file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
fasta_input = input('Input your fasta file:') #Asks users for fasta input file
if os.path.exists(fasta_input): #Checks existence of fasta input file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
if fnmatch.fnmatch(fasta_input, '*fastq*'):
print("Zorbit Analyzer is not specifically constructed to handle fastq files but will try. If errors convert to fasta format")
ortho_input = input ('Input your ProteinOrtho output file:') #Asks users for ProteinOrtho input
if os.path.exists(ortho_input): #Checks existence of ProteinOrtho input
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
ortho_input_file_name = input ('Input your ProteinOrtho input file name (faa). Leave blank if unknown though will run slower:') #Asks users for ProteinOrtho output file
while True:
file_to_write = input('Input your desired ZorbitAnalyzer output file name: ') #Asks users for output file
if file_to_write != '': #Checks to see if user entered a file name
break
else:
print("You did not enter an output file name") #Repeatedly asks for output file name if not given
continue
Choice = ['yes', 'y', 'no', 'n']
flag = True
while flag is True:
exclusion_flag = input("Would you like to exclude sequences that do not have either Interproscan or ProteinOrtho hits? (Yes/No) ").lower()
for i in Choice:
if exclusion_flag.startswith(i):
flag = False
break
else:
continue
if exclusion_flag.startswith('y'):
exclusion_flag = 1
else:
exclusion_flag = 0
print("Analyzing files") #Lets user know input portion has completed
pdortho = pd.read_csv(ortho_input, sep="\t", engine="python") #Creates ProteinOrtho pd
test_file = 'test.txt'
test2_file = 'test2.txt'
test3_file = 'test3.txt'
#Testing open/closing files
def try_file(input_file): #Defining function that creates/opens user output file and truncates it before closing it
try:
open(input_file, 'w+').close()
except IOError:
print("Unable to open output file")
try_file(file_to_write) #Creates/opens output file and truncates it before closing it
try_file('test.txt') #Creates/opens test file and truncates it before closing it
try_file('gff3_file_to_write.txt') #Creates/opens gff3 output file and truncates it before closing it
try_file('gff3_statsfile_to_write.txt') #Creates/opens gff3 output file and truncates it before closing i
try_file('fasta_file_to_write.txt') #Creates/opens fasta output file and truncates it before closing it
try_file('ortho_file_to_write.txt') #Creates/opens ProteinOrtho output file and truncates it before closing it
try_file('ortho_file_to_write2.txt') #Creates/opens a second ProteinOrtho output file and truncates it before closing it
try_file('zorbit_statistics.txt') #Creates/opens a statistics file and truncates it before closing it
#Defining variables for later use
fasta_file_to_write = 'fasta_file_to_write.txt' #Defining the interim fasta file to write
gff3_file_to_write = 'gff3_file_to_write.txt' #Defining the interim gff3 file to write
gff3_statsfile_to_write = 'gff3_statsfile_to_write.txt'
ortho_file_to_write = 'ortho_file_to_write.txt' #Defining the interim Protein Ortho file to write
zorbit_statistics = 'zorbit_statistics.txt' #Defining the Zorbit Statistics variable
string_to_remove1 = '##' #Removes header and gene introduction lines
string_to_remove2 = 'polypeptide' #Removes redundant polypeptide line
string_to_remove3 = 'MobiDBLite' #Removes results from MobiDBLite database
string_to_end = '##FASTA' #Sets end of file as the start of the fasta/code part of gff3 files
#fasta
fasta_file = None
fastq_file = None
fasta_type = "amino_acid"
fastq_start_character = '@'
fasta_start_character = '>' #Setting start character for fasta information line
fastq_third_line_character ='+'
fna_type = "fna"
if fna_type in fasta_input:
fasta_type = "nucleotide"
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_file = fasta_input
break
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fastq_file = fasta_input
fasta_type = "nucleotide"
break
else:
print("The fasta input file does not seem to have typical fasta or fastq format")
sys.exit()
if fasta_file is not None: #Checking to see if fasta input was fasta file (should not be empty)
print("Working on fasta file")
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a') as f: #Opens the output file to append
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_nostart = re.sub('>', '\n', line) #Removing > symbol and replacing with carriage return from each occurrence
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
else:
if not line.isspace(): #Will not write blank lines
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
continue
elif fastq_file is not None: #Checking to see if fasta input was fastq file (should not be empty)
print("Working on fastq file")
with open(fasta_input, 'r', encoding="latin-1") as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a', encoding="latin-1") as f: #Opens the output file to append
for i, line in enumerate(fasta): #reading lines in fasta file
if i == 0: # Dealing with first line differently (no line break)
fasta_nostart = re.sub('@', '', line) #Removing @ symbol from each occurrence and replaces with nothing
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fasta_nostart = re.sub('@', '\n', line) #Removing @ symbol from each occurrence and replaces with carriage return
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
elif i % 4 == 1: #Writing line 2/4 (sequence file) to output file
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
pass
else:
print("The input file does not seem to be in typical fasta or fastq format. Please check and try again") #Ending if atypical fasta/fastq format
sys.exit()
for i in gff3_input: #Cleaning up gff3 file prior to conversion to dataframe
with open(i, 'r') as stack:
with open(gff3_file_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)
continue
elif string_to_remove2 in line: #Removing polypeptide line (if present)
continue
elif string_to_remove3 in line: #Removing MobiDBLite database (if present)
continue
else:
f.write(line)
for i in gff3_input: #Saving unedited gff3 input into file for statistics purposes later
with open(i, 'r') as stack:
with open(gff3_statsfile_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)
continue
else:
f.write(line)
fasta_column_names = ['SeqID', 'Information', 'Sequence'] #Defining the list of fasta column names to pass to the dataframe
fastapd = pd.read_csv(fasta_file_to_write, names=fasta_column_names, engine = "python", header=None) #Creating a Pandas dataframe from the fasta output csv
SeqID_list = fastapd["SeqID"].tolist() #Saving contents of the SeqID column to a list
fasta_row_number = len(fastapd) #Counting the number of rows in the fasta dataframe for the statistics output
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the fasta is " + str(fasta_row_number) + "\n")
#Start orthopd
print("Working on ProteinOrtho dataframe")
orthopd = pd.read_csv(ortho_input, sep='\t', engine="python", na_values="*") #Creates a Pandas dataframe from ProteinOrtho input csv
ortho_column_names = list(orthopd.columns)
#Defining the SeqID column
if ortho_input_file_name != "":
orthopd.columns = ["SeqID" if col.startswith(ortho_input_file_name) else col for col in orthopd.columns] #Renaming the fasta input column in ProteinOrtho dataframe to SeqID to match other dataframes
else: pass
#Attempting to identify which column corresponds to the input fasta
fasta_input_split = fasta_input.split('.', 1)[0] #Trying to strip the file extension from the fasta input name in case there was .fasta versus .faa, etc
orthopd_pruned = orthopd.drop(columns=['# Species', 'Genes', 'Alg.-Conn.']) #Creating a new dataframe without the first three columns which will always have data in each row in order to id longest column
if orthopd.columns.astype(str).str.contains("SeqID").any(): #Checking to see if fasta input file name is in the ProteinOrtho column name list
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Trying to find fasta file in ProteinOrtho file through other means")
orthopd.columns = ["SeqID" if col.startswith(fasta_input_split) else col for col in orthopd.columns] #Using the input fasta file name as a guess for the faa file name
if orthopd.columns.astype(str).str.contains("SeqID").any(): #Breaks loops if the column name has been found/replaced
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Attempting another way of identifying fasta file column. This may take some time")
orthopd_fasta_column_name = orthopd_pruned.count().idxmax() #Finding column with the least number of NaN which is likely the input fasta
for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the putative SeqID ProteinOrtho column
if orthopd[orthopd_fasta_column_name].astype(str).str.contains(l).any():
orthopd.rename(columns=lambda x: x.replace(orthopd_fasta_column_name, "SeqID"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID
break
else:
print("Final method to identify fasta file column. This may take hours")
orthopd = orthopd.drop(orthopd[(orthopd['Genes'] == 1)].index) #Gets rid of rows with just a single gene found in order to speed up full frame search
for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the ProteinOrtho dataframe
for i in orthopd.columns:
if orthopd[i].astype(str).str.contains(l).any():
orthopd.rename(columns=lambda x: x.replace(i, "SeqID"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID
break
orthopd = orthopd.drop(orthopd[(orthopd['SeqID'].isna())].index)#Removing SeqID rows with NaN
#Splitting the duplicated entries in the SeqID column and making new rows with a SeqID member on each but with same data otherwise
def pir2(df, c): #Defining function to split the SeqID column at each comma and place one of each split value onto a new, otherwise duplicated row
colc = df[c].astype(str).str.split(',')
clst = colc.values.astype(object).tolist()
lens = [len(l) for l in clst]
j = df.columns.get_loc(c)
v = df.values
n, m = v.shape
r = np.arange(n).repeat(lens)
return pd.DataFrame(
np.column_stack([v[r, 0:j], np.concatenate(clst), v[r, j+1:]]),
        columns=df.columns
)
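# Illustrative sketch (not in the original script; assumes pir2 labels its result
# with the input frame's columns): on a toy frame, pir2 turns a row whose SeqID is
# "a,b" into two rows that share the remaining column values.
def _pir2_example():
    toy = pd.DataFrame({"SeqID": ["a,b", "c"], "Genes": [2, 1]})
    return pir2(toy, "SeqID")  # rows become ("a", 2), ("b", 2), ("c", 1)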
orthopd3 = pir2(orthopd, "SeqID") #Running column split function on the SeqID column on orthopd
print("Beginning data analysis on the ProteinOrtho dataframe")
#Graph Algebraic Connectivity
orthopd_algconn_nozero = orthopd3[orthopd3['Alg.-Conn.'] != 0] #Removing rows with zero algebraic connectivity before graphing
orthopd_algconn_noone = orthopd_algconn_nozero[orthopd_algconn_nozero['Alg.-Conn.'] != 1] #Also removing rows whose algebraic connectivity is exactly 1
orthopd_algconn_noone['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity without Unity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph_noone.png")#Saving graph to file
plt.clf()
orthopd_algconn_nozero['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph.png")#Saving graph to file
plt.clf()
#Graph Gene Counts
orthopd_gene_count_values = orthopd3['Genes'].value_counts() #Getting the count of each gene-count value in the ProteinOrtho dataframe
orthopd_gene_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Graph of Gene Counts')
plt.xlabel('Number of Shared transcripts')
plt.ylabel('Number of Genes with same frequency')
plt.tight_layout()
plt.savefig("ProteinOrtho_gene_graph.png")#Saving graph to file
plt.clf()
#Start gff3pd
print("Working on gff3 dataframe")
gff3pd_column_names = ['SeqID', 'Database', 'Match type', 'Start', 'Stop', 'Score', 'Strand', 'Phase', 'Match information'] #Renaming static gff3 columns
statsgff3pd = pd.read_csv(gff3_statsfile_to_write, sep='\t', names=gff3pd_column_names, header=None, engine="python") #Creating a dataframe for gff3 stats
gff3pd_original_row_number = len(statsgff3pd) #Counting the number of rows in the original gff3pd dataframe for the statistics output
with open(zorbit_statistics, 'a') as f: #Writing the number of rows in the original gff3pd dataframe to the statistics output
f.write("The number of sequences in the original gff3 file is " + str(gff3pd_original_row_number) + "\n")
gff3pd = pd.read_csv(gff3_file_to_write, sep='\t', names=gff3pd_column_names, header=None, engine = "python") #Creating a Pandas dataframe from the gff3 output csv
gff3pd_row_number = len(gff3pd) #Counting the number of rows in the final gff3 file dataframe for the statistics output
gff3pd_max_score = gff3pd['Score'].max() #Finding maximum value in Score column of gff3 dataframe
gff3pd_without_null = gff3pd[gff3pd['Score'] != "."] #Finding minimum value in Score column of gff3 dataframe
gff3pd_without_null_or_zero = gff3pd_without_null[gff3pd_without_null['Score'] != 0.0]
gff3pd_min_score = gff3pd_without_null_or_zero['Score'].min()
statsgff3pd_without_null = statsgff3pd[statsgff3pd['Score'] != "."]
statsgff3pd_max_score = statsgff3pd_without_null['Score'].max()
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the gff3 file after removal of MobiDBLite and duplicates is " + str(gff3pd_row_number) + "\n") #Adding cleaned gff3 stastitics to file
f.write("The range of quality scores for the gff3 file range from " + str(gff3pd_min_score) + " to " + str(gff3pd_max_score) + "\n")#Adding range of scores to statistics file
f.write("The maximum quality score for the original gff3 file is " + str(statsgff3pd_max_score) + "\n")
#Graph database distribution
gff3pd_database_count_values = gff3pd['Database'].value_counts() #Getting the count of each database in the gff3 dataframe
gff3pd_database_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Distribution of Database hits')
plt.xlabel('Database name')
plt.ylabel('Number of Database hits')
plt.tight_layout()
plt.savefig("Gff3_database_graph.png")#Saving graph to file
plt.clf()
#Preparing dataframes for merging
print("Preparing dataframes for merge")
gff3pd['SeqID'] = gff3pd['SeqID'].astype(str) #Setting column type as string
orthopd3['SeqID'] = orthopd3['SeqID'].astype(str) #Setting column type as string
fastapd['SeqID'] = fastapd['SeqID'].astype(str) #Setting column type as string
#Dealing with fna versus faa
protein_flag = 0
if fasta_type == "nucleotide": #Checking to see if the fasta_type is nucleotide
gff3pd_split = gff3pd['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
gff3pd['SeqID'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column
orthopd_split = orthopd3['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
orthopd['SeqID'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column
else:
#Pulling out reading frame information
protein_flag = 1
gff3pd['SeqID2'] = gff3pd['SeqID']
gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra number after the fasta SeqID
gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column
gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column
gff3pd['Reading_Frame'] = gff3pd_split[1] #Setting the gff3 Frame column
gff3pd = gff3pd.drop(['SeqID2'], axis=1)
orthopd3['SeqID2'] = orthopd3['SeqID']
orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
orthopd3['SeqID2'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column
orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
orthopd3['SeqID2'] = orthopd_split[0] #Setting the orthopd SeqID column
orthopd3['Reading_Frame'] = orthopd_split[1] #Setting the gff3 Frame column
orthopd = orthopd3.drop(['SeqID2'], axis=1)
#Merging
print("Combining dataframes")
gff3_ortho_merge = pd.merge(orthopd, gff3pd, how='outer', on=['SeqID']) #Merging the ProteinOrtho and interproscan dataframes
all_merge = pd.merge(gff3_ortho_merge, fastapd, how='outer', on=['SeqID']) #Merging the fasta dataframe with the combined ProteinOrtho/Interproscan dataframes
#Adding marks to merged dataframe to make fasta
all_merge['SeqID'] = all_merge['SeqID'].apply(lambda x: f'>{x}') #Placing > at the beginning of each new line and a tab at the end of SeqID
all_merge['Sequence'] = all_merge['Sequence'].apply(lambda x: f'\n{x}') #Placing a new line before the Sequence data
all_merge = all_merge[ ['SeqID'] + [ col for col in all_merge.columns if col != 'SeqID' ] ] #Moving SeqID to the far left of the dataframe
all_merge = all_merge[ [ col for col in all_merge.columns if col != 'Sequence' ] + ['Sequence'] ] #Moving Sequence to the far right of the dataframe
#Statistics on the merged dataframe
all_merge_both = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] == 1))].index)
all_merge_neither = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] !=1))].index)
all_merge_just_ortho = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] == 1))].index)
all_merge_just_inter = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] !=1))].index)
all_merge_all = len(pd.unique(all_merge['SeqID'])) #Calculating the number of unique sequences
all_merge_both = len(pd.unique(all_merge_both['SeqID'])) #Calculating unique sequences with both interproscan and proteinortho hits
all_merge_neither = len(pd.unique(all_merge_neither['SeqID'])) #Calculating unique sequences without interproscan or proteinortho hits
all_merge_just_ortho = len(pd.unique(all_merge_just_ortho['SeqID'])) #Calculating unique sequences with proteinortho but not interproscan hits
all_merge_just_inter = len(pd.unique(all_merge_just_inter['SeqID'])) #Calculating unique sequences with interproscan but no proteinortho hits
#Writing merged dataframe statistics to file
with open(zorbit_statistics, 'a') as f:
f.write("The total number of unique sequences is: " + str(all_merge_all) + "\n")
f.write("The number of unique sequences with both ProteinOrtho and Interproscan hits is: " + str(all_merge_both) + "\n")
f.write("The number of unique sequences with neither ProteinOrtho nor Interproscan hits is: " + str(all_merge_neither) + "\n")
f.write("The number of unique sequences with only ProteinOrtho and not Interproscan hits is: " + str(all_merge_just_ortho) + "\n")
f.write("The number of unique sequences with only Interproscan and not ProteinOrtho hits is: " + str(all_merge_just_inter) + "\n")
#Excluding based on user preference
if exclusion_flag == 1:
    all_merge = all_merge.drop(all_merge[(all_merge['Genes'] == 1) & (all_merge['Database'].isna())].index)
print("Dropped rows")
#Statistics on the merged dataframe
all_merge_both = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] == 1))].index)
all_merge_neither = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] !=1))].index)
all_merge_just_ortho = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] == 1))].index)
all_merge_just_inter = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] !=1))].index)
all_merge_all = len(pd.unique(all_merge['SeqID'])) #Calculating the number of unique sequences
all_merge_both = len(pd.unique(all_merge_both['SeqID'])) #Calculating unique sequences with both interproscan and proteinortho hits
all_merge_neither = len(pd.unique(all_merge_neither['SeqID'])) #Calculating unique sequences without interproscan or proteinortho hits
all_merge_just_ortho = len(pd.unique(all_merge_just_ortho['SeqID'])) #Calculating unique sequences with proteinortho but not interproscan hits
all_merge_just_inter = len(pd.unique(all_merge_just_inter['SeqID'])) #Calculating unique sequences with interproscan but no proteinortho hits
#Writing merged dataframe statistics to file
with open(zorbit_statistics, 'a') as f:
f.write("The total number of unique sequences (after dropping) is: " + str(all_merge_all) + "\n")
f.write("The number of unique sequences with both ProteinOrtho and Interproscan hits (after dropping) is: " + str(all_merge_both) + "\n")
f.write("The number of unique sequences with neither ProteinOrtho nor Interproscan hits (after dropping) is: " + str(all_merge_neither) + "\n")
f.write("The number of unique sequences with only ProteinOrtho and not Interproscan hits (after dropping) is: " + str(all_merge_just_ortho) + "\n")
f.write("The number of unique sequences with only Interproscan and not ProteinOrtho hits (after dropping) is: " + str(all_merge_just_inter) + "\n")
else:
pass
if protein_flag == 1: #Taking care of aa sequence fastas
#Plotting Frame data as a histogram
all_merge = all_merge.drop(['Reading_Frame_y'], axis=1)
all_merge = all_merge.rename(columns = {"Reading_Frame_x": "Reading_Frame"})
all_merge['Reading_Frame'] = all_merge['Reading_Frame'].astype(str) #Setting column type as string
all_merge_frame_counts = all_merge['Reading_Frame'].value_counts() #Getting the count of Frames in the all_merge dataframe
all_merge_frame_counts.plot(kind='bar') #Graphing the frame counts
plt.title('Distribution of Frame hits')
plt.xlabel('Frame number')
plt.ylabel('Number of Frame hits')
plt.tight_layout()
plt.savefig("Reading_frame.png")#Saving graph to file
plt.clf()
#Calculating number of duplicated, multi-frame sequences there are
all_merge_duplicates = all_merge
all_merge_split = all_merge['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
all_merge_duplicates['SeqID'] = all_merge_split[0] #Setting the ProteinOrtho SeqID column as the split column
all_merge_two_columns = all_merge_duplicates[['SeqID', 'Reading_Frame']]
all_merge_two_columns.to_csv(test2_file, sep='\t', index=False, header=False, quoting=csv.QUOTE_NONE, escapechar="\\")
all_merge_unique_sequence_count = len( | pd.unique(all_merge_two_columns['SeqID']) | pandas.unique |
import time
startTime = time.time()
import pickle
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
csv1 = | pd.read_csv("Desktop/NLP/archive/articles1.csv") | pandas.read_csv |
from Kernel import Kernel
from agent.ExchangeAgent import ExchangeAgent
from agent.examples.QLearningAgent import QLearningAgent
from agent.ZeroIntelligenceAgent import ZeroIntelligenceAgent
from util.order import LimitOrder
from util.oracle.SparseMeanRevertingOracle import SparseMeanRevertingOracle
from util import util
from util.model.QTable import QTable
import numpy as np
import pandas as pd
import sys
from math import ceil, floor
###### Helper functions for this configuration file. Just commonly-used code ######
###### that would otherwise have been repeated many times. ######
def get_rand_obj(seed_obj):
return np.random.RandomState(seed = seed_obj.randint(low = 0, high = 2**32))
###### One-time configuration section. This section sets up definitions that ######
###### will apply to the entire experiment. They will not be repeated or ######
###### reinitialized for each instance of the simulation contained within ######
###### this experiment. ######
### EXPERIMENT CONFIGURATION.
num_consecutive_simulations = 50
### DATE CONFIGURATION.
# Since the simulator often pulls historical data, we use nanosecond
# timestamps (pandas.Timestamp) for all simulation time tracking.
# Thus our discrete time stamps are effectively nanoseconds, although
# they can be interpreted otherwise for ahistorical (e.g. generated)
# simulations. These timestamps do require a valid date component.
midnight = pd.to_datetime('2014-01-28')
### STOCK SYMBOL CONFIGURATION.
symbols = { 'IBM' : { 'r_bar' : 1e5, 'kappa' : 1.67e-12, 'agent_kappa' : 1.67e-15,
'sigma_s' : 0, 'fund_vol' : 1e-4, 'megashock_lambda_a' : 2.77778e-13,
'megashock_mean' : 1e3, 'megashock_var' : 5e4 }
}
### INITIAL AGENT DISTRIBUTION.
### You must instantiate the agents in the same order you record them
### in the agent_types and agent_strats lists. (Currently they are
### parallel arrays.)
###
### When conducting "agent of change" experiments, the new agents should
### be added at the END only.
agent_types = []
agent_strats = []
### Count by agent type.
num_exch = 1
num_zi = 100
num_qlearners = 1
### EXCHANGE AGENTS
mkt_open = midnight + pd.to_timedelta('09:30:00')
mkt_close = midnight + pd.to_timedelta('16:00:00')
### Record the type and strategy of the agents for reporting purposes.
for i in range(num_exch):
agent_types.append("ExchangeAgent")
agent_strats.append("ExchangeAgent")
### ZERO INTELLIGENCE AGENTS
### ZeroIntelligence fixed parameters (i.e. not strategic).
zi_obs_noise = 1000000 # a property of the agent, not an individual stock
### Lay out the ZI strategies (parameter settings) that will be used in this
### experiment, so we can assign particular numbers of agents to each strategy.
### Tuples are: (R_min, R_max, eta).
zi_strategy = [ (0, 250, 1), (0, 500, 1), (0, 1000, 0.8), (0, 1000, 1),
(0, 2000, 0.8), (250, 500, 0.8), (250, 500, 1) ]
### Record the initial distribution of agents to ZI strategies.
### Split the agents as evenly as possible among the strategy settings.
zi = [ floor(num_zi / len(zi_strategy)) ] * len(zi_strategy)
i = 0
while sum(zi) < num_zi:
zi[i] += 1
i += 1
### Record the type and strategy of the agents for reporting purposes.
for i in range(len(zi_strategy)):
x = zi_strategy[i]
strat_name = "Type {} [{} <= R <= {}, eta={}]".format(i+1, x[0], x[1], x[2])
agent_types.extend([ 'ZeroIntelligenceAgent' ] * zi[i])
agent_strats.extend([ 'ZeroIntelligenceAgent ({})'.format(strat_name) ] * zi[i])
### Q-LEARNING AGENTS
### QLearning fixed parameters (i.e. not strategic).
### Record the type and strategy of the agents for reporting purposes.
for i in range(num_qlearners):
agent_types.append("QLearningAgent")
agent_strats.append("QLearningAgent")
### FINAL AGENT PREPARATION
### Record the total number of agents here, so we can create a list of lists
### of random seeds to use for the agents across the iterated simulations.
### Also create an empty list of appropriate size to store agent state
### across simulations (for those agents which require it).
num_agents = num_exch + num_zi + num_qlearners
agent_saved_states = {}
agent_saved_states['agent_state'] = [None] * num_agents
### SIMULATION CONTROL SETTINGS.
# We allow some high-level parameters to be specified on the command line at
# runtime, rather than being explicitly coded in the config file. This really
# only makes sense for parameters that affect the entire series of simulations
# (i.e. the entire "experiment"), rather than a single instance of the simulation.
import argparse
parser = argparse.ArgumentParser(description='Detailed options for sparse_zi config.')
parser.add_argument('-b', '--book_freq', default=None,
help='Frequency at which to archive order book for visualization')
parser.add_argument('-c', '--config', required=True,
help='Name of config file to execute')
parser.add_argument('-l', '--log_dir', default=None,
help='Log directory name (default: unix timestamp at program start)')
parser.add_argument('-o', '--log_orders', action='store_true',
help='Log every order-related action by every agent.')
parser.add_argument('-s', '--seed', type=int, default=None,
help='numpy.random.seed() for simulation')
parser.add_argument('-v', '--verbose', action='store_true',
help='Maximum verbosity!')
parser.add_argument('--config_help', action='store_true',
help='Print argument options for this config file')
args, remaining_args = parser.parse_known_args()
if args.config_help:
parser.print_help()
sys.exit()
# If nothing specifically requested, use starting timestamp. In either case, successive
# simulations will have simulation number appended.
log_dir = args.log_dir
if log_dir is None: log_dir = str(int(pd.Timestamp('now').timestamp()))
# Requested order book snapshot archive frequency.
book_freq = args.book_freq
# Random seed specification on the command line. Default: None (by clock).
# If none, we select one via a specific random method and pass it to seed()
# so we can record it for future use. (You cannot reasonably obtain the
# automatically generated seed when seed() is called without a parameter.)
# Note that this seed is used to (1) make any random decisions within this
# config file itself and (2) to generate random number seeds for the
# (separate) Random objects given to each agent. This ensure that when
# the agent population is appended, prior agents will continue to behave
# in the same manner save for influences by the new agents. (i.e. all prior
# agents still have their own separate PRNG sequence, and it is the same as
# before)
seed = args.seed
if not seed: seed = int(pd.Timestamp.now().timestamp() * 1000000) % (2**32 - 1)
np.random.seed(seed)
# Config parameter that causes util.util.print to suppress most output.
# Also suppresses formatting of limit orders (which is time consuming).
util.silent_mode = not args.verbose
LimitOrder.silent_mode = not args.verbose
# Config parameter that causes every order-related action to be logged by
# every agent. Activate only when really needed as there is a significant
# time penalty to all that object serialization!
log_orders = args.log_orders
print ("Silent mode: {}".format(util.silent_mode))
print ("Logging orders: {}".format(log_orders))
print ("Book freq: {}".format(book_freq))
print ("Configuration seed: {}\n".format(seed))
### STOCHASTIC CONTROL
### For every entity that requires a source of randomness, create (from the global seed)
### a RandomState object, which can be used to generate SEEDS for that entity at the
### start of each simulation. This will permit each entity to receive a different
### seed for each simulation, but the entire experiment will still be deterministic
### given the same initial (global) seed.
kernel_seeds = np.random.RandomState(seed=np.random.randint(low=0,high=2**32))
symbol_seeds = {}
for sym in symbols: symbol_seeds[sym] = np.random.RandomState(seed=np.random.randint(low=0,high=2**32))
agent_seeds = [ np.random.RandomState(seed=np.random.randint(low=0,high=2**32)) ] * num_agents
### LATENCY CONFIGURATION
### Configure a simple message latency matrix for the agents. Each entry is the minimum
### nanosecond delay on communication [from][to] agent ID.
# Square numpy array with dimensions equal to total agent count. Most agents are handled
# at init, drawn from a uniform distribution from:
# Times Square (3.9 miles from NYSE, approx. 21 microseconds at the speed of light) to:
# Pike Place Starbucks in Seattle, WA (2402 miles, approx. 13 ms at the speed of light).
# Other agents can be explicitly set afterward (and the mirror half of the matrix is also).
# This configures all agents to a starting latency as described above.
latency = np.random.uniform(low = 21000, high = 13000000, size=(len(agent_types),len(agent_types)))
# Overriding the latency for certain agent pairs happens below, as does forcing mirroring
# of the matrix to be symmetric.
for i, t1 in zip(range(latency.shape[0]), agent_types):
for j, t2 in zip(range(latency.shape[1]), agent_types):
# Three cases for symmetric array. Set latency when j > i, copy it when i > j, same agent when i == j.
if j > i:
# Presently, strategy agents shouldn't be talking to each other, so we set them to extremely high latency.
if (t1 == "ZeroIntelligenceAgent" and t2 == "ZeroIntelligenceAgent"):
latency[i,j] = 1000000000 * 60 * 60 * 24 # Twenty-four hours.
elif i > j:
# This "bottom" half of the matrix simply mirrors the top.
latency[i,j] = latency[j,i]
else:
# This is the same agent. How long does it take to reach localhost? In our data center, it actually
# takes about 20 microseconds.
latency[i,j] = 20000
# Configure a simple latency noise model for the agents.
# Index is ns extra delay, value is probability of this delay being applied.
noise = [ 0.25, 0.25, 0.20, 0.15, 0.10, 0.05 ]
### FINAL GLOBAL CONFIGURATION FOR ALL SIMULATIONS
# The kernel's start and stop times must be pandas.Timestamp
# objects, including a date. (For ahistorical simulations, the date
# selected is irrelevant.) This range represents the maximum extents
# of simulated time, and should generally be a superset of "market hours".
# There is no requirement these times be on the same date, although
# none of the current agents handle markets closing and reopening.
kernelStartTime = midnight
kernelStopTime = midnight + | pd.to_timedelta('17:00:00') | pandas.to_timedelta |
"""
train a classifier with random forest.
"""
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
def arrange_data(data):
"""
replace categories with one hot forms.
drop useless columns.
"""
# age to 3 categories: less than 16, greater than 15, unknown
data['Age'].fillna(-2, inplace=True)
data.loc[data['Age'] > 15, 'Age'] = -1
data.loc[data['Age'] > 0, 'Age'] = 0
data.loc[data['Age'] == -1, 'Age'] = 1
data.loc[data['Age'] == -2, 'Age'] = 2
# one hot Age
dummies = pd.get_dummies(data['Age'])
dummies.columns = ['Age_0', 'Age_1', 'Age_2']
data = data.join(dummies)
# one hot Embarked
dummies = pd.get_dummies(data['Embarked'])
dummies.columns = ['Embarked_S', 'Embarked_C', 'Embarked_Q']
data = data.join(dummies)
# one hot Pclass
dummies = | pd.get_dummies(data['Pclass']) | pandas.get_dummies |
from lxml import etree
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
import Bio
from Bio import SeqIO
from pathlib import Path
import glob
#console
from tqdm import tqdm as tqdm
import os
import itertools
#jupyter
#from tqdm import tqdm_notebook as tqdm
#not supported in current tqdm version
#from tqdm.autonotebook import tqdm
#import logging
#logging.getLogger('proteomics_utils').addHandler(logging.NullHandler())
#logger=logging.getLogger('proteomics_utils')
#for cd-hit
import subprocess
from sklearn.metrics import f1_score
import hashlib #for mhcii datasets
from utils.dataset_utils import split_clusters_single,pick_all_members_from_clusters
#######################################################################################################
#Parsing all sorts of protein data
#######################################################################################################
def parse_uniprot_xml(filename,max_entries=0,parse_features=[]):
'''parse uniprot xml file, which contains the full uniprot information (e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.xml.gz)
using custom low-level https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
c.f. for full format https://www.uniprot.org/docs/uniprot.xsd
parse_features: a list of strings specifying the kind of features to be parsed such as "modified residue" for phosphorylation sites etc. (see https://www.uniprot.org/help/mod_res)
(see the xsd file for all possible entries)
'''
context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniprot}entry")
context = iter(context)
rows =[]
for _, elem in tqdm(context):
parse_func_uniprot(elem,rows,parse_features=parse_features)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
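# Hedged usage sketch (not part of the original module; the file name and
# max_entries value are assumptions): parse the first 1000 SwissProt records,
# also collecting "modified residue" features such as phosphorylation sites.
def _example_parse_uniprot():
    return parse_uniprot_xml("uniprot_sprot.xml", max_entries=1000,
                             parse_features=["modified residue"])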
def parse_func_uniprot(elem, rows, parse_features=[]):
'''extracting a single record from uniprot xml'''
seqs = elem.findall("{http://uniprot.org/uniprot}sequence")
sequence=""
#print(seqs)
for s in seqs:
sequence=s.text
#print("sequence",sequence)
if sequence =="" or str(sequence)=="None":
continue
else:
break
#Sequence & fragment
sequence=""
fragment_map = {"single":1, "multiple":2}
fragment = 0
seqs = elem.findall("{http://uniprot.org/uniprot}sequence")
for s in seqs:
if 'fragment' in s.attrib:
fragment = fragment_map[s.attrib["fragment"]]
sequence=s.text
if sequence != "":
break
#print("sequence:",sequence)
#print("fragment:",fragment)
#dataset
dataset=elem.attrib["dataset"]
#accession
accession = ""
accessions = elem.findall("{http://uniprot.org/uniprot}accession")
for a in accessions:
accession=a.text
if accession !="":#primary accession! https://www.uniprot.org/help/accession_numbers!!!
break
#print("accession",accession)
#protein existence (PE in plain text)
proteinexistence_map = {"evidence at protein level":5,"evidence at transcript level":4,"inferred from homology":3,"predicted":2,"uncertain":1}
proteinexistence = -1
accessions = elem.findall("{http://uniprot.org/uniprot}proteinExistence")
for a in accessions:
proteinexistence=proteinexistence_map[a.attrib["type"]]
break
#print("protein existence",proteinexistence)
#name
name = ""
names = elem.findall("{http://uniprot.org/uniprot}name")
for n in names:
name=n.text
break
#print("name",name)
#organism
organism = ""
organisms = elem.findall("{http://uniprot.org/uniprot}organism")
for s in organisms:
s1=s.findall("{http://uniprot.org/uniprot}name")
for s2 in s1:
if(s2.attrib["type"]=='scientific'):
organism=s2.text
break
if organism !="":
break
#print("organism",organism)
#dbReference: PMP,GO,Pfam, EC
ids = elem.findall("{http://uniprot.org/uniprot}dbReference")
pfams = []
gos =[]
ecs = []
pdbs =[]
for i in ids:
#print(i.attrib["id"],i.attrib["type"])
#cf. http://geneontology.org/external2go/uniprotkb_kw2go for Uniprot Keyword<->GO mapping
#http://geneontology.org/ontology/go-basic.obo for List of go terms
#https://www.uniprot.org/help/keywords_vs_go keywords vs. go
if(i.attrib["type"]=="GO"):
tmp1 = i.attrib["id"]
for i2 in i:
if i2.attrib["type"]=="evidence":
tmp2= i2.attrib["value"]
gos.append([int(tmp1[3:]),int(tmp2[4:])]) #first value is go code, second eco evidence ID (see mapping below)
elif(i.attrib["type"]=="Pfam"):
pfams.append(i.attrib["id"])
elif(i.attrib["type"]=="EC"):
ecs.append(i.attrib["id"])
elif(i.attrib["type"]=="PDB"):
pdbs.append(i.attrib["id"])
#print("PMP: ", pmp)
#print("GOs:",gos)
#print("Pfams:",pfam)
#print("ECs:",ecs)
#print("PDBs:",pdbs)
#keyword
keywords = elem.findall("{http://uniprot.org/uniprot}keyword")
keywords_lst = []
#print(keywords)
for k in keywords:
keywords_lst.append(int(k.attrib["id"][-4:]))#remove the KW-
#print("keywords: ",keywords_lst)
#comments = elem.findall("{http://uniprot.org/uniprot}comment")
#comments_lst=[]
##print(comments)
#for c in comments:
# if(c.attrib["type"]=="function"):
# for c1 in c:
# comments_lst.append(c1.text)
#print("function: ",comments_lst)
#ptm etc
if len(parse_features)>0:
ptms=[]
features = elem.findall("{http://uniprot.org/uniprot}feature")
for f in features:
if(f.attrib["type"] in parse_features):#only add features of the requested type
locs=[]
for l in f[0]:
locs.append(int(l.attrib["position"]))
ptms.append([f.attrib["type"],f.attrib["description"] if 'description' in f.attrib else "NaN",locs, f.attrib['evidence'] if 'evidence' in f.attrib else "NaN"])
#print(ptms)
data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":proteinexistence, "fragment":fragment, "organism":organism, "ecs": ecs, "pdbs": pdbs, "pfams" : pfams, "keywords": keywords_lst, "gos": gos, "sequence": sequence}
if len(parse_features)>0:
data_dict["features"]=ptms
#print("all children:")
#for c in elem:
# print(c)
# print(c.tag)
# print(c.attrib)
rows.append(data_dict)
def parse_uniprot_seqio(filename,max_entries=0):
'''parse a UniProt XML file using the SeqIO parser (reduced functionality, e.g. it does not extract evidence codes for GO terms)'''
sprot = SeqIO.parse(filename, "uniprot-xml")
rows = []
for p in tqdm(sprot):
accession = str(p.name)
name = str(p.id)
dataset = str(p.annotations['dataset'])
organism = str(p.annotations['organism'])
ecs, pdbs, pfams, gos = [],[],[],[]
for ref in p.dbxrefs:
k = ref.split(':')
if k[0] == 'GO':
gos.append(':'.join(k[1:]))
elif k[0] == 'Pfam':
pfams.append(k[1])
elif k[0] == 'EC':
ecs.append(k[1])
elif k[0] == 'PDB':
pdbs.append(k[1:])
if 'keywords' in p.annotations.keys():
keywords = p.annotations['keywords']
else:
keywords = []
sequence = str(p.seq)
row = {
'ID': accession,
'name':name,
'dataset':dataset,
'organism':organism,
'ecs':ecs,
'pdbs':pdbs,
'pfams':pfams,
'keywords':keywords,
'gos':gos,
'sequence':sequence}
rows.append(row)
if(max_entries>0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
def filter_human_proteome(df_sprot):
'''extracts the human proteome from Swiss-Prot proteins in a DataFrame with an organism column'''
is_Human = np.char.find(df_sprot.organism.values.astype(str), "Human") !=-1
is_human = np.char.find(df_sprot.organism.values.astype(str), "human") !=-1
is_sapiens = np.char.find(df_sprot.organism.values.astype(str), "sapiens") !=-1
is_Sapiens = np.char.find(df_sprot.organism.values.astype(str), "Sapiens") !=-1
return df_sprot[is_Human|is_human|is_sapiens|is_Sapiens]
def filter_aas(df, exclude_aas=["B","J","X","Z"]):
'''excludes sequences containing exclude_aas: B = D or N, J = I or L, X = unknown, Z = E or Q'''
return df[~df.sequence.apply(lambda x: any([e in x for e in exclude_aas]))]
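# Usage sketch (illustrative only; the XML path and entry limit are assumptions, not
# part of this module): chain the SeqIO-based parser defined above with the organism
# and amino-acid filters to obtain a clean human-proteome DataFrame.
def example_human_proteome(xml_path="uniprot_sprot.xml", max_entries=1000):
    df_sprot = parse_uniprot_seqio(xml_path, max_entries=max_entries)
    return filter_aas(filter_human_proteome(df_sprot))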
######################################################################################################
def explode_clusters_df(df_cluster):
'''aux. function to convert cluster dataframe from one row per cluster to one row per ID'''
df=df_cluster.reset_index(level=0)
rows = []
if('repr_accession' in df.columns):#include representative if it exists
_ = df.apply(lambda row: [rows.append([nn,row['entry_id'], row['repr_accession']==nn ]) for nn in row.members], axis=1)
df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID","representative"]).set_index(['ID'])
else:
_ = df.apply(lambda row: [rows.append([nn,row['entry_id']]) for nn in row.members], axis=1)
df_exploded = | pd.DataFrame(rows, columns=['ID',"cluster_ID"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from utils import duplicates_in_list
from sklearn.cluster import KMeans
from matplotlib import pyplot
import random
def combine_estimation_methods():
return None
def conc_flood_fill_com(conc_estimated, threshold=0.95):
"""
Assign each source category a contiguous block of columns by flood-filling outward from its probability-weighted center of mass in conc_estimated.
"""
conc_estimated_tr = conc_estimated.transpose().reset_index()
conc_estimated_tr['index_int'] = conc_estimated_tr.index
original_cols = conc_estimated.index.to_list()
# Find center of mass for each source category
positions = []
for c in original_cols:
# Cap the threshold at 95% of the column maximum so every column yields at least one match
if conc_estimated_tr[c].max()*0.95 < threshold:
tmp_threshold = conc_estimated_tr[c].max()*0.95
else:
tmp_threshold = threshold
# Select values larger than the threshold
c_top_prob = conc_estimated_tr.loc[conc_estimated_tr[c] > tmp_threshold][[c, 'index_int']]
count_matches = c_top_prob.shape[0]
assert count_matches > 0, 'No predictions for source label: ' + c
probs = c_top_prob[c].values.reshape(-1, 1)
idxs = c_top_prob['index_int'].values.reshape(-1, 1)
com = np.average(idxs, axis=0, weights=probs**3)
positions.append(int(com))
# Initialise at the com points
conc_ar = np.zeros(conc_estimated.shape, dtype=int)
conc_est_ar = conc_estimated.to_numpy()
conc_rel_imp = conc_est_ar / conc_est_ar.sum(axis=1).reshape(-1, 1) # relative importances
for i, j in enumerate(positions):
if sum(conc_ar[:, j]) == 0:
conc_ar[i, j] = 1
elif sum(conc_ar[:, j+1]) == 0:
conc_ar[i, j+1] = 1
elif sum(conc_ar[:, j - 1]) == 0:
conc_ar[i, j - 1] = 1
assert all(np.sum(conc_ar, axis=0) <= 1), 'Source matches are not unique!'
# Flood fill
k = 1
hard_max_iter = 15000
max_iter = 4000
n_columns = conc_estimated.shape[1]
while sum(sum(conc_ar)) < n_columns and k <= hard_max_iter:
for i, row in enumerate(conc_ar):
# Indices already set in this row
matches = np.nonzero(row)
assert matches is not None
if matches[0] is not None and len(matches[0]) > 0:
# Choose direction in which to move
move = [min(matches[0]) - 1, max(matches[0]) + 1]
possible_moves = [x for x in move if 0 <= x <= n_columns-1] # not reached edge
if possible_moves is not None and possible_moves != []:
next_idx = random.choice(possible_moves)
if sum(conc_ar[:, next_idx]) == 0: # column has no entries in other rows
prob_new = conc_est_ar[i, next_idx]
if prob_new >= min(min(conc_est_ar[i, matches])):
conc_ar[i, next_idx] = 1
elif prob_new * (1+(k/max_iter)) >= np.quantile(conc_est_ar[i, :], 0.98):
conc_ar[i, next_idx] = 1
elif conc_rel_imp[i, next_idx] * (1+(0.8*k/max_iter)) > max(conc_rel_imp[:, next_idx]):
conc_ar[i, next_idx] = 1
elif np.sum(conc_ar[i, 0:next_idx], axis=0) == 0: # lower edge
assert sum(conc_ar[:, next_idx]) == 0
conc_ar[i, next_idx] = 1
elif np.sum(conc_ar[i, next_idx:], axis=0) == 0: # upper edge
assert sum(conc_ar[:, next_idx]) == 0
conc_ar[i, next_idx] = 1
k = k + 1
# Log progress
if k % 100 == 0:
completeness = sum(conc_ar.reshape(-1))/n_columns * 100
print('k: ' + str(k) + ', fill: ' + str(round(completeness)) + '% complete')
# Tests
assert all(np.sum(conc_ar, axis=0) <= 1), 'Source matches are not unique!'
# Convert to dataframe
conc_com = | pd.DataFrame(conc_ar, columns=conc_estimated.columns, index=conc_estimated.index, dtype=int) | pandas.DataFrame |
# -*- coding: UTF-8 -*-
import pandas
class Group(object):
def __init__(self):
self.arrayDeFrames = []
self.ENTITY_NULL = "nulo"
def removeEmptyEntities(self, dataFrame):
return dataFrame[dataFrame.LATITUDE.astype(str) != self.ENTITY_NULL]
def main(self, files, filename, SSPDS=True):
for file in files:
dataFrame = | pandas.read_csv("./data/" + file + ".csv") | pandas.read_csv |
"""Project: PhiK - correlation analyzer library
Created: 2018/09/05
Description:
Functions for the Phik correlation calculation
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
from typing import Tuple, Union, Optional
import numpy as np
import itertools
import pandas as pd
from joblib import Parallel, delayed
from scipy.linalg import inv, pinv
from phik import definitions as defs
from .bivariate import phik_from_chi2
from .statistics import (
get_chi2_using_dependent_frequency_estimates,
estimate_simple_ndof,
get_pearson_chi_square,
)
from .binning import create_correlation_overview_table, bin_data, auto_bin_data
from .data_quality import dq_check_hist2d
from .utils import array_like_to_dataframe, make_shapes_equal
def spark_phik_matrix_from_hist2d_dict(spark_context, hist_dict: dict):
"""Correlation matrix of bivariate gaussian using spark parallelization over variable-pair 2d histograms
See spark notebook phik_tutorial_spark.ipynb as example.
Each input histogram gets converted into correlation coefficient of bivariate gauss
with correlation value rho, assuming given binning and number of records.
Correlation coefficient value is between 0 and 1.
:param spark_context: spark context
:param hist_dict: dict of 2d numpy grids with value-counts. keys are histogram names.
:return: phik correlation matrix
"""
for k, v in hist_dict.items():
if not isinstance(v, np.ndarray) or v.ndim != 2:
raise TypeError("hist_dict should be a dictionary of 2d numpy arrays.")
hist_list = list(hist_dict.items())
hist_rdd = spark_context.parallelize(hist_list)
phik_rdd = hist_rdd.map(_phik_from_row)
phik_list = phik_rdd.collect()
phik_overview = create_correlation_overview_table(phik_list)
return phik_overview
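# Usage sketch (illustrative; `spark_context` and `df` are assumed to be supplied by the
# caller): pre-bin every variable pair into a 2d count grid with pd.crosstab, keyed as
# "var1:var2", which is the key format _phik_from_row expects.
def example_spark_phik(spark_context, df: pd.DataFrame) -> pd.DataFrame:
    hist_dict = {
        "{}:{}".format(c1, c2): pd.crosstab(df[c1], df[c2]).values
        for c1, c2 in itertools.combinations_with_replacement(df.columns, 2)
    }
    return spark_phik_matrix_from_hist2d_dict(spark_context, hist_dict)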
def _phik_from_row(row: Tuple[str, np.ndarray]) -> Tuple[str, str, float]:
"""Helper function for spark parallel processing
:param row: rdd row, where row[0] is key and rdd[1]
:return: union of key, phik-value
"""
key, grid = row
c = key.split(":")
if len(c) == 2 and c[0] == c[1]:
return c[0], c[1], 1.0
try:
phik_value = phik_from_hist2d(grid)
except TypeError:
phik_value = np.nan
return c[0], c[1], phik_value
def phik_from_hist2d(
observed: np.ndarray, noise_correction: bool = True, expected: np.ndarray = None
) -> float:
"""
correlation coefficient of bivariate gaussian derived from chi2-value
Chi2-value gets converted into correlation coefficient of bivariate gauss
with correlation value rho, assuming given binning and number of records.
Correlation coefficient value is between 0 and 1.
Bivariate gaussian's range is set to [-5,5] by construction.
:param observed: 2d-array observed values
:param bool noise_correction: apply noise correction in phik calculation
:param expected: 2d-array expected values. Optional, default is None, otherwise evaluated automatically.
:returns float: correlation coefficient phik
"""
if isinstance(observed, pd.DataFrame):
observed = observed.values
if isinstance(expected, pd.DataFrame):
expected = expected.values
# chi2 contingency test
chi2 = (
get_chi2_using_dependent_frequency_estimates(observed, lambda_="pearson")
if expected is None
else get_pearson_chi_square(observed, expected)
)
# noise pedestal
pedestal = estimate_simple_ndof(observed) if noise_correction else 0
if pedestal < 0:
pedestal = 0
# phik calculation adds noise pedestal to theoretical chi2
return phik_from_chi2(chi2, observed.sum(), *observed.shape, pedestal=pedestal)
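# Minimal usage sketch (the observed counts are illustrative, not real data): a single
# 2d contingency table is converted into one phik coefficient in [0, 1].
def _example_phik_from_hist2d() -> float:
    obs = np.array([[10, 20], [30, 5]])
    return phik_from_hist2d(obs)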
def phik_from_rebinned_df(
data_binned: pd.DataFrame,
noise_correction: bool = True,
dropna: bool = True,
drop_underflow: bool = True,
drop_overflow: bool = True,
njobs: int = -1,
) -> pd.DataFrame:
"""
Correlation matrix of bivariate gaussian derived from chi2-value
Chi2-value gets converted into correlation coefficient of bivariate gauss
with correlation value rho, assuming given binning and number of records.
Correlation coefficient value is between 0 and 1.
Bivariate gaussian's range is set to [-5,5] by construction.
:param pd.DataFrame data_binned: input data where interval variables have been binned
:param bool noise_correction: apply noise correction in phik calculation
:param bool dropna: remove NaN values with True
:param bool drop_underflow: do not take into account records in underflow bin when True (relevant when binning\
a numeric variable)
:param bool drop_overflow: do not take into account records in overflow bin when True (relevant when binning\
a numeric variable)
:param int njobs: number of parallel jobs used for calculation of phik. default is -1. 1 uses no parallel jobs.
:return: phik correlation matrix
"""
if not dropna:
# if not dropna replace the NaN values with the string NaN. Otherwise the rows with NaN are dropped
# by the groupby.
data_binned.replace(np.nan, defs.NaN, inplace=True)
if drop_underflow:
data_binned.replace(defs.UF, np.nan, inplace=True)
if drop_overflow:
data_binned.replace(defs.OF, np.nan, inplace=True)
# cache column order (https://github.com/KaveIO/PhiK/issues/1)
column_order = data_binned.columns
if njobs == 1:
# Useful when for instance using cProfiler: https://docs.python.org/3/library/profile.html
phik_list = [
_calc_phik(co, data_binned[list(co)], noise_correction)
for co in itertools.combinations_with_replacement(
data_binned.columns.values, 2
)
]
else:
phik_list = Parallel(n_jobs=njobs)(
delayed(_calc_phik)(co, data_binned[list(co)], noise_correction)
for co in itertools.combinations_with_replacement(
data_binned.columns.values, 2
)
)
if len(phik_list) == 0:
return pd.DataFrame(np.nan, index=column_order, columns=column_order)
phik_overview = create_correlation_overview_table(phik_list)
# restore column order
phik_overview = phik_overview.reindex(columns=column_order)
phik_overview = phik_overview.reindex(index=column_order)
return phik_overview
def _calc_phik(
comb: tuple, data_binned: pd.DataFrame, noise_correction: bool
) -> Tuple[str, str, float]:
"""Split off calculation of phik for parallel processing
:param tuple comb: union of two string columns
:param pd.DataFrame data_binned: input data where interval variables have been binned
:param bool noise_correction: apply noise correction in phik calculation
:return: tuple of variable-x, variable-y, phik-value
"""
c0, c1 = comb
if c0 == c1:
return c0, c1, 1.0
datahist = data_binned.groupby([c0, c1])[c0].count().to_frame().unstack().fillna(0)
# If 0 or only 1 values for one of the two variables, it is not possible to calculate phik.
# This check needs to be done after creation of OF, UF and NaN bins.
if any([v in datahist.shape for v in [0, 1]]):
return c0, c1, np.nan
datahist.columns = datahist.columns.droplevel()
phikvalue = phik_from_hist2d(datahist.values, noise_correction=noise_correction)
return c0, c1, phikvalue
def phik_matrix(
df: pd.DataFrame,
interval_cols: Optional[list] = None,
bins: Union[int, list, np.ndarray, dict] = 10,
quantile: bool = False,
noise_correction: bool = True,
dropna: bool = True,
drop_underflow: bool = True,
drop_overflow: bool = True,
verbose: bool = True,
njobs: int = -1,
) -> pd.DataFrame:
"""
Correlation matrix of bivariate gaussian derived from chi2-value
Chi2-value gets converted into correlation coefficient of bivariate gauss
with correlation value rho, assuming given binning and number of records.
Correlation coefficient value is between 0 and 1.
Bivariate gaussian's range is set to [-5,5] by construction.
:param pd.DataFrame df: input data
:param list interval_cols: column names of columns with interval variables.
:param bins: number of bins, or a list of bin edges (same for all columns), or a dictionary where per column the\
bins are specified (default=10). E.g.: bins = {'mileage':5, 'driver_age':[18,25,35,45,55,65,125]}
:param quantile: when bins is an integer, uniform bins (False) or bins based on quantiles (True)
:param bool noise_correction: apply noise correction in phik calculation
:param bool dropna: remove NaN values with True
:param bool drop_underflow: do not take into account records in underflow bin when True (relevant when binning\
a numeric variable)
:param bool drop_overflow: do not take into account records in overflow bin when True (relevant when binning\
a numeric variable)
:param bool verbose: if False, do not print all interval columns that are guessed
:param int njobs: number of parallel jobs used for calculation of phik. default is -1. 1 uses no parallel jobs.
:return: phik correlation matrix
"""
data_binned, binning_dict = auto_bin_data(
df=df,
interval_cols=interval_cols,
bins=bins,
quantile=quantile,
dropna=dropna,
verbose=verbose,
)
return phik_from_rebinned_df(
data_binned,
noise_correction,
dropna=dropna,
drop_underflow=drop_underflow,
drop_overflow=drop_overflow,
njobs=njobs,
)
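# Usage sketch (column names and synthetic data are illustrative): a mixed
# numeric/categorical frame where the interval column is binned automatically before
# the full phik matrix is computed.
def _example_phik_matrix() -> pd.DataFrame:
    rng = np.random.default_rng(0)
    df = pd.DataFrame(
        {
            "age": rng.integers(18, 80, size=500),
            "group": rng.choice(list("abc"), size=500),
        }
    )
    return phik_matrix(df, interval_cols=["age"])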
def global_phik_from_rebinned_df(
data_binned: pd.DataFrame,
noise_correction: bool = True,
dropna: bool = True,
drop_underflow: bool = True,
drop_overflow: bool = True,
njobs: int = -1,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Global correlation values of variables, obtained from the PhiK correlation matrix.
A global correlation value is a simple approximation of how well one feature can be modeled in terms of all others.
The global correlation coefficient is a number between zero and one, obtained from the PhiK correlation matrix,
that gives the highest possible correlation between a variable and the linear combination of all other variables.
See PhiK paper or for original definition: https://inspirehep.net/literature/101965
Global PhiK uses two important simplifications / approximations:
- The variables are assumed to belong to a multinormal distribution, which is typically not the case.
- The correlation should be a Pearson correlation matrix, allowing for negative values, which is not the case
with PhiK correlations (which are positive by construction).
To correct for these, the Global PhiK values are artificially capped between 0 and 1.
Still, the Global PhiK values are useful, quick, simple estimates that are interesting in an exploratory study.
For a solid, trustworthy estimate be sure to use a classifier or regression model instead.
:param pd.DataFrame data_binned: rebinned input data
:param bool noise_correction: apply noise correction in phik calculation
:param bool dropna: remove NaN values with True
:param bool drop_underflow: do not take into account records in underflow bin when True (relevant when binning\
a numeric variable)
:param bool drop_overflow: do not take into account records in overflow bin when True (relevant when binning\
a numeric variable)
:param int njobs: number of parallel jobs used for calculation of phik. default is -1. 1 uses no parallel jobs.
:return: global correlations array
"""
phik_overview = phik_from_rebinned_df(
data_binned,
noise_correction,
dropna=dropna,
drop_underflow=drop_underflow,
drop_overflow=drop_overflow,
njobs=njobs,
)
V = phik_overview.values
# use the exact inverse only when V is well-conditioned
if np.linalg.cond(V) < 1 / np.finfo(V.dtype).eps:
Vinv = inv(V)
else:
# use pseudo inverse to try handle finite but ill-conditioned arrays;
# non-finite values will still trigger an exception
Vinv = pinv(V)
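# Multiple-correlation formula applied to the PhiK matrix:
# g_j = sqrt(1 - 1 / (V_jj * (V^-1)_jj)) for each variable j, capped to [0, 1] below.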
global_correlations = np.sqrt(
1 - (1 / (np.diagonal(V) * np.diagonal(Vinv)))
)[:, None]
# Cap values to [0.0, 1.0] domain. See issue:
# https://github.com/KaveIO/PhiK/issues/37
global_correlations[global_correlations > 1.0] = 1.0
global_correlations[global_correlations < 0.0] = 0.0
return global_correlations, phik_overview.index.values
def global_phik_array(
df: pd.DataFrame,
interval_cols: list = None,
bins: Union[int, list, np.ndarray, dict] = 10,
quantile: bool = False,
noise_correction: bool = True,
dropna: bool = True,
drop_underflow: bool = True,
drop_overflow: bool = True,
verbose: bool = True,
njobs: int = -1,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Global correlation values of variables, obtained from the PhiK correlation matrix.
A global correlation value is a simple approximation of how well one feature can be modeled in terms of all others.
The global correlation coefficient is a number between zero and one, obtained from the PhiK correlation matrix,
that gives the highest possible correlation between a variable and the linear combination of all other variables.
See PhiK paper or for original definition: https://inspirehep.net/literature/101965
Global PhiK uses two important simplifications / approximations:
- The variables are assumed to belong to a multinormal distribution, which is typically not the case.
- The correlation should be a Pearson correlation matrix, allowing for negative values, which is not the case
with PhiK correlations (which are positive by construction).
To correct for these, the Global PhiK values are artificially capped between 0 and 1.
Still, the Global PhiK values are useful, quick, simple estimates that are interesting in an exploratory study.
For a solid, trustworthy estimate be sure to use a classifier or regression model instead.
:param pd.DataFrame df: input data
:param list interval_cols: column names of columns with interval variables.
:param bins: number of bins, or a list of bin edges (same for all columns), or a dictionary where per column the\
bins are specified (default=10). E.g.: bins = {'mileage':5, 'driver_age':[18,25,35,45,55,65,125]}
:param quantile: when bins is an integer, uniform bins (False) or bins based on quantiles (True)
:param bool noise_correction: apply noise correction in phik calculation
:param bool dropna: remove NaN values with True
:param bool drop_underflow: do not take into account records in underflow bin when True (relevant when binning\
a numeric variable)
:param bool drop_overflow: do not take into account records in overflow bin when True (relevant when binning\
a numeric variable)
:param bool verbose: if False, do not print all interval columns that are guessed
:param int njobs: number of parallel jobs used for calc of global phik. default is -1. 1 uses no parallel jobs.
:return: global correlations array
"""
data_binned, binning_dict = auto_bin_data(
df=df,
interval_cols=interval_cols,
bins=bins,
quantile=quantile,
dropna=dropna,
verbose=verbose,
)
return global_phik_from_rebinned_df(
data_binned,
noise_correction=noise_correction,
dropna=dropna,
drop_underflow=drop_underflow,
drop_overflow=drop_overflow,
njobs=njobs,
)
def phik_from_array(
x: Union[np.ndarray, pd.Series],
y: Union[np.ndarray, pd.Series],
num_vars: Union[str, list] = None,
bins: Union[int, dict, list, np.ndarray] = 10,
quantile: bool = False,
noise_correction: bool = True,
dropna: bool = True,
drop_underflow: bool = True,
drop_overflow: bool = True,
) -> float:
"""
Correlation coefficient of bivariate gaussian derived from chi2-value
Chi2-value gets converted into correlation coefficient of bivariate gauss
with correlation value rho, assuming given binning and number of records.
Correlation coefficient value is between 0 and 1.
Bivariate gaussian's range is set to [-5,5] by construction.
:param x: array-like input
:param y: array-like input
:param num_vars: list of numeric variables which need to be binned, e.g. ['x'] or ['x','y']
:param bins: number of bins, or a list of bin edges (same for all columns), or a dictionary where per column the\
bins are specified (default=10). E.g.: bins = {'mileage':5, 'driver_age':[18,25,35,45,55,65,125]}
:param quantile: when bins is an integer, uniform bins (False) or bins based on quantiles (True)
:param bool noise_correction: apply noise correction in phik calculation
:param bool dropna: remove NaN values with True
:param bool drop_underflow: do not take into account records in underflow bin when True (relevant when binning\
a numeric variable)
:param bool drop_overflow: do not take into account records in overflow bin when True (relevant when binning\
a numeric variable)
:return: phik correlation coefficient
"""
if num_vars is None:
num_vars = []
elif isinstance(num_vars, str):
num_vars = [num_vars]
if len(num_vars) > 0:
df = array_like_to_dataframe(x, y)
x, y = bin_data(df, num_vars, bins=bins, quantile=quantile).T.values
return phik_from_binned_array(
x,
y,
noise_correction=noise_correction,
dropna=dropna,
drop_underflow=drop_underflow,
drop_overflow=drop_overflow,
)
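# Usage sketch (synthetic, correlated inputs; the names "x" and "y" follow the num_vars
# convention in the docstring). Both inputs are numeric, so both are binned.
def _example_phik_from_array() -> float:
    rng = np.random.default_rng(1)
    x = rng.normal(size=1000)
    y = x + rng.normal(scale=0.5, size=1000)
    return phik_from_array(x, y, num_vars=["x", "y"])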
def phik_from_binned_array(
x: Union[np.ndarray, pd.Series],
y: Union[np.ndarray, pd.Series],
noise_correction: bool = True,
dropna: bool = True,
drop_underflow: bool = True,
drop_overflow: bool = True,
) -> float:
"""
Correlation coefficient of bivariate gaussian derived from chi2-value
Chi2-value gets converted into correlation coefficient of bivariate gauss
with correlation value rho, assuming given binning and number of records.
Correlation coefficient value is between 0 and 1.
Bivariate gaussian's range is set to [-5,5] by construction.
:param x: array-like input. Interval variables need to be binned beforehand.
:param y: array-like input. Interval variables need to be binned beforehand.
:param bool noise_correction: apply noise correction in phik calculation
:param bool dropna: remove NaN values with True
:param bool drop_underflow: do not take into account records in underflow bin when True (relevant when binning\
a numeric variable)
:param bool drop_overflow: do not take into account records in overflow bin when True (relevant when binning\
a numeric variable)
:return: phik correlation coefficient
"""
if not dropna:
x = pd.Series(x).fillna(defs.NaN).astype(str).values
y = pd.Series(y).fillna(defs.NaN).astype(str).values
if drop_underflow or drop_overflow:
x = pd.Series(x).astype(str).values
y = pd.Series(y).astype(str).values
if drop_underflow:
x[np.where(x == defs.UF)] = np.nan
y[np.where(y == defs.UF)] = np.nan
if drop_overflow:
y[np.where(y == defs.OF)] = np.nan
x[np.where(x == defs.OF)] = np.nan
hist2d = pd.crosstab(x, y).values
dq_okay = dq_check_hist2d(hist2d)
if not dq_okay:
return np.nan
return phik_from_hist2d(hist2d, noise_correction=noise_correction)
def phik_observed_vs_expected_from_rebinned_df(
obs_binned: pd.DataFrame,
exp_binned: pd.DataFrame,
noise_correction: bool = True,
dropna: bool = True,
drop_underflow: bool = True,
drop_overflow: bool = True,
njobs: int = -1,
) -> pd.DataFrame:
"""
PhiK correlation matrix of comparing observed with expected dataset
Chi2-value gets converted into correlation coefficient of bivariate gauss
with correlation value rho, assuming given binning and number of records.
Correlation coefficient value is between 0 and 1.
:param pd.DataFrame obs_binned: observed input data where interval variables have been binned
:param pd.DataFrame exp_binned: expected input data where interval variables have been binned
:param bool noise_correction: apply noise correction in phik calculation
:param bool dropna: remove NaN values with True
:param bool drop_underflow: do not take into account records in underflow bin when True (relevant when binning\
a numeric variable)
:param bool drop_overflow: do not take into account records in overflow bin when True (relevant when binning\
a numeric variable)
:param int njobs: number of parallel jobs used for calculation of phik. default is -1. 1 uses no parallel jobs.
:return: phik correlation matrix
"""
# basic checks
if isinstance(obs_binned, np.ndarray):
obs_binned = | pd.DataFrame(obs_binned) | pandas.DataFrame |
import os
import pandas as pd
import pickle
import numpy as np
from sklearn import preprocessing, impute
import directories
RND_SEED = 1729
def preprocess_training_inputs():
with open(os.path.join(directories.model_dir,"training_features.p"),"rb") as file:
training_inputs = pickle.load(file)
# ground truth
training_outputs = training_inputs.pop("LABEL")
# encode F/M as 1/0
enc = preprocessing.OrdinalEncoder().fit(pd.DataFrame(training_inputs.GENDER))
training_inputs.GENDER = enc.transform(pd.DataFrame(training_inputs.GENDER))
# extract epoch (number of 6-hr windows away from pressor event)
# and patient-tag indicating pressor nor not (observed label)
# and subject ID
training_times = training_inputs.pop("EPOCH")
pressor_at_all = training_inputs.pop("EPISODE")
pressor_at_all = pressor_at_all==1
training_groups = training_inputs.pop("SUBJECT_ID").values
hadm_id = training_inputs.pop("HADM_ID")
scaler = preprocessing.StandardScaler().fit(training_inputs)
with open(os.path.join(directories.model_dir,"scaler_encoder.p"), "wb") as file:
pickle.dump({"scaler": scaler,"encoder": enc}, file)
X = training_inputs
X = pd.DataFrame(X,columns=training_inputs.columns)
X["EPOCH"] = training_times.values
X["PRESSOR_AT_ALL"] = pressor_at_all.values
X["SUBJECT_ID"] = training_groups
X["HADM_ID"] = hadm_id.values
y = training_outputs.values.astype(int)
with open(os.path.join(directories.model_dir,"training_set.p"),"wb") as file:
pickle.dump({"X": X, "y": y, "cv_groups": training_groups}, file)
def preprocess_test_inputs():
with open(os.path.join(directories.model_dir, "test_features.p"), "rb") as file:
test_inputs = pickle.load(file)
# ground truth
test_outputs = test_inputs.pop("LABEL")
# encode F/M as 1/0
with open(os.path.join(directories.model_dir, "scaler_encoder.p"), "rb") as file:
dict = pickle.load(file)
enc = dict["encoder"]
test_inputs.GENDER = enc.transform(pd.DataFrame(test_inputs.GENDER))
# extract epoch (number of 6-hr windows away from pressor event)
# and patient-tag indicating pressor nor not (observed label)
# and subject ID
test_times = test_inputs.pop("EPOCH")
pressor_at_all = test_inputs.pop("EPISODE")
pressor_at_all = pressor_at_all==1
test_groups = test_inputs.pop("SUBJECT_ID").values
hadm_id = test_inputs.pop("HADM_ID")
X = test_inputs
X = | pd.DataFrame(X,columns=test_inputs.columns) | pandas.DataFrame |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
df = DataFrame({"dates": column})
df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
def test_loc_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range("20130101", periods=4))
df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
df["F"] = np.timedelta64("NaT")
df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)
df["H"] = np.datetime64("NaT")
result = df.dtypes
expected = Series(
[np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
index=list("ABCDEFGH"),
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_indexing_mixed(self):
df = DataFrame(
{
0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
1: {
35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139,
},
2: {
35: np.nan,
40: np.nan,
43: 0.29012581014105987,
49: np.nan,
50: np.nan,
},
3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
4: {
35: 0.34215328467153283,
40: np.nan,
43: np.nan,
49: np.nan,
50: np.nan,
},
"y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
}
)
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
tm.assert_frame_equal(df2, expected)
df["foo"] = "test"
msg = "not supported between instances|unorderable types"
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
def test_type_error_multiindex(self):
# See gh-12218
mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
dg = DataFrame(
[[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
)
with pytest.raises(InvalidIndexError, match="slice"):
dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
)
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
tm.assert_frame_equal(result, expected)
name = ("x", 0)
index = Index(range(2), name="i")
expected = Series([1, 3], index=index, name=name)
result = dg["x", 0]
tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
# GH#36490
df = DataFrame(
np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
)
expected = df.iloc[:, 0]
res = df[0.5]
tm.assert_series_equal(res, expected)
res = df.loc[:, 0.5]
tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
# GH#43422
        df = DataFrame(columns=["a", "b"], dtype=object)
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
import http.client
import os
import shutil
import re
import subprocess
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from editdistance import eval as editdist # Alternative library: python-levenshtein
from PIL import Image
import pytesseract
class VisualLinker(object):
def __init__(self, time=False, verbose=False):
self.pdf_file = None
self.verbose = verbose
self.time = time
self.coordinate_map = None
self.pdf_word_list = None
self.pdf_image_list = None
self.coordinate_image_map = None
self.OCR_list = None
self.html_word_list = None
self.links = None
self.pdf_dim = None
self.html_pdf_dim_ratio = None
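        # Split tokens on punctuation commonly glued to words by PDF extraction:
        # parentheses, commas, question marks, unicode minus/quote/degree signs,
        # asterisks, apostrophes, colons not preceded by "http", trailing
        # periods, and ellipses.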
delimiters = u"([\(\)\,\?\u2212\u201C\u201D\u2018\u2019\u00B0\*\']|(?<!http):|\.$|\.\.\.)"
self.separators = re.compile(delimiters)
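    # Usage sketch (illustrative only; `phrases`, `figures`, the document name
    # and the PDF directory come from the caller's pipeline and are not part of
    # this module):
    #   vl = VisualLinker(verbose=True)
    #   for obj in vl.parse_visual('my_doc', phrases, figures, '/data/pdfs/'):
    #       ...  # persist the phrase/figure objects with updated coordinates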
def parse_visual(self, document_name, phrases, figures, pdf_path):
self.phrases = phrases
self.figures = figures
self.pdf_file = pdf_path + document_name + '.pdf'
if not os.path.isfile(self.pdf_file):
self.pdf_file = self.pdf_file[:-3] + "PDF"
try:
self.extract_pdf_image_words()
#self.extract_pdf_words()
except RuntimeError as e:
print('WARNING: no image or words found in pdf!')
return
self.extract_html_words()
self.link_lists(search_max=200)
for phrase in self.update_coordinates():
yield phrase
for figure in self.update_html_images():
yield figure
#
def help_OCR(self):
img_ocr_dict = {}
for cap, src_img in self.coordinate_image_map.items():
path = "./" + src_img.get('src')
try:
img = Image.open(path)
ocr_text = pytesseract.image_to_string(img, lang='eng')
img_ocr_dict.update({cap: ocr_text})
            except Exception:
print("OCR: Image not found!")
return img_ocr_dict
def extract_pdf_image_words(self):
num_pages = subprocess.check_output(
"pdfinfo '{}' | grep -a Pages | sed 's/[^0-9]*//'".format(
self.pdf_file),
shell=True)
pdf_word_list = []
pdf_image_list = []
coordinate_map = {}
coordinate_image_map = {}
tempDir = self.pdf_file[:-4]
if not os.path.exists(tempDir): os.mkdir(tempDir)
os.chdir(tempDir)
for i in range(1, int(num_pages) + 1):
html_content = subprocess.check_output(
"pdftohtml -f {} -l {} -xml -stdout '{}' -".format(
str(i), str(i), self.pdf_file),
shell=True)
w_html_content = subprocess.check_output(
"pdftotext -f {} -l {} -bbox-layout '{}' -".format(
str(i), str(i), self.pdf_file),
shell=True)
w_soup = BeautifulSoup(w_html_content, "html.parser")
soup = BeautifulSoup(html_content, "xml")
w_pages = w_soup.find_all('page')
pages = soup.find_all('page')
pdf_image_list_i, coordinate_image_map_i = self._coordinates_from_XML_Image(
pages[0], i)
pdf_word_list_i, coordinate_map_i = self._coordinates_from_HTML(
w_pages[0], i)
pdf_word_list += pdf_word_list_i
pdf_image_list += pdf_image_list_i
# update coordinate map
coordinate_map.update(coordinate_map_i)
coordinate_image_map.update(coordinate_image_map_i)
self.pdf_word_list = pdf_word_list
self.coordinate_map = coordinate_map
self.pdf_image_list = pdf_image_list
self.coordinate_image_map = coordinate_image_map
# <NAME>
self.OCR_dict = self.help_OCR()
if len(self.pdf_word_list) == 0:
raise RuntimeError(
"Words could not be extracted from PDF: %s" % self.pdf_file)
if len(self.pdf_image_list) == 0:
raise RuntimeError(
"Images could not be extracted from PDF: %s" % self.pdf_file)
# take last page dimensions
page_width, page_height = int(float(w_pages[0].get('width'))), int(
float(w_pages[0].get('height')))
html_page_width_ratio, html_page_height_ratio = float(w_pages[0].get('width'))/float(pages[0].get('width')), \
float(w_pages[0].get('height'))/float(pages[0].get('height'))
self.pdf_dim = (page_width, page_height)
self.html_pdf_dim_ratio = (html_page_width_ratio, html_page_height_ratio)
if self.verbose:
print("Extracted %d pdf words" % len(self.pdf_word_list))
if self.verbose:
print("Extracted %d pdf images" % len(self.pdf_image_list))
os.chdir('../')
shutil.rmtree(tempDir)
def _coordinates_from_XML_Image(self, page, page_num):
coordinate_caption_map = {}
coordinate_image_map = {}
captions = []
image_list = []
for candidate in page.find_all('text'):
result = re.search('((Fig)|(Scheme)|(Figure))\s*\.?\s*\d+', candidate.text)
if result and result.span()[0] == 0:
result = result.group()
key = ''.join(result.split())
coordinate_caption_map[key] = candidate.attrs
captions.append(candidate)
for candidate in page.find_all('image'):
found = None
for fig_name, coord_dict in coordinate_caption_map.items():
if self.help_match(candidate.attrs, coord_dict, page):
candidate.attrs['page_num'] = page_num
coordinate_image_map[fig_name] = candidate.attrs
found = fig_name
image_list.append(fig_name)
break
if found:
del coordinate_caption_map[found]
return image_list, coordinate_image_map
def help_match(self, dict1, dict2, page):
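        # Heuristic pairing of an <image> (dict1) with a caption (dict2): the
        # caption's top edge must lie within ~1/10 of the page height of the
        # image's bottom edge, and their left edges must differ by less than
        # ~1/5 of the page width.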
return abs(float(dict1['top'])+float(dict1['height']) - float(dict2['top'])) < float(page.get('height'))/10 and \
abs(float(dict1['left']) - float(dict2['left'])) < float(page.get('width'))/5
def extract_pdf_words(self):
num_pages = subprocess.check_output(
"pdfinfo '{}' | grep -a Pages | sed 's/[^0-9]*//'".format(
self.pdf_file),
shell=True)
pdf_word_list = []
coordinate_map = {}
for i in range(1, int(num_pages) + 1):
html_content = subprocess.check_output(
"pdftotext -f {} -l {} -bbox-layout '{}' -".format(
str(i), str(i), self.pdf_file),
shell=True)
soup = BeautifulSoup(html_content, "html.parser")
pages = soup.find_all('page')
pdf_word_list_i, coordinate_map_i = self._coordinates_from_HTML(
pages[0], i)
pdf_word_list += pdf_word_list_i
# update coordinate map
coordinate_map.update(coordinate_map_i)
self.pdf_word_list = pdf_word_list
self.coordinate_map = coordinate_map
if len(self.pdf_word_list) == 0:
raise RuntimeError(
"Words could not be extracted from PDF: %s" % self.pdf_file)
# take last page dimensions
page_width, page_height = int(float(pages[0].get('width'))), int(
float(pages[0].get('height')))
self.pdf_dim = (page_width, page_height)
if self.verbose:
print("Extracted %d pdf words" % len(self.pdf_word_list))
def _coordinates_from_HTML(self, page, page_num):
pdf_word_list = []
coordinate_map = {}
block_coordinates = {}
blocks = page.find_all('block')
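        # pdftotext -bbox-layout nests <word> inside <line> inside <block>;
        # record each word's line bounding box plus its block's top-left corner
        # so words can later be sorted into reading order.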
i = 0 # counter for word_id in page_num
for block in blocks:
x_min_block = int(float(block.get('xmin')))
y_min_block = int(float(block.get('ymin')))
lines = block.find_all('line')
for line in lines:
y_min_line = int(float(line.get('ymin')))
y_max_line = int(float(line.get('ymax')))
words = line.find_all("word")
for word in words:
xmin = int(float(word.get('xmin')))
xmax = int(float(word.get('xmax')))
for content in self.separators.split(word.getText()):
if len(content) > 0: # Ignore empty characters
word_id = (page_num, i)
pdf_word_list.append((word_id, content))
coordinate_map[word_id] = (page_num, y_min_line,
xmin, y_max_line, xmax)
block_coordinates[word_id] = (y_min_block,
x_min_block)
i += 1
# sort pdf_word_list by page, block top then block left, top, then left
pdf_word_list = sorted(
pdf_word_list,
key=
lambda word_id__: block_coordinates[word_id__[0]] + coordinate_map[word_id__[0]][1:3]
)
return pdf_word_list, coordinate_map
def extract_html_words(self):
html_word_list = []
for phrase in self.phrases:
for i, word in enumerate(phrase.words):
html_word_list.append(((phrase.stable_id, i), word))
self.html_word_list = html_word_list
if self.verbose:
print("Extracted %d html words" % len(self.html_word_list))
def update_html_images(self):
for figure in self.figures:
figure_name = figure.name.replace(' ', '')
src_img = self.coordinate_image_map.get(figure_name)
if not src_img:
continue
figure.top = int(float(src_img.get('top'))*self.html_pdf_dim_ratio[1])
figure.bottom = int((float(src_img.get('top')) + float(src_img.get('height')))*self.html_pdf_dim_ratio[1])
figure.left = int(float(src_img.get('left'))*self.html_pdf_dim_ratio[0])
figure.right = int((float(src_img.get('left')) + float(src_img.get('width')))*self.html_pdf_dim_ratio[0])
figure.text = self.OCR_dict.get(figure_name)
figure.page = src_img.get('page_num')
yield figure
if self.verbose:
print("Updated coordinates in database")
def link_lists(self, search_max=100, edit_cost=20, offset_cost=1):
# NOTE: there are probably some inefficiencies here from rehashing words
# multiple times, but we're not going to worry about that for now
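        # Linking runs in three passes: (1) a global exact pass that pairs words
        # occurring the same number of times in both lists, (2) the same exact
        # matching restricted to local windows between already-linked anchors,
        # and (3) a fuzzy pass that picks the candidate minimizing edit distance
        # plus a penalty for distance from the expected offset.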
def link_exact(l, u):
l, u, L, U = get_anchors(l, u)
html_dict = defaultdict(list)
pdf_dict = defaultdict(list)
for i, (_, word) in enumerate(self.html_word_list[l:u]):
if html_to_pdf[l + i] is None:
html_dict[word].append(l + i)
for j, (_, word) in enumerate(self.pdf_word_list[L:U]):
if pdf_to_html[L + j] is None:
pdf_dict[word].append(L + j)
for word, html_list in list(html_dict.items()):
pdf_list = pdf_dict[word]
if len(html_list) == len(pdf_list):
for k in range(len(html_list)):
html_to_pdf[html_list[k]] = pdf_list[k]
pdf_to_html[pdf_list[k]] = html_list[k]
def link_fuzzy(i):
(_, word) = self.html_word_list[i]
l = u = i
l, u, L, U = get_anchors(l, u)
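            # Linearly interpolate the expected PDF position of HTML word i
            # between the nearest already-linked anchors (l -> L, u -> U).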
offset = int(L + float(i - l) / (u - l) * (U - L))
searchIndices = np.clip(offset + search_order, 0, M - 1)
cost = [0] * search_max
for j, k in enumerate(searchIndices):
other = self.pdf_word_list[k][1]
if (word.startswith(other) or word.endswith(other)
or other.startswith(word) or other.endswith(word)):
html_to_pdf[i] = k
return
else:
cost[j] = int(editdist(
word, other)) * edit_cost + j * offset_cost
html_to_pdf[i] = searchIndices[np.argmin(cost)]
return
def get_anchors(l, u):
while l >= 0 and html_to_pdf[l] is None:
l -= 1
while u < N and html_to_pdf[u] is None:
u += 1
if l < 0:
l = 0
L = 0
else:
L = html_to_pdf[l]
if u >= N:
u = N
U = M
else:
U = html_to_pdf[u]
return l, u, L, U
def display_match_counts():
matches = sum([
html_to_pdf[i] is not None and self.html_word_list[i][1] ==
self.pdf_word_list[html_to_pdf[i]][1]
for i in range(len(self.html_word_list))
])
total = len(self.html_word_list)
print("({:d}/{:d}) = {:.2f}".format(matches, total,
matches / total))
return matches
N = len(self.html_word_list)
M = len(self.pdf_word_list)
assert (N > 0 and M > 0)
html_to_pdf = [None] * N
pdf_to_html = [None] * M
search_radius = search_max // 2
# first pass: global search for exact matches
link_exact(0, N)
if self.verbose:
print("Global exact matching:")
display_match_counts()
# second pass: local search for exact matches
for i in range(((N + 2) // search_radius) + 1):
link_exact(
max(0, i * search_radius - search_radius),
min(N, i * search_radius + search_radius))
if self.verbose:
print("Local exact matching:")
display_match_counts()
# third pass: local search for approximate matches
search_order = np.array(
[(-1)**(i % 2) * (i // 2) for i in range(1, search_max + 1)])
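        # search_order expands outward from the estimated offset:
        # 0, +1, -1, +2, -2, ... up to search_max candidate positions.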
for i in range(len(html_to_pdf)):
if html_to_pdf[i] is None:
link_fuzzy(i)
if self.verbose:
print("Local approximate matching:")
display_match_counts()
# convert list to dict
matches = sum([
html_to_pdf[i] is not None and
self.html_word_list[i][1] == self.pdf_word_list[html_to_pdf[i]][1]
for i in range(len(self.html_word_list))
])
total = len(self.html_word_list)
if self.verbose:
print("Linked {:d}/{:d} ({:.2f}) html words exactly".format(
matches, total, matches / total))
self.links = OrderedDict((self.html_word_list[i][0],
self.pdf_word_list[html_to_pdf[i]][0])
for i in range(len(self.html_word_list)))
def link_lists_old(self,
search_max=200,
editCost=20,
offsetCost=1,
offsetInertia=5):
DEBUG = False
if DEBUG:
offsetHist = []
jHist = []
editDistHist = 0
offset = self._calculate_offset(self.html_word_list,
self.pdf_word_list,
max((search_max // 10), 5), search_max)
offsets = [offset] * offsetInertia
searchOrder = np.array(
[(-1)**(i % 2) * (i // 2) for i in range(1, search_max + 1)])
links = OrderedDict()
for i, a in enumerate(self.html_word_list):
j = 0
searchIndices = np.clip(offset + searchOrder, 0,
len(self.pdf_word_list) - 1)
jMax = len(searchIndices)
matched = False
# Search first for exact matches
while not matched and j < jMax:
b = self.pdf_word_list[searchIndices[j]]
if a[1] == b[1]:
links[a[0]] = b[0]
matched = True
offsets[i % offsetInertia] = searchIndices[j] + 1
offset = int(np.median(offsets))
if DEBUG:
jHist.append(j)
offsetHist.append(offset)
j += 1
# If necessary, search for min edit distance
if not matched:
cost = [0] * search_max
for k, m in enumerate(searchIndices):
cost[k] = (
editdist(a[1], self.pdf_word_list[m][1]) * editCost +
k * offsetCost)
nearest = np.argmin(cost)
links[a[0]] = self.pdf_word_list[searchIndices[nearest]][0]
if DEBUG:
jHist.append(nearest)
offsetHist.append(searchIndices[nearest])
editDistHist += 1
if DEBUG:
print(offsetHist)
print(jHist)
print(editDistHist)
self.offsetHist = offsetHist
self.links = links
if self.verbose:
print("Linked {:d} words to {:d} bounding boxes".format(
len(self.html_word_list), len(self.pdf_word_list)))
def _calculate_offset(self, listA, listB, seedSize, maxOffset):
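        # Estimate the initial HTML-to-PDF alignment: locate each of the first
        # `seedSize` HTML words among the first `maxOffset` PDF words and take
        # the median displacement.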
        wordsA = list(zip(*listA[:seedSize]))[1]
        wordsB = list(zip(*listB[:maxOffset]))[1]
offsets = []
for i in range(seedSize):
try:
offsets.append(wordsB.index(wordsA[i]) - i)
            except (IndexError, ValueError):
pass
return int(np.median(offsets))
def display_links(self, max_rows=100):
html = []
pdf = []
j = []
for i, l in enumerate(self.links):
html.append(self.html_word_list[i][1])
for k, b in enumerate(self.pdf_word_list):
if b[0] == self.links[self.html_word_list[i][0]]:
pdf.append(b[1])
j.append(k)
break
assert (len(pdf) == len(html))
total = 0
match = 0
for i, word in enumerate(html):
total += 1
if word == pdf[i]:
match += 1
print((match, total, match / total))
data = {
# 'i': range(len(self.links)),
'html': html,
'pdf': pdf,
'j': j,
}
        pd.set_option('display.max_rows', max_rows)
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
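        # Element-wise comparisons involving NaT are False, except != which is
        # True; NaT never equals NaT.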
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
        idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                              '2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
        exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                              '2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
        exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                              '2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
offsets.Hour(2) + obj
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj - offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
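        # Subtracting a Period from a PeriodIndex yields integer offsets in
        # units of the shared frequency (months here).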
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
self.assertIs(result, NotImplemented)
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_sub_pdnat(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx')
tm.assert_index_equal(pd.NaT - idx, exp)
tm.assert_index_equal(idx - pd.NaT, exp)
def test_pi_sub_period_nat(self):
# GH 13071
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, np.nan, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, np.nan, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
f = lambda x: x == pd.Period('2011-03', freq='M')
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period('2011-03', freq='M')
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') != x
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period('2011-03', freq='M')
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
f = lambda x: x == pd.Period('2011-03', freq='M')
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') == x
self._check(idx, f, exp)
f = lambda x: x == tslib.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period('2011-03', freq='M')
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') != x
self._check(idx, f, exp)
f = lambda x: x != tslib.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period('2011-03', freq='M')
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > tslib.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
class TestSeriesPeriod(tm.TestCase):
def setUp(self):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_ops_series_timedelta(self):
# GH 13043
s = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Period('2015-01-02', freq='D'),
pd.Period('2015-01-03', freq='D')], name='xxx')
        tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
import pandas as pd
import pytest
from msda import preprocessing
from pandas.util.testing import assert_frame_equal
def test_quantile_normalize():
df = pd.DataFrame({'C1': {'A': 5, 'B': 2, 'C': 3, 'D': 4},
'C2': {'A': 4, 'B': 1, 'C': 4, 'D': 2},
'C3': {'A': 3, 'B': 4, 'C': 6, 'D': 8}})
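    # Quantile normalization makes all columns share the same distribution:
    # sort each column, average across columns at each rank, and replace every
    # original value with the average for its rank (as in dfq_ref below).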
dfq = preprocessing.quantile_normalize(df)
dfq_ref = pd.DataFrame({'C1': {'A': 5.666667, 'B': 2.0,
'C': 3.0, 'D': 4.666667},
'C2': {'A': 4.666667, 'B': 2,
'C': 4.666667, 'D': 3.0},
'C3': {'A': 2.0, 'B': 3.0,
'C': 4.666667, 'D': 5.666667}})
assert_frame_equal(dfq, dfq_ref)
def test_remove_human_contaminants():
df = pd.DataFrame({'Uniprot_Id': ['P1234', '2_HUMAN_contaminant',
'Q4567', '4_HUMAN_contaminant'],
'C2': [4, 1, 4, 2],
'C3': [3, 5, 6, 8]},
index=[0, 1, 2, 3])
dfs = preprocessing.remove_human_contaminants(df)
dfs_ref = pd.DataFrame({'Uniprot_Id': ['P1234', 'Q4567'],
'C2': [4, 4],
'C3': [3, 6]},
index=[0, 1])
assert_frame_equal(dfs, dfs_ref)
def test_remove_reverse_proteins():
df = pd.DataFrame({'Uniprot_Id': ['P1234', '##P1234',
'Q4567', '##Q4567'],
'C2': [4, 1, 4, 2],
'C3': [3, 5, 6, 8]},
index=[0, 1, 2, 3])
dfs = preprocessing.remove_reverse_proteins(df)
dfs_ref = pd.DataFrame({'Uniprot_Id': ['P1234', 'Q4567'],
'C2': [4, 4],
'C3': [3, 6]},
index=[0, 1])
assert_frame_equal(dfs, dfs_ref)
def test_correct_gene_names():
df = pd.DataFrame({'Gene_Symbol': ['CDK2', '2016-03-06 00:00:00'],
'Uniprot_Id': ['P24941', 'O60337'],
'C3': [3, 5]})
dfg = preprocessing.correct_gene_names(df)
dfg_ref = pd.DataFrame({'Gene_Symbol': ['CDK2', 'MARCH6'],
'Uniprot_Id': ['P24941', 'O60337'],
'C3': [3, 5]})
    assert_frame_equal(dfg, dfg_ref)
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
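        # first list: first application rate of each simulation;
        # second list: maximum application rate of each simulation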
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
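    # Hand check of the conc_initial formula for the first model simulation above (added for
    # clarity): conc_0 = app_rate * frac_act_ing * food_multiplier = 0.34 * 0.34 * 110. = 12.716,
    # which matches expected_results[0].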
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
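    # The expected values above are consistent with first-order foliar dissipation over a
    # one-day timestep, i.e. conc_t = conc_0 * 0.5 ** (1. / foliar_diss_hlife); for example
    # 0.001 * 0.5 ** (1. / 0.25) = 0.001 * 0.0625 = 6.25e-5, matching the first expected result.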
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
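    # Hand check of the Mineau scaling for the first simulation (added for clarity):
    # 100. * (15. / 175.) ** (1.15 - 1.) = 100. * 0.0857 ** 0.15 = 69.176,
    # which matches expected_results[0].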
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
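    # Hand check of fi_bird for the first simulation (added for clarity):
    # (0.648 * 15. ** 0.651) / (1. - 0.1) = 3.778 / 0.9 = 4.197, matching expected_results[0].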
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
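    # Hand check of sc_bird for the first simulation (first_app_rate = 0.34):
    # m_s_a_r = ((0.34 * 0.15) / 128.) * 8.33 * 10000. = 33.19 and
    # risk_quotient = 33.19 / 5. = 6.638, matching expected_results[0].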
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
            # following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
        this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
        calculations; if desired, other routines could be modified similarly
        --comparing this method with 'test_ld50_rg_bird', both appear (for this test) to run in about the same time,
        --but that is unlikely to hold when hundreds of model simulation runs are executed (and only a small
        --number of the application_types apply to this method); thus we continue with the non-vectorized
        --approach for now; this should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
            # following parameter values are unique for ld50_rg_bird1
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
            # following parameter values are unique for ld50_rl_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
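    # Hand check of at_mamm for the first simulation (added for clarity):
    # 321. * (350. / 15.) ** 0.25 = 321. * 2.198 = 705.5, matching expected_results[0].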
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
            trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
            trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
            result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
            npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
        finally:
            tab = [result, expected_results]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print(tabulate(tab, headers='keys', tablefmt='rst'))
        return
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
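# The "table", "table + column" and "column" test groups below all exercise the same dispatch
# idea: the constraint first tries func(table_data, column) and falls back to passing only the
# column when the callable accepts a single argument. A rough sketch of that pattern (an
# assumption for illustration, not the actual sdv implementation):
def _call_with_column_fallback(func, table_data, column):
    try:
        return func(table_data, column)
    except TypeError:
        return func(table_data[column])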
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
        - Run transform function twice, first attempting ``table_data`` and ``column``
          as input and then falling back to ``column_data`` only.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
        - Run reverse transform function twice, first attempting ``table_data`` and ``column``
          as input and then falling back to ``column_data`` only.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
        - Run is valid function twice, first attempting ``table_data`` and ``column``
          as input and then falling back to ``column_data`` only.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
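# A rough sketch of the UniqueCombinations round trip that the next test class checks
# (illustrative only; the real sdv implementation differs in its details): every observed
# combination of the constraint columns is mapped to a UUID on transform and mapped back
# on reverse_transform.
def _unique_combinations_roundtrip_sketch(table_data, columns):
    combos = set(table_data[columns].itertuples(index=False, name=None))
    combo_to_uuid = {combo: str(uuid.uuid4()) for combo in combos}
    uuid_to_combo = {value: combo for combo, value in combo_to_uuid.items()}
    joint = table_data[columns].apply(lambda row: combo_to_uuid[tuple(row)], axis=1)
    restored = pd.DataFrame(joint.map(uuid_to_combo).tolist(), columns=columns,
                            index=table_data.index)
    return joint, restored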
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
        It is expected to return the table data with the constraint columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
        It is expected to return the table data with the constraint columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
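# A rough sketch of the input normalization the GreaterThan tests below expect from
# ``_validate_inputs`` (an assumption for illustration; the real sdv implementation also
# validates ``drop`` and other corner cases): the scalar side is left as a scalar, the
# column side is coerced to a list, and every referenced column becomes a constraint column.
def _greater_than_validate_inputs_sketch(low, high, scalar):
    if scalar == 'low':
        high = high if isinstance(high, list) else [high]
        return low, high, tuple(high)
    if scalar == 'high':
        low = low if isinstance(low, list) else [low]
        return low, high, tuple(low)
    low = low if isinstance(low, list) else [low]
    high = high if isinstance(high, list) else [high]
    return low, high, tuple(low + high)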
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
        This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
        This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
        This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
        - Raise error since the scalar column is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
        assert low == ['a']
        assert high == 3
        assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
        assert low == 3
        assert high == ['b']
        assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == [3]
        assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == ['b', 'c']
        assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` differs from ``scalar``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low == ['a']
- high == 0
- constraint_columns == ('a',)
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
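# NOTE (reference sketch, inferred from the cases above rather than from the
# implementation itself): ``_validate_inputs`` appears to follow this contract:
#   * the side named by ``scalar`` is kept as-is (a scalar value),
#   * the other side is normalized to a list of column names,
#   * ``constraint_columns`` collects every column name involved.
# For example, ``_validate_inputs(low='a', high=0, scalar='high', drop='low')``
# yields ``(['a'], 0, ('a',))``.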
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == ['a']
- instance._high == ['b']
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#'
token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a '#'
token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names joined by
a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should pair each column on the multi-column side
with the single column on the other side, joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should pair each column on the multi-column side
with the single column on the other side, joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
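# NOTE (reference sketch of the naming scheme exercised above, not a spec):
#   * one side is a scalar -> each column gets a trailing '#' token, with the
#     token doubled when the name already ends in '#' (e.g. 'b#' -> 'b##').
#   * scalar is None       -> a single diff column named '<high>#<low>',
#     e.g. low='a', high='b' -> 'b#a'.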
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
- ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
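# NOTE: the assertions above rely on numpy's dtype ``kind`` codes:
# 'i' = signed integer, 'f' = floating point, 'M' = datetime64.
# E.g. pd.Series([1]).dtype.kind == 'i' and np.dtype('<M8[ns]').kind == 'M'.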
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` columns as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` columns as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
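# NOTE (worked example for the two tests above): with a = [1, 2, 3] and
# b = [4, 2, 2], np.greater_equal(b, a) -> [True, True, False] while
# np.greater(b, a) -> [True, False, False]; the middle row (2 vs 2) is the
# one that flips between strict=False and strict=True.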
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in that column should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
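# NOTE: both NaN tests above encode the same convention: a comparison that
# involves at least one missing value is treated as valid, so only rows where
# both values are present and violate the constraint are flagged as invalid.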
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
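# NOTE (worked arithmetic for the transform tests, inferred from the expected
# values rather than from the implementation): with a = [1, 2, 3] and
# b = [4, 5, 6] the distance is b - a = 3 everywhere, so the diff column is
# np.log(3 + 1) = np.log(4) ~= 1.3863 for every row.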
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
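# NOTE: for datetime columns the distance above is a timedelta of one second,
# which is converted to nanoseconds (1_000_000_000) before the +1 and the
# logarithm, hence the expected constant np.log(1_000_000_001).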
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
with values np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the scalar ``high`` and each ``low`` column, and create diff columns
with the logarithm of the distance + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between each ``high`` column and the scalar ``low``, and create diff columns
with the logarithm of the distance + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the distance
between the ``high`` column and each ``low`` column, and create diff columns
with the logarithm of the distance + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
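# NOTE (worked arithmetic for the reverse tests, inferred from the expected
# values): np.exp(np.log(4)) - 1 == 3, so the reconstructed column is the
# remaining column shifted by 3 (e.g. b = a + 3 -> [4, 5, 6]), cast back to
# the dtype stored during fit.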
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(-2).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
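# The two classes below exercise Positive and Negative, which (as far as these
# tests show) are thin convenience wrappers around GreaterThan with a fixed
# scalar bound of 0: Positive pins ``_low`` to 0 with scalar='low', while
# Negative pins ``_high`` to 0 with scalar='high'.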
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop == 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop is None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop == 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where only some rows have the desired decimal places within the tolerance (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data where only some rows have the desired decimal places within the tolerance (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data where only some rows are rounded to whole numbers within the tolerance (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect the listed columns to be rounded to the specified number of digits.
Input:
- Table data with columns that are not yet rounded (pandas.DataFrame)
Output:
- Table data with the listed columns rounded (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect the listed columns to be rounded to the specified number of digits.
Input:
- Table data with columns that are not yet rounded (pandas.DataFrame)
Output:
- Table data with the listed columns rounded (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect the listed columns to be rounded to the specified number of digits.
Input:
- Table data with columns that are not yet rounded (pandas.DataFrame)
Output:
- Table data with the listed columns rounded (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is an int scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
| pd.testing.assert_frame_equal(expected_out, out) | pandas.testing.assert_frame_equal |
from functools import reduce
from typing import Tuple, Dict, Any
import pandas as pd
import streamlit as st
import numpy as np
import altair as alt
hide_menu_style = """
<style>
#MainMenu {visibility: hidden;}
</style>
"""
st.markdown(hide_menu_style, unsafe_allow_html=True)
delaware = 564696
chester = 519293
montgomery = 826075
bucks = 628341
philly = 1581000
S_default = delaware + chester + montgomery + bucks + philly
known_infections = 91 # update daily
known_cases = 4 # update daily
# Widgets
current_hosp = st.sidebar.number_input(
"Currently Hospitalized COVID-19 Patients", value=known_cases, step=1, format="%i"
)
doubling_time = st.sidebar.number_input(
"Doubling time before social distancing (days)", value=6, step=1, format="%i"
)
relative_contact_rate = st.sidebar.number_input(
"Social distancing (% reduction in social contact)", 0, 100, value=0, step=5, format="%i"
)/100.0
hosp_rate = (
st.sidebar.number_input("Hospitalization %(total infections)", 0.0, 100.0, value=5.0, step=1.0, format="%f")
/ 100.0
)
icu_rate = (
st.sidebar.number_input("ICU %(total infections)", 0.0, 100.0, value=2.0, step=1.0, format="%f") / 100.0
)
vent_rate = (
st.sidebar.number_input("Ventilated %(total infections)", 0.0, 100.0, value=1.0, step=1.0, format="%f")
/ 100.0
)
hosp_los = st.sidebar.number_input("Hospital Length of Stay", value=7, step=1, format="%i")
icu_los = st.sidebar.number_input("ICU Length of Stay", value=9, step=1, format="%i")
vent_los = st.sidebar.number_input("Vent Length of Stay", value=10, step=1, format="%i")
Penn_market_share = (
st.sidebar.number_input(
"Hospital Market Share (%)", 0.0, 100.0, value=15.0, step=1.0, format="%f"
)
/ 100.0
)
S = st.sidebar.number_input(
"Regional Population", value=S_default, step=100000, format="%i"
)
initial_infections = st.sidebar.number_input(
"Currently Known Regional Infections (only used to compute detection rate - does not change projections)", value=known_infections, step=10, format="%i"
)
total_infections = current_hosp / Penn_market_share / hosp_rate
detection_prob = initial_infections / total_infections
S, I, R = S, initial_infections / detection_prob, 0
intrinsic_growth_rate = 2 ** (1 / doubling_time) - 1
recovery_days = 14.0
# mean recovery rate, gamma, (in 1/days).
gamma = 1 / recovery_days
# Contact rate, beta
beta = (
intrinsic_growth_rate + gamma
) / S * (1-relative_contact_rate) # {rate based on doubling time} / {initial S}
r_t = beta / gamma * S  # effective reproduction number after social distancing
r_naught = r_t / (1-relative_contact_rate)  # basic reproduction number without distancing
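# Post-distancing growth rate is beta*S - gamma, so the new doubling time is 1/log2(1 + growth rate).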
doubling_time_t = 1/np.log2(beta*S - gamma +1) # doubling time after distancing
def head():
st.markdown("""
<link rel="stylesheet" href="https://www1.pennmedicine.org/styles/shared/penn-medicine-header.css">
<div class="penn-medicine-header__content">
<a href="https://www.pennmedicine.org" class="penn-medicine-header__logo"
title="Go to the Penn Medicine home page">Penn Medicine</a>
<a id="title" class="penn-medicine-header__title">Penn Medicine - COVID-19 Hospital Impact Model for Epidemics</a>
</div>
""", unsafe_allow_html=True)
st.markdown(
"""*This tool was developed by the [Predictive Healthcare team](http://predictivehealthcare.pennmedicine.org/) at
Penn Medicine. For questions and comments please see our
[contact page](http://predictivehealthcare.pennmedicine.org/contact/). Code can be found on [Github](https://github.com/pennsignals/chime).
Join our [Slack channel](https://codeforphilly.org/chat?channel=covid19-chime-penn) if you would like to get involved!*""")
st.markdown(
"""The estimated number of currently infected individuals is **{total_infections:.0f}**. The **{initial_infections}**
confirmed cases in the region imply a **{detection_prob:.0%}** rate of detection. This is based on current inputs for
Hospitalizations (**{current_hosp}**), Hospitalization rate (**{hosp_rate:.0%}**), Region size (**{S}**),
and Hospital market share (**{Penn_market_share:.0%}**).
An initial doubling time of **{doubling_time}** days and a recovery time of **{recovery_days}** days imply an $R_0$ of
**{r_naught:.2f}**.
**Mitigation**: A **{relative_contact_rate:.0%}** reduction in social contact after the onset of the outbreak reduces the doubling time to **{doubling_time_t:.1f}** days, implying an effective $R_t$ of **${r_t:.2f}$**.
""".format(
total_infections=total_infections,
initial_infections=initial_infections,
detection_prob=detection_prob,
current_hosp=current_hosp,
hosp_rate=hosp_rate,
S=S,
Penn_market_share=Penn_market_share,
recovery_days=recovery_days,
r_naught=r_naught,
doubling_time=doubling_time,
relative_contact_rate=relative_contact_rate,
r_t=r_t,
doubling_time_t=doubling_time_t
)
)
return None
head()
def show_more_info_about_this_tool():
"""a lot of streamlit writing to screen."""
st.subheader(
"[Discrete-time SIR modeling](https://mathworld.wolfram.com/SIRModel.html) of infections/recovery"
)
st.markdown(
"""The model consists of individuals who are either _Susceptible_ ($S$), _Infected_ ($I$), or _Recovered_ ($R$).
The epidemic proceeds via a growth and decline process. This is the core model of infectious disease spread and has been in use in epidemiology for many years."""
)
st.markdown("""The dynamics are given by the following 3 equations.""")
st.latex("S_{t+1} = (-\\beta S_t I_t) + S_t")
st.latex("I_{t+1} = (\\beta S_t I_t - \\gamma I_t) + I_t")
st.latex("R_{t+1} = (\\gamma I_t) + R_t")
st.markdown(
"""To project the expected impact to Penn Medicine, we estimate the terms of the model.
To do this, we use a combination of estimates from other locations, informed estimates based on logical reasoning, and best guesses from the American Hospital Association.
### Parameters
The model's parameters, $\\beta$ and $\\gamma$, determine the virulence of the epidemic.
$$\\beta$$ can be interpreted as the _effective contact rate_:
""")
st.latex("\\beta = \\tau \\times c")
st.markdown(
"""which is the transmissibility ($\\tau$) multiplied by the average number of people exposed ($$c$$). The transmissibility is the basic virulence of the pathogen. The number of people exposed $c$ is the parameter that can be changed through social distancing.
$\\gamma$ is the inverse of the mean recovery time, in days. I.e.: if $\\gamma = 1/{recovery_days}$, then the average infection will clear in {recovery_days} days.
An important descriptive parameter is the _basic reproduction number_, or $R_0$. This represents the average number of people who will be infected by any given infected person. When $R_0$ is greater than 1, it means that a disease will grow. Higher $R_0$'s imply more rapid growth. It is defined as """.format(recovery_days=int(recovery_days) , c='c'))
st.latex("R_0 = \\beta /\\gamma")
st.markdown("""
$R_0$ gets bigger when
- there are more contacts between people
- when the pathogen is more virulent
- when people have the pathogen for longer periods of time
A doubling time of {doubling_time} days and a recovery time of {recovery_days} days imply an $R_0$ of {r_naught:.2f}.
#### Effect of social distancing
After the beginning of the outbreak, actions to reduce social contact will lower the parameter $c$. If this happens at
time $t$, then the number of people infected by any given infected person is $R_t$, which will be lower than $R_0$.
A {relative_contact_rate:.0%} reduction in social contact would increase the time it takes for the outbreak to double,
to {doubling_time_t:.2f} days from {doubling_time:.2f} days, with a $R_t$ of {r_t:.2f}.
#### Using the model
We need to express the two parameters $\\beta$ and $\\gamma$ in terms of quantities we can estimate.
- $\\gamma$: the CDC is recommending 14 days of self-quarantine, we'll use $\\gamma = 1/{recovery_days}$.
- To estimate $$\\beta$$ directly, we'd need to know transmissibility and social contact rates. since we don't know these things, we can extract it from known _doubling times_. The AHA says to expect a doubling time $T_d$ of 7-10 days. That means an early-phase rate of growth can be computed by using the doubling time formula:
""".format(doubling_time=doubling_time,
recovery_days=recovery_days,
r_naught=r_naught,
relative_contact_rate=relative_contact_rate,
doubling_time_t=doubling_time_t,
r_t=r_t)
)
st.latex("g = 2^{1/T_d} - 1")
st.markdown(
"""
- Since the rate of new infections in the SIR model is $g = \\beta S - \\gamma$, and we've already computed $\\gamma$, $\\beta$ becomes a function of the initial population size of susceptible individuals.
$$\\beta = (g + \\gamma)$$.
### Initial Conditions
- The total size of the susceptible population will be the entire catchment area for Penn Medicine entities (HUP, PAH, PMC, CCH)
- Delaware = {delaware}
- Chester = {chester}
- Montgomery = {montgomery}
- Bucks = {bucks}
- Philly = {philly}""".format(
delaware=delaware,
chester=chester,
montgomery=montgomery,
bucks=bucks,
philly=philly,
)
)
return None
if st.checkbox("Show more info about this tool"):
show_more_info_about_this_tool()
# The SIR model, one time step
def sir(y, beta, gamma, N):
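# One discrete SIR step: S_{t+1} = S_t - beta*S_t*I_t, I_{t+1} = I_t + beta*S_t*I_t - gamma*I_t,
# R_{t+1} = R_t + gamma*I_t, clipped at zero and rescaled so the compartments keep summing to N.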
S, I, R = y
Sn = (-beta * S * I) + S
In = (beta * S * I - gamma * I) + I
Rn = gamma * I + R
if Sn < 0:
Sn = 0
if In < 0:
In = 0
if Rn < 0:
Rn = 0
scale = N / (Sn + In + Rn)
return Sn * scale, In * scale, Rn * scale
# Run the SIR model forward in time
def sim_sir(S, I, R, beta, gamma, n_days, beta_decay=None):
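# Iterate the single-step model for n_days, optionally shrinking beta each day to mimic increasing distancing.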
N = S + I + R
s, i, r = [S], [I], [R]
for day in range(n_days):
y = S, I, R
S, I, R = sir(y, beta, gamma, N)
if beta_decay:
beta = beta * (1 - beta_decay)
s.append(S)
i.append(I)
r.append(R)
s, i, r = np.array(s), np.array(i), np.array(r)
return s, i, r
n_days = st.slider("Number of days to project", 30, 200, 60, 1, "%i")
beta_decay = 0.0
s, i, r = sim_sir(S, I, R, beta, gamma, n_days, beta_decay=beta_decay)
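# Scale the infected census by the clinical rates and the hospital's market share to get facility-level projections.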
hosp = i * hosp_rate * Penn_market_share
icu = i * icu_rate * Penn_market_share
vent = i * vent_rate * Penn_market_share
days = np.array(range(0, n_days + 1))
data_list = [days, hosp, icu, vent]
data_dict = dict(zip(["day", "hosp", "icu", "vent"], data_list))
projection = | pd.DataFrame.from_dict(data_dict) | pandas.DataFrame.from_dict |
from collections import defaultdict
import re
import click
import numpy as np
import pandas as pd
import vcfpy
import hgvs.parser
from hgvs.sequencevariant import SequenceVariant
from tqdm import tqdm
from pathlib import Path
from pyfaidx import Fasta
from pathos.pools import _ProcessPool as Pool
from .protein_variant_predictor import ProteinVariantPredictor
from .utils import GenomePosition, GenomeIntervalTree, GFFFeature, \
vcf_alt_affected_range, sequence_variants_are_equivalent
class AminoAcidMutationFinder():
def __init__(self, cosmic_df, annotation_df, genome_faidx):
if cosmic_df is not None:
filtered_cosmic_df = self._make_filtered_cosmic_df(cosmic_df)
self._cosmic_genome_tree = GenomeIntervalTree(
lambda row: GenomePosition.from_str(
str(row["Mutation genome position"])),
(row for _, row in filtered_cosmic_df.iterrows()))
else:
self._cosmic_genome_tree = None
self._annotation_genome_tree = GenomeIntervalTree(
lambda feat: feat.pos,
(GFFFeature(row) for _, row in annotation_df.iterrows()))
self._protein_variant_predictor = ProteinVariantPredictor(
self._annotation_genome_tree, genome_faidx)
@classmethod
def _make_filtered_cosmic_df(cls, cosmic_df):
"""Return a view of the input `cosmic_df` filtered for
`Primary site` == `lung`."""
filtered_db = cosmic_df.loc[cosmic_df["Primary site"] == "lung"]
return filtered_db
hgvs_parser = hgvs.parser.Parser(expose_all_rules=True)
mutation_aa_silent_sub_fix_pattern = re.compile(
r"(p\.)([A-Z](?:[a-z]{2})?)(\d+)\2")
def _get_cosmic_record_protein_variant(self, cosmic_record):
mutation_aa = cosmic_record["Mutation AA"]
transcript_accession = cosmic_record["Accession Number"]
for tx_id, tx_record in self._protein_variant_predictor \
.transcript_records.items():
if tx_id.split('.')[0] == transcript_accession:
transcript_record = tx_record
break
else:
# If we can't find the transcript record, it probably is not
# protein-coding.
print(f"[{transcript_accession} -> N/A] {mutation_aa}")
return None
protein_accession = transcript_record.feat.attributes["protein_id"]
# COSMIC incorrectly reports silent substitutions as `X{pos}X`, rather
# than `X{pos}=`.
mutation_aa = self.mutation_aa_silent_sub_fix_pattern.sub(
r"\1\2\3=", mutation_aa)
# COSMIC mutation CDS strings have uncertainty variants which are not HGVS-compliant.
# mutation_cds = self.mutation_cds_uncertain_ins_del_fix_pattern.sub(r"\1(\2)\3(\4)", mutation_cds)
cosmic_protein_posedit = (
self.hgvs_parser # pylint: disable=no-member
.parse_p_typed_posedit(mutation_aa)).posedit
cosmic_protein_variant = SequenceVariant(
ac=protein_accession, type='p', posedit=cosmic_protein_posedit)
return cosmic_protein_variant
def _make_mutation_counts_df(self, cell_genemuts_pairs):
"""Transform a list of tuples in the form
[(cell_name, {gene_name: {aa_mutation}})] into a `DataFrame`."""
cell_names, cell_gene_muts = list(zip(*cell_genemuts_pairs))
cell_names, cell_gene_muts = list(cell_names), list(cell_gene_muts)
# Aggregate all of the gene names into a 2D array
cell_gene_names = [
list(gene_muts.keys()) for gene_muts in cell_gene_muts
]
# Flatten them into a set
all_gene_names = set([
gene_name for sublist in cell_gene_names for gene_name in sublist
])
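# Build one column per gene; each entry is the list of that cell's mutations (empty if none, via the defaultdict).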
mutation_data = {}
for gene_name in all_gene_names:
aa_mutations = [list() for _ in range(len(cell_genemuts_pairs))]
for idx, gene_muts in enumerate(cell_gene_muts):
aa_mutations[idx] = list(gene_muts[gene_name])
mutation_data[gene_name] = aa_mutations
# TODO: cell_names index?
return pd.DataFrame(index=cell_names, data=mutation_data)
def find_cell_gene_aa_mutations(self, stream=None, path=None):
"""Create a `dict` mapping gene names to amino acid-level mutations
found in that gene (filtered by COSMIC).
Accepts a `stream` or a `path` as input."""
vcf_reader = vcfpy.Reader.from_stream(
stream) if stream is not None else vcfpy.Reader.from_path(path)
gene_aa_mutations = defaultdict(set)
for record in vcf_reader:
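# For each VCF record: predict protein-level variants, then (if a COSMIC database was provided)
# keep only predictions that match a known COSMIC variant.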
record_pos = GenomePosition.from_vcf_record(record)
# TODO: maybe clean this up into a method
protein_variant_results = self._protein_variant_predictor \
.predict_for_vcf_record(record)
if not protein_variant_results:
continue
target_variants = None
if self._cosmic_genome_tree:
overlaps = self._cosmic_genome_tree.get_all_overlaps(
record_pos)
target_variants = (
self._get_cosmic_record_protein_variant(overlap)
for overlap in overlaps)
# Filter out variants which could not be correctly obtained for
# some reason.
target_variants = [
variant for variant in target_variants if variant
]
if not target_variants:
continue
for result in protein_variant_results:
predicted_variant = result.predicted_variant
if target_variants is not None:
for target_variant in target_variants:
# `strict_silent` is enabled because silent mutations
# have effects which are not discernable at the protein
# level and could easily be different at the DNA level.
if sequence_variants_are_equivalent(
target_variant,
predicted_variant,
strict_unknown=False,
strict_silent=True):
break
else:
# This protein variant didn't match any of the target
# variants; don't proceed.
continue
# The predicted protein variant matches one or more target
# variants (if there are any).
gene_name = result.transcript_feat.attributes["gene_name"]
gene_aa_mutations[gene_name].add(str(predicted_variant))
return gene_aa_mutations
def find_aa_mutations(self, paths, processes=1):
"""Create a `DataFrame` of mutation counts, where the row indices are
cell names and the column indices are gene names."""
def init_process(aa_mutation_finder):
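# Each pool worker keeps its own reference to the finder in a module-level global so process_cell can reach it.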
global current_process_aa_mutation_finder
current_process_aa_mutation_finder = aa_mutation_finder
def process_cell(path):
return (
Path(path).stem,
current_process_aa_mutation_finder \
.find_cell_gene_aa_mutations(path=path)
)
if processes > 1:
with Pool(processes, initializer=init_process,
initargs=(self, )) as pool:
results = list(
tqdm(pool.imap(process_cell, paths),
total=len(paths),
smoothing=0.01))
else:
init_process(self)
results = list(map(process_cell, tqdm(paths)))
return self._make_mutation_counts_df(results)
@click.command()
@click.option("--processes",
"num_processes",
default=1,
help="number of processes to use for computation",
type=int)
@click.option("--cosmicdb",
"cosmicdb_path",
help="optional path to cosmic db file (.tsv)",
default=None)
@click.option("--annotation",
"annotation_path",
help="path to genome annotation (.gtf)",
required=True)
@click.option("--genomefa",
"genomefa_path",
help="path to full genome sequences (.fasta)",
required=True)
@click.option("--output",
"output_path",
help="path to output file (.csv)",
required=True)
@click.argument("input_files", required=True, nargs=-1)
def find_aa_mutations(num_processes, cosmicdb_path, annotation_path,
genomefa_path, output_path, input_files):
""" report amino-acid level SNPs and indels in each sample """
print("Beginning setup (this may take several minutes!)")
if cosmicdb_path:
print("Loading COSMIC database...")
cosmic_df = | pd.read_csv(cosmicdb_path, sep='\t') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Copyright 2018 Infosys Ltd.
Use of this source code is governed by MIT license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
@author: zineb , Mohan
# -*- coding: utf-8 -*-
"""
#%%
import xml.dom.minidom
import pandas as pd
import requests
import datetime as DT
from dateutil.parser import parse
import win32com.client as win32
import newspaper
from newspaper import Article
import nltk
nltk.download('all')
from TwitterSearch import TwitterSearchOrder
from TwitterSearch import TwitterUserOrder
from TwitterSearch import TwitterSearchException
from TwitterSearch import TwitterSearch
from bs4 import BeautifulSoup as bs
import urllib3
import xmltodict
import traceback2 as traceback
import re
import warnings
import contextlib
from urllib3.exceptions import InsecureRequestWarning
import Algo_4
#%%
old_merge_environment_settings = requests.Session.merge_environment_settings
@contextlib.contextmanager
def no_ssl_verification():
opened_adapters = set()
def merge_environment_settings(self, url, proxies, stream, verify, cert):
# Verification happens only once per connection so we need to close
# all the opened adapters once we're done. Otherwise, the effects of
# verify=False persist beyond the end of this context manager.
opened_adapters.add(self.get_adapter(url))
settings = old_merge_environment_settings(self, url, proxies, stream, verify, cert)
settings['verify'] = False
return settings
requests.Session.merge_environment_settings = merge_environment_settings
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', InsecureRequestWarning)
yield
finally:
requests.Session.merge_environment_settings = old_merge_environment_settings
for adapter in opened_adapters:
try:
adapter.close()
except:
pass
keywords=[]
companies_names=[]
days_count=[]
emails=[]
persons_names=[]
connections=[]
companies_ids=[]
relevantsubject=[]
twitter_list=[]
main_list=[]
log_date=[]
def main():
#COMPANIES.XML
doc_comp = xml.dom.minidom.parse("companies.xml");
# print(doc.nodeName)
# print(doc.firstChild.tagName)
companies=doc_comp.getElementsByTagName("company")
for company in companies:
#print(company.getElementsByTagName("name"))
company_name=company.getElementsByTagName("c_name")[0].childNodes[0]
companies_names.append(company_name.nodeValue)
keyword=company.getElementsByTagName("keyword")
x=[]
for word in keyword:
x.append(word.childNodes[0].nodeValue)
keywords.append(x)
company_id=company.getElementsByTagName("c_id")[0].childNodes[0]
companies_ids.append(company_id.nodeValue)
twitter=company.getElementsByTagName("twitter_name")[0].childNodes[0].nodeValue
youtube=company.getElementsByTagName("youtube")[0].childNodes[0].nodeValue
hashtag=company.getElementsByTagName("hashtag")
z=[]
for word in hashtag:
z.append(word.childNodes[0].nodeValue)
twitter_list.append([twitter,z])
main_list.append([company_name.nodeValue,x,twitter,z,youtube])
#NEW DATE
doc_log = xml.dom.minidom.parse("log.xml");
log_date.append(doc_log.getElementsByTagName('day')[0].childNodes[0].nodeValue)
#PEOPLE.XML
doc = xml.dom.minidom.parse("people_v2.xml");
#print(doc.nodeName)
#print(doc.firstChild.tagName)
person=doc.getElementsByTagName("person")
for info in person:
# print(company.getElementsByTagName("name"))
person_name=info.getElementsByTagName("p_name")[0].childNodes[0]
#print(person_name)
persons_names.append(person_name.nodeValue)
email=info.getElementsByTagName("email")[0].childNodes[0]
emails.append(email.nodeValue)
grouped_company=info.getElementsByTagName("group")
group=[]
for g in grouped_company:
group_name=g.getElementsByTagName("g_name")[0].childNodes[0]
#group.append(group_name.nodeValue)
comp_name=g.getElementsByTagName("comp_id")
comp=[]
for c in range(len(comp_name)):
comp.append(comp_name[c].childNodes[0].nodeValue)
group.append([group_name.nodeValue,comp])
#connections.append(group)
single_companies=info.getElementsByTagName("single")[0]
cs_name=single_companies.getElementsByTagName("comp_id")
single_comp=[]
for s in range(len(cs_name)):
single_name=cs_name[s].childNodes[0].nodeValue
single_comp.append(single_name)
group.append(single_comp)
connections.append(group)
#Keywords.XML
doc_words = xml.dom.minidom.parse("keywords_list.xml");
#print(doc_date.nodeName)
for i in range(len(doc_words.getElementsByTagName('word'))):
word=doc_words.getElementsByTagName('word')[i]
l=word.childNodes[0].nodeValue
relevantsubject.append(l)
if __name__ == "__main__":
main();
#%%
urls=[]
current_companies=[]
datasets={}
API_KEY = ''
def content():
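# Build a NewsAPI query for each company's keywords (from the last logged date to today) and store any
# returned articles in the datasets dict, keyed by company name.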
today = DT.date.today()
# days_ago = today - DT.timedelta(days=int(days_count[0]))
todayf = today.strftime("%Y-%m-%d")
# days_agof = days_ago.strftime("%Y-%m-%d")
#URLS
url = 'https://newsapi.org/v2/everything?q='
url_p2='&from='+log_date[0]+'&to='+todayf+'+&sortBy=publishedAt&language=en&apiKey='+ API_KEY
for company in range(len(keywords)):
# print(company)
# print(len(company))
if len(keywords[company]) == 0 :
print('no keywords given')
if len(keywords[company]) > 1 :
new_url = url + keywords[company][0]
for i in range(1,len(keywords[company])):
new_url = new_url + "%20AND%20"+ keywords[company][i]
final_url = new_url + url_p2
else:
final_url= url + keywords[company][0] + url_p2
# print(url)
urls.append(final_url)
# Build df with article info + create excel sheet
count = 0
# current_companies=[]
# datasets={}
for url in urls:
JSONContent = requests.get(url).json()
#content = json.dumps(JSONContent, indent = 4, sort_keys=True)
article_list = []
for i in range(len(JSONContent['articles'])):
article_list.append([JSONContent['articles'][i]['source']['name'],
JSONContent['articles'][i]['title'],
JSONContent['articles'][i]['publishedAt'],
JSONContent['articles'][i]['url']
])
#print(article_list)
if article_list != []:
datasets[companies_names[count]]= pd.DataFrame(article_list)
datasets[companies_names[count]].columns = ['Source/User','Title/Tweet','Date','Link']
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.replace('T',' ')
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.replace('Z','')
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.split(expand=True)
for i in range(len(datasets[companies_names[count]]['Date'])):
datasets[companies_names[count]]['Date'][i]=parse(datasets[companies_names[count]]['Date'][i])
datasets[companies_names[count]]['Date'][i]=datasets[companies_names[count]]['Date'][i].date()
#datasets[companies_names[count]]['Date'][i]=datasets[companies_names[count]]['Date'][i].str.split(expand=True)
#ds = '2012-03-01T10:00:00Z' # or any date sting of differing formats.
#date = parser.parse(ds)
#datasets[companies_names[count]]['Date']=pd.to_datetime(datasets[companies_names[count]]['Date'])
#print(datasets[companies_names[count]])
current_companies.append(companies_names[count])
count=count+1
else:
None
count=count+1
content()
duplicate_df=[]
def duplicate_check():
for article in datasets:
d=datasets[article][datasets[article].duplicated(['Title/Tweet'],keep='first')==True]
print(d)
if d.empty == False:
duplicate_df.append(d)
else:
None
#duplicate_article.append(d)
#duplicate_article = duplicate_article.concat([duplicate_article,d], axis=0)
#print(d)
duplicate_check()
def duplicate_drop():
for article in datasets:
datasets[article]=datasets[article].drop_duplicates(['Title/Tweet'],keep='first')
datasets[article]=datasets[article].reset_index()
datasets[article]=datasets[article].drop(['index'], axis=1)
duplicate_drop()
#%%
def Scoring():
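# For every stored article: download the full text with newspaper, count occurrences of the relevant
# subject words, and save those counts plus the NLP-extracted keywords back into the DataFrame.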
for a in datasets:
try:
datasets[a].insert(0,'Category','Article')
datasets[a].insert(1,'Company',str(a))
datasets[a].insert(3,'Keywords','none')
datasets[a].insert(4,'Subjects/Views','none')
for i in range(len(datasets[a]['Link'])):
r=[]
article = Article(datasets[a]['Link'][i])
article.download()
article.html
article.parse()
txt=article.text.encode('ascii','ignore').decode('ascii')
#f=requests.get(datasets[article]['Link'][i])
#txt=f.text.encode('ascii','ignore').decode('ascii')
txt=txt.lower()
#total_word= wordcounter(txt).get_word_count()
for word in relevantsubject:
result=txt.count(word)
if result != 0:
r.append(word +'('+ str(txt.count(word)) +')')
else:
None
# relevanceLink.append(r)
r=', '.join(word for word in r)
if r:  # r is a string after the join; an empty string means no relevant subjects were found
datasets[a]['Subjects/Views'][i]=str(r + ' (totalWords:'+ str(len(txt.split()))+')')
else:
datasets[a]['Subjects/Views'][i]=str('None')
article.nlp()
k=', '.join(keyword for keyword in article.keywords)
datasets[a]['Keywords'][i]=str(k)
except newspaper.article.ArticleException:
None
#k= []
#for keyword in article.keywords:
# k.append[keyword]
# k=', '.join(keyword for keyword in k)
# datasets[a]['Keywords'][i]=str(k)
Scoring()
# datasets[article]
#%% Formatting
companies_dic=dict(zip(companies_names, companies_ids))
people_comp_dic=dict(zip(persons_names, connections))
people_email_dic=dict(zip(persons_names, emails))
Subject = pd.DataFrame(relevantsubject)
Subject.columns=['Subject Interest']
Companies = pd.DataFrame(companies_names)
Companies.columns=['Companies Interest']
CS = pd.concat([Subject, Companies], axis=1)
CS.fillna('',inplace=True)
MainDF= | pd.DataFrame(main_list) | pandas.DataFrame |
import sys
import tempfile
import skbio
import pandas as pd
import qiime2
from qiime2.plugin import (
Plugin,
Metadata,
Int,
Range,
Choices,
Str,
MetadataColumn,
Categorical,
Float,
List,
Citations,
Bool,
TypeMatch,
)
from q2_types.feature_data import (
FeatureData,
DNAFASTAFormat,
DNASequencesDirectoryFormat,
Sequence,
AlignedSequence,
)
import genome_sampler
from genome_sampler.common import (
IDSelectionDirFmt,
IDSelection,
Selection,
IDMetadataFormat,
UNIXListFormat,
GISAIDDNAFASTAFormat,
VCFMaskFormat,
VCFMaskDirFmt,
AlignmentMask,
)
from genome_sampler.sample_random import sample_random
from genome_sampler.sample_longitudinal import sample_longitudinal
from genome_sampler.sample_neighbors import sample_neighbors
from genome_sampler.sample_diversity import sample_diversity
from genome_sampler.filter import filter_seqs
from genome_sampler.combine import combine_selections
from genome_sampler.summarize import summarize_selections
from genome_sampler.label_seqs import label_seqs
from genome_sampler.mask import mask
citations = Citations.load('citations.bib', package='genome_sampler')
plugin = Plugin(
name='genome-sampler',
website='https://caporasolab.us/genome-sampler',
package='genome_sampler',
version=genome_sampler.__version__,
description='Tools for sampling from collections of genomes.',
short_description='Genome sampler.',
citations=[citations['genomesampler']]
)
plugin.register_formats(IDSelectionDirFmt)
plugin.register_formats(GISAIDDNAFASTAFormat)
plugin.register_formats(VCFMaskFormat)
plugin.register_formats(VCFMaskDirFmt)
plugin.register_semantic_types(Selection)
plugin.register_semantic_types(AlignmentMask)
plugin.register_semantic_type_to_format(FeatureData[Selection],
artifact_format=IDSelectionDirFmt)
plugin.register_semantic_type_to_format(AlignmentMask,
artifact_format=VCFMaskDirFmt)
@plugin.register_transformer
def _1(obj: IDSelection) -> IDSelectionDirFmt:
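# Split the boolean inclusion series into included/excluded ID lists and write them, the metadata,
# and the label into the directory format.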
result = IDSelectionDirFmt()
inclusion = obj.inclusion
assert not inclusion.index.has_duplicates
include = inclusion.index[inclusion]
exclude = inclusion.index[~inclusion]
with open(result.included.path_maker(), 'w') as fh:
fh.write('\n'.join(include))
with open(result.excluded.path_maker(), 'w') as fh:
fh.write('\n'.join(exclude))
obj.metadata.save(result.metadata.path_maker())
with open(result.label.path_maker(), 'w') as fh:
fh.write(obj.label)
return result
@plugin.register_transformer
def _2(fmt: IDSelectionDirFmt) -> qiime2.Metadata:
md = fmt.metadata.view(IDMetadataFormat).to_metadata()
return md.filter_ids(fmt.included.view(UNIXListFormat).to_list())
@plugin.register_transformer
def _3(fmt: IDSelectionDirFmt) -> IDSelection:
md = fmt.metadata.view(IDMetadataFormat).to_metadata()
inclusion = pd.Series(False, index=md.to_dataframe().index)
included = fmt.included.view(UNIXListFormat).to_list()
inclusion[included] = True
with fmt.label.view(UNIXListFormat).open() as fh:
label = fh.read().strip()
return IDSelection(inclusion, md, label)
def _read_gisaid_dna_fasta(path, temp_fh):
def _cleanup_gen():
with open(path) as input_f:
lines = None
for line in input_f:
if line.startswith('>'):
if lines is not None:
yield from lines
lines = [line]
elif lines is not None:
# Due to a bug in skbio 0.5.5, the lowercase option can't
# be used with skbio.io.read for reading DNA sequences.
# Convert sequences to uppercase here.
line = line.upper()
# Spaces and gap characters can appear in unaligned GISAID
# sequence records, so we strip those. U characters are
# additionally replaced with T characters.
line = line.replace('-', '')
line = line.replace('.', '')
line = line.replace(' ', '')
line = line.replace('U', 'T')
observed_chars = set(line.strip())
disallowed_chars = observed_chars - skbio.DNA.alphabet
if disallowed_chars:
print('Note: Non-IUPAC DNA characters (%s) in '
'sequence record %s. This record will be '
'excluded from the output.' %
(' '.join(disallowed_chars),
lines[0][1:].split()[0]),
file=sys.stderr)
lines = None
else:
lines.append(line)
else:
continue
if lines is not None:
yield from lines
# perform an initial clean-up pass through the file. if _cleanup_gen()
# is passed directly to skbio.io.read, the file ends up being read into
# memory, which is a problem for large files
temp_fh.writelines(_cleanup_gen())
temp_fh.seek(0)
result = skbio.io.read(temp_fh, verify=False,
format='fasta', constructor=skbio.DNA)
return result
@plugin.register_transformer
def _4(fmt: GISAIDDNAFASTAFormat) -> DNASequencesDirectoryFormat:
df = DNASequencesDirectoryFormat()
ff = DNAFASTAFormat()
with ff.open() as file, \
tempfile.TemporaryFile(mode='w+') as temp_fh:
data = _read_gisaid_dna_fasta(str(fmt), temp_fh)
skbio.io.write(data, format='fasta', into=file)
df.file.write_data(ff, DNAFASTAFormat)
return df
@plugin.register_transformer
def _5(fmt: VCFMaskFormat) -> pd.DataFrame:
data = [(r.CHROM, r.POS, r.ID, r.REF, r.ALT, r.QUAL, r.FILTER[0], r.INFO)
for r in fmt.to_list()]
columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']
    return pd.DataFrame(data, columns=columns)
# -*- coding: utf-8 -*-
# %reset -f
"""
@author: <NAME>
"""
# Demonstration of Bayesian optimization for multiple y variables
import warnings
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import norm
from sklearn import model_selection
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, DotProduct, WhiteKernel, RBF, ConstantKernel
warnings.filterwarnings('ignore')
# settings
fold_number = 10
relaxation_value = 0.01
# load datasets and settings
training_data = pd.read_csv('training_data.csv', encoding='SHIFT-JIS', index_col=0)
x_for_prediction = pd.read_csv('x_for_prediction.csv', encoding='SHIFT-JIS', index_col=0)
settings = pd.read_csv('settings.csv', encoding='SHIFT-JIS', index_col=0)
# check datasets and settings
number_of_y_variables = settings.shape[1]
if not number_of_y_variables == (training_data.shape[1] - x_for_prediction.shape[1]):
raise Exception(
'Check the numbers of y-variables and X-variables in training_data.csv, data_for_prediction.csv and settings.csv.')
for i in range(number_of_y_variables):
if settings.iloc[0, i] == 0 and settings.iloc[1, i] >= settings.iloc[2, i]:
raise Exception('`lower_limit` must be lower than `upper_limit` in settings.csv.')
# autoscaling
y = training_data.iloc[:, 0:number_of_y_variables]
x = training_data.iloc[:, number_of_y_variables:]
x_for_prediction.columns = x.columns
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_x_for_prediction = (x_for_prediction - x.mean()) / x.std()
autoscaled_y = (y - y.mean()) / y.std()
mean_of_y = y.mean()
std_of_y = y.std()
# Gaussian process regression
estimated_y_for_prediction = np.zeros([x_for_prediction.shape[0], number_of_y_variables])
std_of_estimated_y_for_prediction = np.zeros([x_for_prediction.shape[0], number_of_y_variables])
plt.rcParams['font.size'] = 18
for y_number in range(number_of_y_variables):
model = GaussianProcessRegressor(ConstantKernel() * RBF() + WhiteKernel(), alpha=0)
model.fit(autoscaled_x, autoscaled_y.iloc[:, y_number])
estimated_y_for_prediction_tmp, std_of_estimated_y_for_prediction_tmp = model.predict(
autoscaled_x_for_prediction, return_std=True)
estimated_y_for_prediction[:, y_number] = estimated_y_for_prediction_tmp
std_of_estimated_y_for_prediction[:, y_number] = std_of_estimated_y_for_prediction_tmp
estimated_y = model.predict(autoscaled_x)
estimated_y = estimated_y * std_of_y.iloc[y_number] + mean_of_y.iloc[y_number]
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y.iloc[:, y_number], estimated_y)
y_max = max(y.iloc[:, y_number].max(), estimated_y.max())
y_min = min(y.iloc[:, y_number].min(), estimated_y.min())
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('actual y')
plt.ylabel('estimated y')
plt.show()
print('y{0}, r2: {1}'.format(y_number + 1, float(1 - sum((y.iloc[:, y_number] - estimated_y) ** 2) / sum(
(y.iloc[:, y_number] - y.iloc[:, y_number].mean()) ** 2))))
print('y{0}, RMSE: {1}'.format(y_number + 1, float(
(sum((y.iloc[:, y_number] - estimated_y) ** 2) / len(y.iloc[:, y_number])) ** 0.5)))
print('y{0}, MAE: {1}'.format(y_number + 1,
float(sum(abs(y.iloc[:, y_number] - estimated_y)) / len(y.iloc[:, y_number]))))
estimated_y_in_cv = model_selection.cross_val_predict(model, autoscaled_x, autoscaled_y.iloc[:, y_number],
cv=fold_number)
estimated_y_in_cv = estimated_y_in_cv * std_of_y.iloc[y_number] + mean_of_y.iloc[y_number]
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y.iloc[:, y_number], estimated_y_in_cv)
y_max = max(y.iloc[:, y_number].max(), estimated_y_in_cv.max())
y_min = min(y.iloc[:, y_number].min(), estimated_y_in_cv.min())
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('actual y')
plt.ylabel('estimated y in CV')
plt.show()
print('y{0}, r2cv: {1}'.format(y_number + 1, float(1 - sum((y.iloc[:, y_number] - estimated_y_in_cv) ** 2) / sum(
(y.iloc[:, y_number] - y.iloc[:, y_number].mean()) ** 2))))
print('y{0}, RMSEcv: {1}'.format(y_number + 1, float(
(sum((y.iloc[:, y_number] - estimated_y_in_cv) ** 2) / len(y.iloc[:, y_number])) ** 0.5)))
print('y{0}, MAEcv: {1}'.format(y_number + 1, float(
sum(abs(y.iloc[:, y_number] - estimated_y_in_cv)) / len(y.iloc[:, y_number]))))
print('')
estimated_y_for_prediction = pd.DataFrame(estimated_y_for_prediction)
estimated_y_for_prediction.columns = y.columns
estimated_y_for_prediction = estimated_y_for_prediction * y.std() + y.mean()
std_of_estimated_y_for_prediction = pd.DataFrame(std_of_estimated_y_for_prediction)
"""
Author : <NAME>\n
email : <EMAIL>\n
LICENSE : MIT License
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import seaborn as sns
import time
from scipy.signal import butter, sosfiltfilt, sosfreqz
from scipy.signal import spectrogram as spect
from scipy.stats import gaussian_kde
import datetime
from threading import Thread
from msdlib import msdExceptions
import os
sns.set()
pd.plotting.register_matplotlib_converters()
# This is a custom-designed progress bar for tracking loop progress and timing. Use it as shown below:
#with ProgressBar(arr, desc = 'description of arr') as pbar:
# for i in arr:
# 'your code/task inside the loop'
# pbar.inc()
class ProgressBar():
"""
Inputs:
:arr: iterable, it is the array you will use to run the loop, it can be range(10) or any numpy array or python list or any other iterator
:desc: str, description of the loop, default - 'progress'
:barlen: int, length of the progress bar, default is 40
        :front_space: int, allowed space for description, default is 20
        :tblink_max: float/int indicates maximum interval in seconds between two adjacent blinks, default is .3
:tblink_min: float/int indicates minimum interval in seconds between two adjacent blinks, default is .18
Outputs:
there is no output. It creates a progress bar which shows the loop progress.
"""
def __init__(self, arr, desc='progress', barlen=40, front_space = 20, tblink_max = .3, tblink_min = .18):
self.xlen = len(arr)
self.barlen = barlen
if tblink_max >= 1:
tblink_max = .9
print("'tblink_max' was set to .9 seconds beacuse of exceeding maximum limit!")
self.desc = desc[:front_space] + ' ' + '-' * (front_space - len(desc)) * int(front_space > len(desc))+ ' '
self.barend = ' '*15
self.tblmax = tblink_max
self.tblmin = tblink_min
self.blintv = self.tblmax # blinking interval
self.barincs = [int(self.xlen / self.barlen * (i + 1)) for i in range(self.barlen)]
self.barinc = 0
self.sym = '█' # complete symbol in progress bar
self.non = ' ' # gap symbol in progress bar
self.blsyms = ['|', '/', '-', '\\']
self.bllen = len(self.blsyms)
self.blcnt = 0
self.cnt = 0 # iterative counter for x elements
self.barelap = 0 # iterative counter for progress bar
self.set_barelap() # setting proper next value for self.barinc
self.blink = self.blsyms[0]
self.tcntst = False
self.min_tprint = .1 # minimum time interval for two consecutive bar print
self.tprrec = time.time() - self.min_tprint - 1 # initialized with a bigger time
def __enter__(self, ):
self.tst = time.time()
# multithreading initialization
self.thread = Thread(target = self.blink_func)
self.flblink = True
self.thread.start()
# bar initialization
self.prleftime = 'calculating..'
self.tstack = 0
self.tstackst = time.time()
self.pastime = datetime.timedelta(seconds = 0)
return self
def calc_time(self):
self.pastime = datetime.timedelta(seconds = time.time() - self.tst)
self.leftime = self.pastime * (self.xlen / self.cnt - 1)
self.tstackst = time.time()
self.tstack = 0
self.blintv = self.tblmax - (self.tblmax - self.tblmin) * (self.barelap + 1) / self.barlen
def conv_time(self):
d = self.pastime.days
s = int(self.pastime.seconds + self.tstack)
self.prpastime = '%s'%datetime.timedelta(days = d, seconds = s)
if self.tcntst:
d = self.leftime.days
s = int(self.leftime.seconds - self.tstack)
if d < 0:
d, s = 0, 0
self.prleftime = '%s'%datetime.timedelta(days = d, seconds = s)
def set_barelap(self):
if self.cnt == self.barincs[self.barinc]:
if self.barinc < self.barlen - 1:
self.barinc += 1
while self.barincs[self.barinc] == self.barincs[self.barinc - 1] and self.barinc < self.barlen:
self.barinc += 1
self.barelap = int(self.cnt / self.xlen * self.barlen)
def inc(self):
self.cnt += 1
if not self.tcntst: self.tcntst = True
self.set_barelap()
self.calc_time()
self.conv_time()
self.barprint()
def barprint(self, end = ''):
if time.time() - self.tprrec >= self.min_tprint or not self.flblink:
self.bar = self.sym * self.barelap + self.blink * int(self.flblink) + self.non * (self.barlen - self.barelap - int(self.flblink))
self.pr = self.desc + '[' + self.bar + '] ' + '%d/%d <%3d%%>'%(self.cnt, self.xlen, self.cnt / self.xlen * 100) + ' ( %s'%self.prpastime + ' < %s'%self.prleftime + ' )%s'%(self.barend)
print('\r%s'%self.pr, end = end, flush = False)
self.tprrec = time.time()
def blink_func(self):
while self.flblink:
time.sleep(self.blintv)
self.blcnt += 1
if self.blcnt == self.bllen: self.blcnt = 0
self.blink = self.blsyms[self.blcnt]
# time adjustment part
self.tstack = time.time() - self.tstackst
self.conv_time()
self.barprint()
def __exit__(self, exception_type, exception_value, traceback):
self.flblink = False
time.sleep(self.tblmax)
self.barend = ' Complete!' + ' '*15
self.barprint('\n')
def get_time_estimation(time_st, count_ratio=None, current_ep=None, current_batch=None, total_ep=None, total_batch=None, string_out=True):
"""
    This function estimates the remaining time inside any loop. The function is prepared for estimating
    remaining time in machine learning training with mini-batches.
But it can be used for other purposes also by providing count_ratio input value.
Inputs:
:time_st: time.time() instance indicating the starting time count
:count_ratio: float, ratio of elapsed time at any moment.\n
Must be 0 ~ 1, where 1 will indicate that this is the last iteration of the loop
:current_ep: current epoch count
:current_batch: current batch count in mini-batch training
:total_ep: total epoch in the training model
:total_batch: total batch in the mini-batch training
:string_out: bool, whether to output the elapsed and estimated time in string format or not.
True will output time string
Outputs:
        :output time: the output can be a single string in format\n
'elapsed hour : elapsed minute : elapsed second < remaining hour : remaining minute : remaining second '\n
if string_out flag is True\n
or it can output 6 integer values in the above order for those 6 elements in the string format\n
"""
if count_ratio is None:
# getting count ratio
total_count = total_ep * total_batch
current_count = current_ep * total_batch + current_batch + 1
count_ratio = current_count / total_count
# calculating time
t = time.time()
elapsed_t = t - time_st
# converting time into H:M:S
# elapsed time calculation
el_h = elapsed_t // 3600
el_m = (elapsed_t - el_h * 3600) // 60
el_s = int(elapsed_t - el_h * 3600 - el_m * 60)
if count_ratio == 0:
if string_out:
return '%d:%02d:%02d < %d:%02d:%02d' % (el_h, el_m, el_s, 0, 0, 0)
else:
return el_h, el_m, el_s, 0, 0, 0
# remaining time calculation
total_t = elapsed_t / count_ratio
rem_t = total_t - elapsed_t
rem_h = rem_t // 3600
rem_m = (rem_t - rem_h * 3600) // 60
rem_s = int(rem_t - rem_h * 3600 - rem_m * 60)
if string_out:
out = '%d:%02d:%02d < %d:%02d:%02d' % (el_h, el_m, el_s, rem_h, rem_m, rem_s)
return out
else:
return el_h, el_m, el_s, rem_h, rem_m, rem_s
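# A minimal usage sketch for get_time_estimation() (illustrative only, not part of the library);
# 'total_ep' and 'total_batch' are hypothetical loop bounds of a mini-batch training loop:
#time_st = time.time()
#for ep in range(total_ep):
#    for b in range(total_batch):
#        # ... one training step ...
#        print(get_time_estimation(time_st, current_ep=ep, current_batch=b,
#                                   total_ep=total_ep, total_batch=total_batch))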
class Filters():
"""
    This class is used to apply Butterworth (IIR) filters as high-pass, low-pass, band-pass and band-stop filters.
    It also shows a proper graphical representation of the filter response, the signal before and after filtering, and the filter spectrum.
The core purpose of this class is to make the filtering process super easy and check filter response comfortably.
Inputs:
        :T: float, indicating the sampling period of the signal, must be in seconds (doesn't have a default value)
        :N: int, indicating the number of FFT frequency bins, default is 1000
        :savepath: str, path to the directory to store the plot, default is None (doesn't save plots)
        :show: bool, whether to show the plot or not, default is True (shows figures)
        :save: bool, whether to store the plot or not, default is False (doesn't save figures)
"""
def __init__(self, T, N = 1000, savepath=None, save=False, show=True):
# T must be in seconds
self.fs = 1 / T
self.N = N
self.savepath = savepath
self.show = show
self.save = save
def raise_cutoff_error(self, msg):
raise msdExceptions.CutoffError(msg)
def raise_filter_error(self, msg):
raise msdExceptions.FilterTypeError(msg)
def vis_spectrum(self, sr, f_lim=[], see_neg=False, show=None, save=None, savepath=None, figsize=(30, 3)):
"""
The purpose of this function is to produce spectrum of time series signal 'sr'.
Inputs:
:sr: numpy ndarray or pandas Series, indicating the time series signal you want to check the spectrum for
:f_lim: python list of len 2, indicating the limits of visualizing frequency spectrum. Default is []
:see_neg: bool, flag indicating whether to check negative side of the spectrum or not. Default is False
:show: bool, whether to show the plot or not, default is None (follows Filters.show attribute)
:save: bool, whether to store the plot or not, default is None (follows Filters.save attribute)
:savepath: str, path to the directory to store the plot, default is None (follows Filters.savepath attribute)
:figsize: tuple, size of the figure plotted to show the fft version of the signal. Default is (30, 3)
Outputs:
            doesn't return anything as the purpose is to generate plots
"""
if savepath is None: savepath = self.savepath
if save is None: save = self.save
if show is None: show = self.show
if isinstance(sr, np.ndarray) or isinstance(sr, list): sr = pd.Series(sr)
if sr.name is None: sr.name = 'signal'
if see_neg:
y = np.fft.fft(sr.dropna().values, n = self.N).real
y = np.fft.fftshift(y)
f = (np.arange(self.N) / self.N - .5) * self.fs
y = pd.Series(y, index = f)
else:
y = np.fft.fft(sr.dropna().values, n = self.N * 2).real
f = np.arange(2 * self.N) / (2 * self.N) * self.fs
y = pd.Series(y, index = f)
y = y.iloc[:self.N]
fig, ax = plt.subplots(figsize = figsize)
ax.plot(y.index, y.values, alpha = 1)
fig_title = 'Frequency Spectrum of %s'%sr.name
ax.set_title(fig_title)
ax.set_ylabel('Power')
ax.set_xlabel('Frequency (Hz)')
if len(f_lim) == 2: ax.set_xlim(f_lim[0], f_lim[1])
fig.tight_layout()
if show:
plt.show()
if save and savepath is not None:
os.makedirs(savepath, exist_ok=True)
fig.savefig('%s/%s.jpg'%(savepath, fig_title.replace(' ', '_')), bbox_inches='tight')
plt.close()
def apply(self, sr, filt_type, f_cut, order=10, response=False, plot=False, f_lim=[], savepath=None, show=None, save=None):
"""
        The purpose of this function is to apply a Butterworth filter on a time series signal 'sr' and get the output filtered signal y
Inputs:
:sr: numpy ndarray/pandas Series/list, indicating the time series signal you want to apply the filter on
:filt_type: str, indicating the type of filter you want to apply on sr.\n
{'lp', 'low_pass', 'low pass', 'lowpass'} for applying low pass filter\n
and similar for 'high pass', 'band pass' and 'band stop' filters
            :f_cut: float/list/numpy 1d array, indicating cut off frequencies. Please follow the explanations below\n
for lowpass/highpass filters, it must be int/float\n
for bandpass/bandstop filters, it must be a list or numpy array of length divisible by 2
:order: int, filter order, the more the values, the sharp edges at the expense of complexer computation. Default is 10.
:response: bool, whether to check the frequency response of the filter or not, Default is False
:plot: bool, whether to see the spectrum and time series plot of the filtered signal or not, Default is False
:f_lim: list of length 2, frequency limit for the plot. Default is []
:savepath: str, path to the directory to store the plot, default is None (follows Filters.savepath attribute)
:show: bool, whether to show the plot or not, default is None (follows Filters.show attribute)
:save: bool, whether to store the plot or not, default is None (follows Filters.save attribute)
Outputs:
:y: pandas Series, filtered output signal
"""
if savepath is None: savepath = self.savepath
if save is None: save = self.save
if show is None: show = self.show
if isinstance(sr, np.ndarray) or isinstance(sr, list): sr = pd.Series(sr)
if sr.name is None: sr.name = 'signal'
msg = 'Cut offs should be paired up (for bp or bs) or in integer format (for hp or lp)'
if isinstance(f_cut, list) or isinstance(f_cut, np.ndarray):
if len(f_cut) % 2 != 0:
self.raise_cutoff_error(msg)
else:
band = True
f_cut = np.array(f_cut).ravel() / (self.fs / 2)
if np.sum(f_cut <= 0) > 0:
msg = 'Invalid cut offs (0 or negative cut offs)'
self.raise_cutoff_error(msg)
elif np.sum(f_cut > 1) > 0:
                    msg = 'Invalid cut offs (frequency is greater than Nyquist frequency)'
self.raise_cutoff_error(msg)
elif isinstance(f_cut, int) or isinstance(f_cut, float):
band = False
f_cut = f_cut / (self.fs / 2)
if f_cut <= 0:
msg = 'Invalid cut offs (0 or negative cut offs)'
self.raise_cutoff_error(msg)
elif f_cut > 1:
                msg = 'Invalid cut offs (frequency is greater than Nyquist frequency)'
self.raise_cutoff_error(msg)
else:
self.raise_cutoff_error(msg)
msg = 'Filter type %s is not understood or filter_type-cutoff mis-match'%(filt_type)
filt_type = filt_type.lower()
if filt_type in ['low_pass', 'lp', 'low pass', 'lowpass'] and not band:
_filter = butter(N = order, Wn = f_cut, btype = 'lowpass', analog = False, output = 'sos')
elif filt_type in ['high_pass', 'hp', 'high pass', 'highpass'] and not band:
_filter = butter(N = order, Wn = f_cut, btype = 'highpass', analog = False, output = 'sos')
elif filt_type in ['band_pass', 'bp', 'band pass', 'bandpass'] and band:
_filter = butter(N = order, Wn = f_cut, btype = 'bandpass', analog = False, output = 'sos')
        elif filt_type in ['band_stop', 'bs', 'band stop', 'bandstop'] and band:
_filter = butter(N = order, Wn = f_cut, btype = 'bandstop', analog = False, output = 'sos')
else:
self.raise_filter_error(msg)
y = pd.Series(sosfiltfilt(_filter, sr.dropna().values), index = sr.dropna().index, name = 'filtered_%s'%sr.name)
if response:
self.filter_response(_filter, f_lim = f_lim, savepath=savepath, save=save, show=show)
if plot:
self.plot_filter(y, filt_type, f_cut, f_lim = f_lim, savepath=savepath, save=save, show=show)
return y
def filter_response(self, sos, f_lim=[], savepath=None, show=None, save=None):
"""
        This function plots the filter spectrum in the frequency domain.
Inputs:
:sos: sos object found from butter function
            :f_lim: list/tuple as [lower_cutoff, higher_cutoff], default is []
:savepath: str, path to the directory to store the plot, default is None (follows Filters.savepath attribute)
:show: bool, whether to show the plot or not, default is None (follows Filters.show attribute)
:save: bool, whether to store the plot or not, default is None (follows Filters.save attribute)
Outputs:
            It doesn't return anything.
"""
if savepath is None: savepath = self.savepath
if save is None: save = self.save
if show is None: show = self.show
w, h = sosfreqz(sos, worN = 2000)
w = (self.fs / 2) * (w / np.pi)
h = np.abs(h)
fig, ax = plt.subplots(figsize = (30, 3))
ax.plot(w, h)
ax.set_title('Filter spectrum')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Gain')
if f_lim != []: ax.set_xlim(f_lim)
fig.tight_layout()
if show: plt.show()
if savepath is not None and save:
os.makedirs(savepath, exist_ok=True)
fig.savefig(os.path.join(savepath, 'Filter_spectrum.jpg'), bbox_inches='tight')
plt.close()
def plot_filter(self, y, filt_type, f_cut, f_lim=[], savepath=None, show=None, save=None):
"""
This function plots filter frequency response, time domain signal etc.
Inputs:
:y: pandas Series, time series data to filter
:filt_type: str, indicating the type of filter you want to apply on y.\n
{'lp', 'low_pass', 'low pass', 'lowpass'} for applying low pass filter\n
and similar for 'high pass', 'band pass' and 'band stop' filters
            :f_cut: float/list/numpy 1d array, indicating cut off frequencies. Please follow the explanations below\n
for lowpass/highpass filters, it must be int/float\n
for bandpass/bandstop filters, it must be a list or numpy array of length divisible by 2
:f_lim: list of length 2, frequency limit for the plot. Default is []
:savepath: str, path to the directory to store the plot, default is None (follows Filters.savepath attribute)
:show: bool, whether to show the plot or not, default is None (follows Filters.show attribute)
:save: bool, whether to store the plot or not, default is None (follows Filters.save attribute)
Outputs:
No output is returned. The function generates plot.
"""
if savepath is None: savepath = self.savepath
if save is None: save = self.save
if show is None: show = self.show
title = '%s with %s filter with cut offs %s'%(y.name, filt_type, str(f_cut * (self.fs / 2)))
self.vis_spectrum(sr=y, f_lim=f_lim)
fig, ax = plt.subplots(figsize=(30, 3))
ax.set_title(title)
ax.plot(y.index, y.values)
ax.set_xlabel('Time')
ax.set_ylabel('%s'%y.name)
fig.tight_layout()
if show: plt.show()
if save and savepath is not None:
os.makedirs(savepath, exist_ok=True)
fig.savefig(os.path.join(savepath, title.replace(' ', '_')+'.jpg'), bbox_inches='tight')
plt.close()
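# A minimal usage sketch for the Filters class (illustrative only); 'sig' is a hypothetical pandas Series
# sampled every 0.01 s, so the Nyquist frequency is 50 Hz and the 10 Hz cut-off below is just an example:
#filt = Filters(T=0.01)
#filt.vis_spectrum(sig, f_lim=[0, 50])
#filtered = filt.apply(sig, filt_type='low_pass', f_cut=10, order=10, response=True, plot=True)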
def get_spectrogram(ts_sr, fs=None, win=('tukey', 0.25), nperseg=None, noverlap=None, mode='psd', figsize=None,
vis_frac=1, ret_sxx=False, show=True, save=False, savepath=None, fname=None):
"""
The purpose of this function is to find the spectrogram of a time series signal.
    It computes the spectrogram, returns the spectrogram output and is also able to show the spectrogram plot as a heatmap.
Inputs:
:ts_sr: pandas.Series object containing the time series data, the series should contain its name as ts_sr.name
        :fs: int/float, sampling frequency of the signal, default is 1
:win: tuple of (str, float), tuple of window parameters, default is (tukey, .25)
:nperseg: int, number of data in each segment of the chunk taken for STFT, default is 256
:noverlap: int, number of data in overlapping region, default is (nperseg // 8)
:mode: str, type of the spectrogram output, default is power spectral density('psd')
:figsize: tuple, figsize of the plot, default is (30, 6)
        :vis_frac: float or a list of length 2, fraction of the frequency range (from lower to higher) you want to visualize, default is 1 (full range)
        :ret_sxx: bool, whether to return the spectrogram dataframe or not, default is False
:show: bool, whether to show the plot or not, default is True
:save: bool, whether to save the figure or not, default is False
:savepath: str, path to save the figure, default is None
:fname: str, name of the figure to save in the savepath, default is figure title
Outputs:
:sxx: returns spectrogram matrix if ret_sxx flag is set to True
"""
# default values
if fs is None: fs = 1
if nperseg is None: nperseg = 256
if noverlap is None: noverlap = nperseg // 8
if figsize is None: figsize = (30, 6)
if isinstance(vis_frac, int) or isinstance(vis_frac, float): vis_frac = [0, vis_frac]
# applying spectrogram from scipy.signal.spectrogram
f, t, sxx = spect(x = ts_sr.values, fs = fs, window = win, nperseg = nperseg, noverlap = noverlap, mode = mode)
# f = ['%.6f'%i for i in f]
# properly spreading the time stamps
t = [ts_sr.index[i] for i in (ts_sr.shape[0] / t[-1] * t).astype(int) - 1]
# creating dataframe of spectrogram
sxx = pd.DataFrame(sxx, columns = t, index = f)
sxx.index.name = 'frequency'
sxx.columns.name = 'Time'
if show or save:
# plotting the spectrogram
fig, ax = plt.subplots(figsize = figsize)
fig_title = 'Spectrogram of %s'%ts_sr.name if len(ts_sr.name) > 0 else 'Spectrogram'
fig.suptitle(fig_title, y = 1.04, fontsize = 15, fontweight = 'bold')
heatdata = sxx.loc[sxx.index[int(vis_frac[0] * sxx.shape[0])] : sxx.index[int(vis_frac[1] * sxx.shape[0]) - 1]].sort_index(ascending = False)
sns.heatmap(data = heatdata, cbar = True, ax = ax)
fig.tight_layout()
if show: plt.show()
if save and savepath is not None:
os.makedirs(savepath, exist_ok=True)
fname = fig_title if fname is None else fname
fig.savefig('%s/%s.jpg'%(savepath, fname.replace(' ', '_')), bbox_inches = 'tight')
plt.close()
# returning the spectrogram
if ret_sxx: return sxx
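# A minimal usage sketch for get_spectrogram() (illustrative only); 'sig' is a hypothetical named pandas Series
# sampled at 100 Hz, and the nperseg/ret_sxx values below are example choices:
#sxx = get_spectrogram(sig, fs=100, nperseg=512, ret_sxx=True, show=True)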
def invalid_bins(msg):
raise msdExceptions.InvalidBins(msg)
def grouped_mode(data, bins = None, neglect_values=[], neglect_above=None, neglect_bellow=None, neglect_quan=0):
"""
    The purpose of this function is to calculate the mode value (as in mean-median-mode) of the data
Inputs:
:data: pandas Series, list, numpy ndarray - must be 1-D
:bins: int, list or ndarray, indicates bins to be tried to calculate mode value. Default is None
:neglect_values: list, ndarray, the values inside the list will be removed from data. Default is []
:neglect_above: float, values above this will be removed from the data. Default is None
        :neglect_bellow: float, values below this will be removed from the data. Default is None
:neglect_quan: 0 < float < 1 , percentile range which will be removed from both sides from data distribution. Default is 0
Outputs:
mode for the time series data
"""
if not isinstance(data, pd.Series): data = pd.Series(data)
if neglect_above is not None: data = data[data <= neglect_above]
if neglect_bellow is not None: data = data[data >= neglect_bellow]
if len(neglect_values) > 0: data.replace(to_replace = neglect_values, value = np.nan, inplace = True)
data.dropna(inplace = True)
if neglect_quan != 0: data = data[(data >= data.quantile(q = neglect_quan)) & (data <= data.quantile(q = 1 - neglect_quan))]
if isinstance(bins, type(None)): bins = np.arange(10, 101, 10)
elif isinstance(bins, int): bins = [bins]
elif isinstance(bins, list) or isinstance(bins, np.ndarray): pass
else: invalid_bins('Provided bins are not valid! Please check requirement for bins.')
_mode = []
for _bins in bins:
mode_group = pd.cut(data, bins = _bins).value_counts()
max_val = mode_group.values[0]
if max_val == 1: break
mode_group = mode_group[mode_group == max_val]
_mode.append(np.mean([i.mid for i in mode_group.index]))
return np.mean(_mode)
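# A minimal usage sketch for grouped_mode() (illustrative only), using randomly generated example data:
#mode_value = grouped_mode(np.random.normal(5, 1, 1000), bins=[20, 40, 60], neglect_quan=.01)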
def get_edges_from_ts(sr, th_method='median', th_factor=.5, th=None, del_side='up', name=None):
"""
    The purpose of this function is to find the edges, specifically the indices of the start and end
    of certain regions, by thresholding the desired time series data.
Inputs:
:sr: pandas Series, time series data with proper timestamp as indices of the series. Index timestamps must be timedelta type values.
Index timestamps must be sorted from old to new time (ascending order).
:th_method: {'mode', 'median', 'mean'}, method of calculating threshold, Default is 'median'
        :th_factor: float/int, multiplication factor to be multiplied with the th_method value to calculate the threshold. Default is .5
:th: int/flaot, threshold value inserted as argument. Default is None
:del_side: {'up', 'down'}, indicating which side to be removed to get edges. Default is 'up'
:name: name of the events, default is None
Note: the algorithm starts recording when it exceeds the threshold value, so open interval system.
Outputs:
:edges: pandas DataFrame, contains columns 'start', 'end', 'duration', 'interval' etc.
related to the specific events found after thresholding
"""
if th is None:
        if th_method == 'mode': th = grouped_mode(sr) * th_factor
        elif th_method == 'median': th = sr.median() * th_factor
        elif th_method == 'mean': th = sr.mean() * th_factor
state = False # keeps the present state, True if its within desired edges
edges = [[], []] # keeps the edges, start and stop
if del_side == 'up':
for i in range(sr.shape[0]):
if sr.iloc[i] < th and not state:
state = True
edges[0].append(sr.index[i])
elif sr.iloc[i] >= th and state:
state = False
edges[1].append(sr.index[i - 1])
elif del_side == 'down':
for i in range(sr.shape[0]):
if sr.iloc[i] > th and not state:
state = True
edges[0].append(sr.index[i])
elif sr.iloc[i] <= th and state:
state = False
edges[1].append(sr.index[i - 1])
if state:
edges[1].append(sr.index[-1])
edges = pd.DataFrame(edges, index = ['start', 'stop']).T.reset_index(drop = True)
edges['duration'] = edges['stop'] - edges['start']
edges['interval'] = np.nan
edges['interval'].iloc[1:] = [j - i for i,j in zip(edges['stop'].iloc[:-1], edges['start'].iloc[1:])]
if name is None:
if sr.name is not None:
edges.columns.name = '%s_edges'%sr.name
else:
edges.columns.name = 'edges'
else:
edges.columns.name = name
return edges
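# A minimal usage sketch for get_edges_from_ts() (illustrative only); 'sr' is a hypothetical time series
# with an ascending time index:
#edges = get_edges_from_ts(sr, th_method='median', th_factor=.5, del_side='up')
#print(edges[['start', 'stop', 'duration']])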
def each_row_max(data):
"""
The purpose of this function is to get the maximum values and corresponding column names for each row of a matrix
Inputs:
:data: list of lists/numpy ndarray or pandas dataframe, matrix data from where max values will be calculated
Outputs:
returns same data with two new columns with max values and corresponding column names
"""
if isinstance(data, np.ndarray) or isinstance(data, list): data = pd.DataFrame(data)
elif isinstance(data, pd.DataFrame): pass
    else: raise msdExceptions.InputVariableError('input data type must be list of lists or pandas dataframe or numpy ndarray!')
if data.isnull().all().all():
data['max_val'] = np.nan
data['max_col'] = np.nan
else:
col = data.columns
row = data.index
data = data.values
max_idx = np.nanargmax(data, axis=1)
max_val = [data[i, max_idx[i]] for i in range(data.shape[0])]
max_col = col[max_idx]
data = pd.DataFrame(data, index = row, columns = col)
data['max_val'] = max_val
data['max_col'] = max_col
return data
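# A minimal usage sketch for each_row_max() (illustrative data):
#out = each_row_max(pd.DataFrame([[1, 5, 2], [7, 3, 6]], columns=['a', 'b', 'c']))
#print(out[['max_val', 'max_col']])   # row 0 -> 5/'b', row 1 -> 7/'a'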
def moving_slope(df, fs=None, win=60, take_abs=False, nan_valid=True):
"""
The purpose of this function is to calculate the slope inside a window for a variable in a rolling method
Inputs
:df: pandas DataFrame or Series, contains time series columns
:fs: str, the base sampling period of the time series, default is the mode value of the difference in time between two consecutive samples
        :win: int, window length, default is 60
        :take_abs: bool, indicates whether to take the absolute value of the slope or not. Default is False
        :nan_valid: bool, whether to ignore nan values when calculating the slope inside each window (nan-aware averaging is used). Default is True
Outputs:
:new_df: pandas DataFrame, new dataframe with calculated rolling slope
"""
    # checking and resampling by the sampling frequency
    isdf = True
    if isinstance(df, pd.Series):
        isdf = False
        df = df.to_frame()
    if fs is None:
        # default sampling period (as stated in the docstring): mode of the time difference between consecutive samples
        fs = pd.Series(df.index).diff().mode()[0]
    df = df.resample(rule = fs, closed = 'right', label = 'right').last()
# creating index matrix for the window
x = np.arange(1, win + 1)[:, np.newaxis].astype(float)
xmat = np.tile(x, (1, df.shape[1]))
# loop initialization
dfidx = df.index
new_df = pd.DataFrame(np.ones(df.shape) * np.nan, columns = ['%s_slope'%i for i in df.columns])
perc1 = int(df.shape[0] * .01)
cnt = perc1
print('\r 0 percent completed...', end = '')
for i in range(win, df.shape[0] + 1):
if i == cnt:
cnt += perc1
print('\r%3d percent completed...'%(cnt // perc1), end = '')
y = df.iloc[i - win : i, :].values
if nan_valid: xy_bar = np.nanmean(np.multiply(xmat, y), axis = 0)
else: xy_bar = np.mean(np.multiply(xmat, y), axis = 0)
xnan = xmat.copy()
xnan[np.isnan(y)] = np.nan
if nan_valid: x_bar = np.nanmean(xnan, axis = 0)
else: x_bar = np.mean(xnan, axis = 0)
if sum(~np.isnan(x_bar)) != 0:
if nan_valid: xsq_bar = np.nanmean(xnan ** 2, axis = 0)
else: xsq_bar = np.mean(xnan ** 2, axis = 0)
x_barsq = x_bar ** 2
# calculating slope
den = x_barsq - xsq_bar
den[den == 0] = np.nan
if nan_valid: m = (x_bar * np.nanmean(y, axis = 0) - xy_bar) / den
            else: m = (x_bar * np.mean(y, axis = 0) - xy_bar) / den
new_df.loc[i - 1] = abs(m) if take_abs else m
print('\r100 percent completed... !!')
new_df.index = dfidx
if not isdf: new_df = new_df.iloc[:, 0]
return new_df
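# A minimal usage sketch for moving_slope() (illustrative only); 'sr' is a hypothetical pandas Series
# with a DatetimeIndex sampled every second, and the fs/win values below are example choices:
#slope_sr = moving_slope(sr, fs='1s', win=60, take_abs=False)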
def standardize(data, zero_std = 1):
"""
This function applies z-standardization on the data
Inputs:
:data: pandas series, dataframe, list or numpy ndarray, input data to be standardized
:zero_std: float, value used to replace std values in case std is 0 for any column. Default is 1
Outputs:
standardized data
"""
dtype = 0
if isinstance(data, pd.Series):
        data = data.to_frame()
dtype = 1
elif isinstance(data, list) or isinstance(data, np.ndarray):
data = pd.DataFrame(data)
dtype = 2
elif isinstance(data, pd.DataFrame): pass
else: raise ValueError('Provided data is inappropriate! ')
data_std = data.std()
data_std[data_std == 0] = zero_std
data = (data - data.mean()) / data_std
if dtype == 0: return data
elif dtype == 1: return data[data.columns[0]]
else : return data.values
def normalize(data, zero_range = 1, method = 'min_max_0_1'):
"""
The purpose of this function is to apply normalization of the input data
Inputs:
:data: pandas series, dataframe, list or numpy ndarray, input data to be standardized
:zero_range: float, value used to replace range values in case range is 0 for any column. Default is 1
:method: {'zero_mean', 'min_max_0_1', 'min_max_-1_1'}.\n
'zero_mean' : normalizes the data in a way that makes the data mean as 0\n
'min_max_0_1' : normalizes the data in a way that the data becomes confined within 0 and 1\n
'min_max_-1_1' : normalizes the data in a way that the data becomes confined within -1 and 1\n
Default is 'min_max_0_1'
Outputs:
Normalized data
"""
dtype = 0
if isinstance(data, pd.Series):
        data = data.to_frame()
dtype = 1
elif isinstance(data, list) or isinstance(data, np.ndarray):
data = pd.DataFrame(data)
dtype = 2
elif isinstance(data, pd.DataFrame): pass
else: raise ValueError('Provided data is inappropriate! ')
data_range = data.max() - data.min()
data_range[data_range == 0] = zero_range
if method == 'min_max_0_1': data = (data - data.min()) / data_range
elif method == 'min_max_-1_1': data = (data - data.min()) / data_range * 2 - 1
elif method == 'zero_mean': data = (data - data.mean()) / data_range
if dtype == 0: return data
elif dtype == 1: return data[data.columns[0]]
else : return data.values
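# Minimal usage sketches for standardize() and normalize() (illustrative only); 'df' is a hypothetical DataFrame:
#z_df = standardize(df)                       # zero mean and unit std for each column
#n_df = normalize(df, method='min_max_0_1')   # each column scaled into the [0, 1] range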
# the function below will divide a string into pieces based on a threshold of length for each line, which especially helps for labeling the plot axis names
def word_length_error(msg):
raise msdExceptions.WordLengthError(msg)
# string : str, the input string
# maxlen : int, maximum allowable length for each line
def name_separation(string, maxlen):
strlen = len(string)
if strlen > maxlen:
words = string.split(' ')
wlen = [len(i) for i in words]
if max(wlen) > maxlen:
word_length_error('One or more words have greater length than the maximum allowable length for each line !!')
newstr = ''
totlen = 0
for i in range(len(wlen)):
if totlen + wlen[i] + 1 > maxlen:
totlen = wlen[i]
newstr += '\n%s'%words[i]
else:
if i == 0:
totlen += wlen[i]
newstr += '%s'%words[i]
else:
totlen += wlen[i] + 1
newstr += ' %s'%words[i]
else: newstr = string
return newstr
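# A minimal usage sketch for name_separation() (illustrative only), wrapping a long axis label
# into lines of at most 20 characters:
#label = name_separation('a very long axis label that needs wrapping', maxlen=20)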
def input_variable_error(msg):
raise msdExceptions.InputVariableError(msg)
def plot_time_series(same_srs, srs=[], segs=None, same_srs_width=[], spans=[], lines=[], linestyle=[], linewidth=[],
fig_title='', show=True, save=False, savepath=None, fname='', spine_dist=.035, spalpha=[],
ylims=[], name_thres=50, fig_x=30, fig_y=4, marker=None, xlabel='Time', ylabel='Data value', xrot=0, x_ha='center', axobj=None):
"""
This function generates plots for time series data along with much additional information if provided as argument.
This function provides many flexibilities such as dividing a time series into multiple subplots to visualize easily.
It can plot multiple time series with proper legends, backgrounds and grids. It can also plot span plots, veritcal lines, each with separate legends.
It allows multiple axes for multiple time series data with separate value ranges.
Inputs:
:same_srs: list of pandas series holding the variables which share same x-axis in matplotlib plot
:srs: list of pandas series holding the variables which share different x axes, default is []
:segs: int or pandas dataframe with two columns 'start' and 'stop' indicating the start and stop of each axis plot segment (subplot).\n
Providing int will split the time series signals into that number of segments and will show as separate row subplot.\n
Default is None
        :same_srs_width: list of floats indicating each line width of corresponding same_srs time series data,\n
must be same size as length of same_srs, default is []
:spans: list of pandas dataframe indicating the 'start' and 'stop' of the span plotting elements, default is []
:lines: list of pandas series where the values will be datetime of each line position, keys will be just serials, default is []
:linestyle: list of str, marker for each line, '' indicates continuous straight line, default is []
:linewidth: list of constant, line width for each line, default is []
:fig_title: title of the entire figure, default is ''
:show: bool, indicating whether to show the figure or not, default is True
:save: bool, indicating whether to save the figure or not, default is False
:savepath: str, location of the directory where the figure will be saved, default is None
:fname: str, figure name when the figure is saved, default is ''
:spine_dist: constant indicating distance of the right side spines from one another if any, default is 0.035
:spalpha: list of constants indicating alpha values for each of the span in spans, default is []
:ylims: list of lists, indicating y axis limits in case we want to keep the limit same for all subplots, default is []
:name_thres: int, maximum allowed characters in one line for plot labels, default is 50
:fig_x: float, horizontal length of the figure, default is 30
        :fig_y: float, vertical length of each row of plot, default is 4
:marker: str, marker for time series plots, default is None
:xlabel: str, label name for x axis for each row, default is 'Time'
:ylabel: str, label name for y axis for each row, default is 'Data value'
:xrot: float, rotation angle of x-axis tick values, default is 0
:x_ha: horizontal alignment of x axis tick values. It can be {'left', 'right', 'center'}. By default, it is 'center'.
:axobj: matplotlib axes object, to draw time series on this axes object plot. Default is None.\n
To use this option, segs must be 1 or None or dataframe with 1 row.
Outputs:
        :axobj: Returns matplotlib axes object if axobj is provided in input.\n
            Otherwise, this function shows or stores plots and doesn't return anything
"""
totsame = len(same_srs)
totdif = len(srs)
if totsame == 0:
same_srs = [srs[0]]
del srs[0]
totsame = 1
totdif -= 1
if len(same_srs_width) == 0: same_srs_width = [.7 + i * .02 for i in range(totsame)]
elif len(same_srs_width) < totsame: same_srs_width += [.7 + i * .02 for i in range(totsame - len(same_srs_width))]
if isinstance(segs, pd.DataFrame): pass
elif isinstance(segs, int):
tchunk = (same_srs[0].index[-1] - same_srs[0].index[0]) / segs
trange = [same_srs[0].index[0] + tchunk * i for i in range(segs)] + [same_srs[0].index[-1]]
segs = pd.DataFrame([trange[:-1], trange[1:]], index = ['start', 'stop']).T
else: segs = pd.DataFrame([[same_srs[0].index[0], same_srs[0].index[-1]]], columns = ['start', 'stop'])
segs.reset_index(drop = True, inplace = True)
totsp = len(spans)
totline = len(lines)
if isinstance(linestyle, list):
if totline > 0:
if len(linestyle) < totline: linestyle += ['-' for _ in range(totline - len(linestyle))]
elif isinstance(linestyle, str): linestyle = [linestyle for _ in range(totline)]
else: input_variable_error('Invalid line style! linestyle should be str or list of str with equal length of lines')
if isinstance(linewidth, list):
if totline > 0:
            if len(linewidth) < totline: linewidth += [1 for _ in range(totline - len(linewidth))]
elif isinstance(linewidth, int) or isinstance(linewidth, float): linewidth = [linewidth for _ in range(totline)]
else: input_variable_error('Invalid line width! linewidth should be int/float or list of int/float with equal length of lines')
nrows = segs.shape[0]
# colors = ['darkcyan', 'coral', 'darkslateblue', 'limegreen', 'crimson', 'purple', 'blue', 'khaki', 'chocolate', 'forestgreen']
colors = get_named_colors()
spcolors = ['darkcyan', 'coral', 'purple', 'red', 'khaki', 'gray', 'darkslateblue', 'limegreen', 'red', 'blue']
    lcolors = ['crimson', 'darkcyan' , 'darkolivegreen', 'palevioletred', 'indigo', 'chocolate', 'blue', 'forestgreen', 'grey', 'magenta']
    if totsame + totdif > len(colors): colors = get_color_from_cmap(totsame + totdif, 'rainbow')
    if totsp > len(spcolors): spcolors = get_color_from_cmap(totsp, 'rainbow')
    if totline > len(lcolors): lcolors = get_color_from_cmap(totline, 'rainbow')
if spalpha == []: spalpha = [.3 for _ in range(totsp)]
stamp_fl = True if isinstance(same_srs[0].index[0], pd.Timestamp) else False
if fig_title == '': fig_title = 'Time Series Visualization'
if axobj is None:
fig, ax = plt.subplots(figsize = (fig_x, fig_y * nrows), nrows = nrows)
fig.suptitle(fig_title, y = 1.03, fontsize = 20, fontweight = 'bold')
else:
ax = axobj
if nrows == 1: ax = [ax]
for i in range(nrows):
lg = [[], []]
st = segs['start'].iloc[i]
ed = segs['stop'].iloc[i]
# line plots
for j in range(totline):
ln = lines[j][(lines[j] >= st) & (lines[j] <= ed)]
if ln.shape[0] > 0:
for k in range(ln.shape[0]):
l = ax[i].axvline(ln.iloc[k], alpha = 1, color = lcolors[j], lw = linewidth[j], ls = linestyle[j])
lg[0].append(l)
lg[1].append(lines[j].name)
# time series plots
for j in range(totdif):
tx = ax[i].twinx()
l, = tx.plot(srs[j].loc[st : ed], color = colors[totsame + j], zorder = 30 + j * 4, marker = marker)
lg[0].append(l)
lg[1].append(srs[j].name)
tx.spines['right'].set_position(('axes', 1 + j * spine_dist))
tx.spines['right'].set_color(colors[totsame + j])
tx.tick_params(axis = 'y', labelcolor = colors[totsame + j])
tx.set_ylabel(name_separation(srs[j].name, name_thres), color = colors[totsame + j])
tx.grid(False)
if len(ylims) > 1:
if len(ylims[1 + j]) == 2: tx.set_ylim(ylims[1 + j][0], ylims[1 + j][1])
for j in range(totsame):
l, = ax[i].plot(same_srs[j].loc[st : ed], color = colors[j], zorder = j * 4, lw = same_srs_width[j], marker = marker)
lg[0].append(l)
lg[1].append(same_srs[j].name)
# span plots
lgfl = [False for _ in range(totsp)]
for j in range(totsp):
for k in range(spans[j].shape[0]):
spst = spans[j]['start'].iloc[k]
sped = spans[j]['stop'].iloc[k]
if spst < st and (sped > st and sped <= ed):
l = ax[i].axvspan(st, sped, color = spcolors[j], alpha = spalpha[j])
if not lgfl[j]:
lgfl[j] = True
elif spst >= st and sped <= ed:
l = ax[i].axvspan(spst, sped, color = spcolors[j], alpha = spalpha[j])
if not lgfl[j]:
lgfl[j] = True
elif (spst >= st and spst < ed) and sped > ed:
l = ax[i].axvspan(spst, ed, color = spcolors[j], alpha = spalpha[j])
if not lgfl[j]:
lgfl[j] = True
elif spst <= st and sped >= ed:
l = ax[i].axvspan(st, ed, color = spcolors[j], alpha = spalpha[j])
if not lgfl[j]:
lgfl[j] = True
if lgfl[j]:
lg[0].append(l)
lg[1].append(spans[j].columns.name)
# finishing axis arrangements
ax[i].set_xlabel(xlabel)
ax[i].set_ylabel(ylabel if totsame > 1 else name_separation(same_srs[0].name, name_thres))
if stamp_fl: ax[i].set_title('from %s to %s'%(st.strftime('%Y-%m-%d %H-%M-%S'), ed.strftime('%Y-%m-%d %H-%M-%S')), loc = 'right')
ax[i].legend(lg[0], lg[1], loc = 3, bbox_to_anchor = (0, .98), ncol = len(lg[0]), fontsize = 10)
ax[i].set_xlim(st, ed)
ax[i].grid(True)
plt.setp(ax[i].get_xticklabels(), rotation=xrot, ha=x_ha)
if len(ylims) > 0:
if len(ylims[0]) == 2: ax[i].set_ylim(ylims[0][0], ylims[0][1])
if axobj is None:
fig.tight_layout()
if show: plt.show()
if save and savepath is not None:
os.makedirs(savepath, exist_ok=True)
if fname == '': fname = fig_title.replace('.', '')
fig.savefig('%s/%s.jpg'%(savepath, fname), bbox_inches = 'tight')
plt.close()
else:
return ax[0]
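# A minimal usage sketch for plot_time_series() (illustrative only); 'sr1' and 'sr2' are hypothetical
# pandas Series sharing the same time index, and segs=3 splits the plot into 3 row subplots:
#plot_time_series(same_srs=[sr1], srs=[sr2], segs=3, fig_title='demo time series plot')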
def plot_heatmap(data, keep='both', rem_diag=False, cmap='gist_heat', cbar=True, stdz=False, annotate=False, fmt=None, vmin=None, vmax=None, center=None,
show=True, save=False, savepath=None, figsize=(30, 10), fig_title='', file_name='', xrot=90, axobj=None):
"""
This function draws table like heatmap for a matrix data (should be pandas DataFrame)
Inputs:
:data: pandas DataFrame, data to be plotted as heatmap
:stdz: bool, whether to standardize the data or not, default is False
:keep: {'both', 'up', 'down'}, which side of the heatmap matrix to plot, necessary for correlation heatmap plot, default is 'both'
        :rem_diag: bool, whether to remove the diagonal if keep is not 'both', default is False
:cmap: str, matplotlib colormap, default is 'gist_heat'
:cbar: bool, show the colorbar with the heatmap or not
:annotate: bool, whether to show the values or not
:fmt: str, value format for printing if annotate is True, default is None
:show: bool, show the heatmap or not, default is True
:save: bool, save the figure or not, default is False
:savepath: str, path for saving the figure, default is None
        :figsize: tuple, figure size, default is (30, 10)
:fig_title: str, title of the heatmap, default is 'Heatmap of {data.columns.name}'
:file_name: str, name of the image as will be saved in savepath, default is fig_title
        :xrot: float, rotation angle of x-axis labels, default is 90
:axobj: matplotlib axes object, to draw time series on this axes object plot. Default is None
Outputs:
        :ax: if axobj is provided as a matplotlib axes object, then this function returns the axes object after drawing
"""
ylb = data.index.name
xlb = data.columns.name
if stdz:
data_std = data.std()
data_std[data_std == 0] = 1
for c in data.columns:
if data[c].replace([0, 1], np.nan).dropna().shape[0] == 0:
data_std.loc[c] = 1
data = (data - data.mean()) / data_std
if keep == 'up':
k = 1 if rem_diag else 0
data = data.where(np.triu(np.ones(data.shape), k = k).astype(bool))
elif keep == 'down':
k = -1 if rem_diag else 0
data = data.where(np.tril(np.ones(data.shape), k = k).astype(bool))
if axobj is None: fig, ax = plt.subplots(figsize = figsize)
else: ax = axobj
sns.heatmap(data, ax = ax, linewidths = 0, cbar = cbar, cmap = cmap, annot = annotate, fmt = fmt, center = center, vmin = vmin, vmax = vmax)
ax.set_xlabel(xlb)
ax.set_ylabel(ylb)
ax.tick_params(axis = 'y', rotation = 0)
ax.tick_params(axis = 'x', rotation = xrot)
if fig_title == '': fig_title = 'Heatmap of %s'%data.columns.name if data.columns.name not in ['', None] else 'Heatmap'
ax.set_title(fig_title)
if axobj is None:
fig.tight_layout()
if show: plt.show()
if save and savepath is not None:
os.makedirs(savepath, exist_ok=True)
if file_name == '': file_name = fig_title
fig.savefig('%s/%s.jpg'%(savepath, file_name), bbox_inches = 'tight')
plt.close()
else:
return ax
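# A minimal usage sketch for plot_heatmap() (illustrative only), drawing the upper triangle of the
# correlation matrix of a hypothetical DataFrame 'df':
#plot_heatmap(df.corr(), keep='up', rem_diag=True, annotate=True, fmt='.2f', figsize=(8, 6))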
def data_gridplot(data, idf=[], idf_pref='', idf_suff='', diag='hist', bins=25, figsize=(16, 12), alpha=.7,
s=None, lg_font='x-small', lg_loc=1, fig_title='', show_corr=True, show_stat=True, show=True,
save=False, savepath=None, fname='', cmap=None):
"""
This function creates a grid on nxn subplots showing scatter plot for each columns of 'data', n being the number of columns in 'data'.
This function also shows histogram/kde/line plot for each columns along grid diagonal.
The additional advantage is that we can separate each subplot data by a classifier item 'idf' which shows the class names for each row of data.
This functionality is specially helpful to understand class distribution of the data.
It also shows the correlation values (non-diagonal subplots) and general statistics (mean, median etc in diagonal subplots) for each subplot.
Inputs:
:data: pandas dataframe, list or numpy ndarray of rank-2, columns are considered as features
:idf: pandas series with length equal to total number of samples in the data, works as clussifier of each sample data,\n
specially useful in clustering, default is []
:idf_pref: str, idf prefix, default is ''
:idf_suff: str, idf suffix, default is ''
        :diag: {'hist', 'kde', 'plot'}, selection of diagonal plot, default is 'hist'
:bins: int, number of bins for histogram plot along diagonal, default is 25
:figsize: tuple, size of the whole figure, default is (16, 12)
        :alpha: float (0~1), transparency parameter for scatter plot, default is .7
        :s: float, point size for scatter plot, default is None
        :lg_font: float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}, legend font size, default is 'x-small'
        :lg_loc: int, location parameter for plot legend, default is 1
        :fig_title: str, title of the whole figure, default is 'All Columns Grid Plot'
:show_corr: bool, whether to show correlation value for each scatter plot, default is True
        :show_stat: bool, whether to show mean and std for each column of data along diagonal plots, default is True
:show: bool, whether to show the figure or not, default is True
:save: bool, whether to save the graph or not, default is False
:savepath: str, path to store the graph, default is None
        :fname: str, name used to store the graph in savepath, default is fig_title
:cmap: str, matplotlib color map, default is None ('jet')
Outputs:
        This function doesn't return anything.
"""
if isinstance(idf, list) or isinstance(idf, np.ndarray): idf = pd.Series(idf, dtype = float)
if isinstance(data, pd.Series): data = data.to_frame()
elif isinstance(data, list) or isinstance(data, np.ndarray): data = pd.DataFrame(data, columns = ['col_%d'%i for i in range(len(data))])
elif isinstance(data, pd.DataFrame): pass
    else: input_variable_error('invalid data passed inside the function!')
if data.shape[1] <= 1: raise ValueError('Data should include at least 2 columns!')
if idf.shape[0] == 0: idf = pd.Series(np.zeros(data.shape[0]), index = data.index)
colors = ['darkcyan', 'coral', 'limegreen', 'saddlebrown', 'grey', 'darkgoldenrod', 'forestgreen', 'purple',
'crimson', 'cornflowerblue', 'darkslateblue', 'lightseagreen', 'darkkhaki', 'maroon', 'magenta', 'k']
if data.shape[1] > len(colors) and cmap is None: cmap = 'jet'
if cmap is not None: colors = get_color_from_cmap(data.shape[1], cmap = cmap)
fig, ax = plt.subplots(figsize = figsize, nrows = data.shape[1], ncols = data.shape[1])
if fig_title == '': fig_title = 'All Columns Grid Plot'
fig.suptitle(fig_title, fontweight = 'bold', y = 1.03)
col = data.columns
idfs = sorted(idf.unique())
idfslen = len(idfs)
for i in range(data.shape[1]):
for j in range(data.shape[1]):
coldata = data[col[[i, j]]].dropna()
for k in range(idfslen):
idx = idf[idf == idfs[k]].index.intersection(coldata.index)
if idx.shape[0] > 0:
color = colors[j] if idfslen == 1 else colors[k]
if i == j:
kdy = plot_diag(coldata.iloc[:, 0].loc[idx], bins, ax[i, j], diag, color, '%s%s%s'%(idf_pref, idfs[k], idf_suff))
if k == 0: txt = 'mean: %.3f, std: %.3f'%(np.nanmean(data[col[i]]), np.nanstd(data[col[i]])) if show_stat else ''
if diag == 'plot':
txt_x = coldata.index.min()
txt_y = coldata.iloc[:, 0].min()
elif diag == 'hist':
txt_x = coldata.iloc[:, 0].min()
txt_y = .01
elif diag == 'kde':
txt_x = coldata.iloc[:, 0].min()
txt_y = kdy
else:
if k == 0: txt = 'corr: %.3f'%coldata.corr().values[0, 1] if show_corr else ''
ax[i, j].scatter(coldata[col[j]].loc[idx].values, coldata[col[i]].loc[idx].values, color = color, alpha = alpha, s = s, label = '%s%s%s'%(idf_pref, idfs[k], idf_suff))
txt_x = coldata[col[j]].min()
txt_y = coldata[col[i]].min()
if idfslen > 1: ax[i, j].legend(fontsize = lg_font, loc = lg_loc)
ax[i, j].text(txt_x, txt_y, s = txt, ha = 'left', va = 'bottom')
if i == 0: ax[i, j].set_title(col[j])
if j == 0: ax[i, j].set_ylabel(col[i])
ax[i, j].grid(True)
fig.tight_layout()
if show: plt.show()
if save and savepath != '':
os.makedirs(savepath, exist_ok=True)
if fname == '': fname = fig_title
fig.savefig('%s/%s.jpg'%(savepath, fname), bbox_inches = 'tight')
plt.close()
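# A minimal usage sketch for data_gridplot() (illustrative only); 'df' is a hypothetical DataFrame of
# numerical features and 'labels' is a hypothetical Series used as the classifier 'idf':
#data_gridplot(df, idf=labels, diag='kde', fig_title='pairwise grid plot of df')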
# associative function for data_gridplot()
def plot_diag(arr, bins, ax, diag, color, label):
if diag == 'hist': ax.hist(arr.values, bins = bins, color = color, label = label)
elif diag == 'kde':
x = np.linspace(arr.min(), arr.max(), 500)
ax.plot(x, gaussian_kde(arr.values)(x), color = color, label = label)
return np.min(gaussian_kde(arr.values)(x))
elif diag == 'plot': ax.plot(arr.index, arr.values, color = color, label = label)
else: print('Invalid diagonal plot type!')
return None
def get_color_from_cmap(n, cmap='jet', rng=[.05, .95]):
"""
this function gets color from matplotlib colormap
Inputs:
:n: int, number of colors you want to create
:cmap: str, matplotlib colormap name, default is 'jet'
:rng: array, list or tuple of length 2, edges of colormap, default is [.05, .95]
Outputs:
It returns a list of colors from the selected colormap
"""
return [get_cmap(cmap)(i) for i in np.linspace(rng[0], rng[1], n)]
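# A minimal usage sketch for get_color_from_cmap(): pick 5 evenly spaced colors from the 'viridis' colormap:
#colors = get_color_from_cmap(5, cmap='viridis')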
def get_named_colors():
"""
This function returns 36 custom selected CSS4 named colors available in matplotlib
Outputs:
list of colors/color_names available in matplotlib
"""
colors = ['darkcyan', 'crimson', 'coral', 'forestgreen', 'purple', 'magenta', 'khaki', 'maroon',
'darkslategray', 'brown', 'gray', 'darkorange', 'mediumseagreen', 'royalblue', 'midnightblue',
'turquoise', 'rosybrown', 'darkgoldenrod', 'olive', 'saddlebrown', 'darkseagreen', 'deeppink',
'aqua', 'lawngreen', 'tab:red', 'gold', 'tan', 'peru', 'violet', 'thistle', 'steelblue',
'darkkhaki', 'chocolate', 'mediumspringgreen', 'rebeccapurple', 'tomato']
return colors
def plot_table(_data, cell_width=2.5, cell_height=0.625, font_size=14, header_color='#003333', row_colors=['whitesmoke', 'w'],
edge_color='w', bbox=[0, 0, 1, 1], header_cols=0, index_cols=0, fig_name='Table', save=False, show=True, savepath=None, axobj=None,
**kwargs):
"""
    This function creates a figure for a pandas dataframe table. It drops the dataframe index at first.
    So, if the index is necessary to show, keep it as a dataframe column.
Inputs:
        :_data: pandas dataframe, which will be used to make the table and save it as a matplotlib figure. The index is removed in the beginning.\n
So, if index is necessary to show, keep it as a dataframe column.
:cell_width: float, each cell width, default is 2.5
:cell_height: float, each cell height, default is 0.625
:font_size: float, font size for table values, default is 14
:header_color: str or rgb color code, column and index color, default is '#003333'
:row_colors: str or rgb color code, alternating colors to separate consecutive rows, default is ['whitesmoke', 'w']
:edge_color: str or rgb color code, cell border color, default is 'w'
:bbox: list of four floats, edges of the table to cover figure, default is [0,0,1,1]
:header_cols: int, number of initial rows to cover column names, default is 0
:index_cols: int, number of columns to cover index of dataframe, default is 0
:fig_name: str, figure name to save the figure when save is True, default is 'Table'
:save: bool, whether to save the figure, default is False
:show: bool, whether to show the plot, default is True
        :savepath: path to store the figure, default is None
        :axobj: matplotlib axes object, to include the table in another figure, default is None
Outputs:
:ax: matplotlib axes object used for plotting, if provided as axobj
"""
    data = _data.reset_index(drop=True)
if axobj is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([cell_width, cell_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
else:
ax = axobj
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for rc, cell in mpl_table._cells.items():
cell.set_edgecolor(edge_color)
if rc[0] == 0 or rc[1] < header_cols or rc[1] == 0 or rc[0] < index_cols:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[rc[0]%len(row_colors) ])
if axobj is None:
fig.tight_layout()
if save and savepath is not None:
os.makedirs(savepath, exist_ok=True)
fig.savefig('%s/%s.png'%(savepath, fig_name), bbox_inches='tight')
if show: plt.show()
else: plt.close()
else:
return ax
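# A minimal usage sketch for plot_table() (illustrative data only):
#tab = pd.DataFrame({'metric': ['rmse', 'mae'], 'value': [0.12, 0.08]})
#plot_table(tab, font_size=12, fig_name='metrics_table', save=False, show=True)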
def feature_evaluator(data, label_name, label_type_num, n_bin=40, is_all_num=False, is_all_cat=False, num_cols=[],
cat_cols=[], cmap='gist_rainbow', fig_width=30, fig_height=5, rotation=40, show=True, save=False,
savepath=None, fname=None, fig_title=''):
"""
This function calculates feature importance by statistical measures.
    Here it's okay to use labels before converting into one-hot encoding; it's even better not to convert into one-hot.
The process is below-
--------- feature evaluation criteria --------
for numerical feature evaluation:\n
group- a variable is divided into groups based on a constant interval of its values\n
group diversity- std of mean of every group\n
group uncertainty- mean of std of points for every group\n
for categorical feature evaluation:\n
group diversity- mean of std of percentage of categories inside a feature\n
group uncertainty- mean of std of percentage of groups for each category\n
group confidence (Importance)- group diversity / group uncertainty
Inputs:
:data: must be a pandas dataframe containing feature and label data inside it\n
(it must contain only features and labels, no unnecessary columns are allowed)
:label_name: list; containing names of the columns used as labels
:label_type_num: list of bool, containing flag info if the labels are numerical or not (must be corresponding to label_name)
        :n_bin: int, number of divisions (bins) of the label values.\n
        If any label is categorical, n_bin for that label will be equal to the number of categories.\n
        Default is 40 for numerical labels; if None is passed, 25 bins are used.
        :is_all_num: bool, a shortcut flag indicating whether all the features are numerical
        :is_all_cat: bool, a shortcut flag indicating whether all the features are categorical
:num_cols: list, containing name of numerical columns if needed
:cat_cols: list, containing categorical feature names if needed
:cmap: str, is the matplotlib colormap name, default is 'gist_rainbow'
:fig_width: float, is the width of figure, default is 30
:fig_height: float, is the height of figure, default is 5
:rotation: float, rotational angle of x axis labels of the figure, default is 40
:show: bool, whether to show the graph or not, default is True
:save: bool, whether to save the figure or not, default is False
        :savepath: str, path where the figure will be saved, default is None
:fname: str, filename which will be used for saving the figure, default is None
        :fig_title: str, title of the figure; if empty, 'Feature Confidence Evaluation' is used and a ' for Numerical/Categorical Features' suffix is appended automatically
Outputs:
:num_lab_confids: dict, contains confidence values for each label
"""
# separating numerical and categorical feature data
if is_all_num:
num_cols = data.drop(label_name, axis = 1).columns.to_list()
elif is_all_cat:
cat_cols = data.drop(label_name, axis = 1).columns.to_list()
else:
if len(cat_cols) > 0:
num_cols = list(data.drop(list(cat_cols) + label_name, axis = 1).columns)
elif len(num_cols) > 0:
cat_cols = list(data.drop(list(num_cols) + label_name, axis = 1).columns)
num_data = data[num_cols].copy()
cat_data = data[cat_cols].copy()
# numerical feature normalization (min_max_normalization)
num_data = (num_data - num_data.min()) / (num_data.max() - num_data.min())
# taking in numerical and categorical labels
label_name = np.array(label_name)
num_labels = data[label_name[label_type_num]].copy()
cat_labels = data[label_name[~np.array(label_type_num)]]
# n_bin arrangements
default_n_bin = 25
if n_bin is None:
n_bin = [ default_n_bin for i in label_type_num]
else:
n_bin = [ n_bin for i in label_type_num]
num_bin = np.array(n_bin)[label_type_num]
cat_bin = [cat_labels[i].unique().shape[0] for i in cat_labels.columns]
# loop definitions
labels_list = [num_labels, cat_labels]
labels_tag = ['Numerical label', 'Categorical label']
nbins = [num_bin, cat_bin]
fig_title = 'Feature Confidence Evaluation' if fig_title == '' else fig_title
# to collect all values of evaluation
num_lab_confids = {}
# loop for numerical and categorical labels
for l, (labels, nbin) in enumerate(zip(labels_list, nbins)):
num_lab_confids[labels_tag[l]] = {}
# plot definitions
nrows = labels.columns.shape[0]
if not is_all_cat and nrows > 0:
fig_num, ax_num = plt.subplots(figsize = (fig_width, fig_height * nrows), nrows = nrows)
fig_tnum = fig_title + ' for Numerical Features'
fig_num.suptitle(fig_tnum, y = 1.06, fontsize = 20, fontweight = 'bold')
if not is_all_num and nrows > 0:
fig_cat, ax_cat = plt.subplots(figsize = (fig_width, fig_height * nrows), nrows = nrows)
fig_tcat = fig_title + ' for Categorical Features'
fig_cat.suptitle(fig_tcat, y = 1.06, fontsize = 20, fontweight = 'bold')
# special case for nrows = 1
if nrows == 1:
if not is_all_cat:
ax_num = [ax_num]
if not is_all_num:
ax_cat = [ax_cat]
# loops for labels
for i, label in enumerate(labels.columns):
print('Evaluation for label : %s (%s) ....'%(label, labels_tag[l]), end = '')
# dividing the data set into classes depending on the label/target values
divs = pd.cut(labels[label], bins = nbin[i])
divs = divs.replace(to_replace = divs.unique().categories, value = np.arange(divs.unique().categories.shape[0]))
if not is_all_cat:
# ------------- calculation for numerical data ----------------------
# grouping features depending on the segments of the label
num_data['%s_segments'%label] = divs
group = num_data.groupby('%s_segments'%label)
# calculation of confidence of features and sorting in descending order
grp_confid = group.mean().std() / group.std().mean()
grp_confid = grp_confid.sort_values(ascending = False).to_frame().T
num_lab_confids[labels_tag[l]]['%s_numerics'%label] = grp_confid
# removing segment column
num_data.drop(['%s_segments'%label], axis = 1, inplace = True)
# plotting
sns.barplot(data = grp_confid, ax = ax_num[i], palette = cmap)
ax_num[i].set_xticklabels(labels = grp_confid.columns, rotation = rotation, ha = 'right')
ax_num[i].set_xlabel('Features', fontsize = 15)
ax_num[i].set_ylabel('Confidence Value', fontsize = 15)
ax_num[i].grid(axis = 'y', color = 'white', alpha = 1)
ax_num[i].set_title('Feature Confidence for label: %s'%label, fontsize = 15)
if not is_all_num:
# ------------- calculation for categorical data ----------------------
# adding label and category of label columns in the categorical data set
cat_data[label] = labels[label].copy()
cat_data['%s_segments'%label] = divs
# calculation of mean of std of percentage
cat_confid = pd.Series(index = cat_data.columns[:-2], dtype=np.float64)
for j, col in enumerate(cat_data.columns[:-2]):
temp = cat_data.groupby(['%s_segments'%label, col])[label].count().unstack(level = 0).fillna(0)
temp = temp / temp.sum()
cat_confid.loc[col] = temp.std().mean() * temp.T.std().mean()
# sorting confidence values according to descending order
cat_confid = cat_confid.sort_values(ascending = False).to_frame().T
num_lab_confids[labels_tag[l]]['%s_categs'%label] = cat_confid
# removing the label columns
cat_data.drop(['%s_segments'%label, label], axis = 1, inplace = True)
# plotting
sns.barplot(data = cat_confid, ax = ax_cat[i], palette = cmap)
ax_cat[i].set_xticklabels(labels = cat_confid.columns, rotation = rotation, ha = 'right')
ax_cat[i].set_xlabel('Features', fontsize = 15)
ax_cat[i].set_ylabel('Confidence Value', fontsize = 15)
ax_cat[i].grid(axis = 'y', color = 'white', alpha = 1)
ax_cat[i].set_title('Feature Confidence for label: %s'%label, fontsize = 15)
print(' done')
if not is_all_cat and nrows > 0:
fig_num.tight_layout()
if save and savepath is not None:
if fname is None: fname = fig_tnum
if savepath[-1] != '/': savepath += '/'
fig_num.savefig('%s%s.jpg'%(savepath, fname), bbox_inches = 'tight')
if not is_all_num and nrows > 0:
fig_cat.tight_layout()
if save and savepath is not None:
if fname is None: fname = fig_tcat
if savepath[-1] != '/': savepath += '/'
fig_cat.savefig('%s%s.jpg'%(savepath, fname), bbox_inches = 'tight')
if show: plt.show()
if nrows > 0: plt.close()
return num_lab_confids
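# Illustrative usage sketch for feature_evaluator (not part of the original module). The synthetic
# columns 'feat_a', 'feat_b', 'color' and 'target' are made up; it assumes numpy, pandas, matplotlib
# and seaborn are importable as in the module above.
def _demo_feature_evaluator():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(0)
    n = 200
    # two numerical features, one categorical feature and one numerical label
    demo = pd.DataFrame({'feat_a': rng.normal(size=n),
                         'feat_b': rng.uniform(size=n),
                         'color': rng.choice(['red', 'green', 'blue'], size=n),
                         'target': rng.normal(size=n)})
    confids = feature_evaluator(demo, label_name=['target'], label_type_num=[True],
                                n_bin=10, cat_cols=['color'], show=False)
    return confids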
def get_weighted_scores(score, y_test):
"""
    This function calculates the weighted average of the score values obtained from the class_result function
    Inputs:
        :score: pandas DataFrame, contains classification scores obtained from the msd.class_result() function
:y_test: numpy array, pandas Series or python list, contains true classification labels
Outputs:
:_score: pandas DataFrame, output score DataFrame with an additional column containing weighted score values
"""
_score = score.copy()
counts = pd.Series(y_test).value_counts().sort_index().to_frame().T
    weighted = (_score.drop('accuracy').drop('average', axis=1).values * counts.values / counts.values.sum()).sum(axis=1).tolist()
    _score['weighted_average'] = weighted + [_score['average'].loc['accuracy']]
return _score
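# Illustrative usage sketch for get_weighted_scores (not part of the original module). The score
# table below is hand-built in the same layout class_result() produces; all numbers are made up.
def _demo_get_weighted_scores():
    import numpy as np
    import pandas as pd
    score = pd.DataFrame({'0': [0.80, 0.75, 0.70, 0.72, np.nan],
                          '1': [0.90, 0.85, 0.80, 0.82, np.nan],
                          'average': [0.85, 0.80, 0.75, 0.77, 0.78]},
                         index=['specificity', 'precision', 'recall', 'f1_score', 'accuracy'])
    y_test = [0, 0, 0, 1, 1]
    # adds a 'weighted_average' column weighted by the class frequencies found in y_test
    return get_weighted_scores(score, y_test)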
def class_result(y, pred, out_confus=False):
"""
    This function computes classification performance metrics and, optionally, the confusion matrix.
    It calculates specificity, precision, recall, f1_score and accuracy for each class.
    Inputs:
        :y: numpy 1-d array, list or pandas Series, true classification labels; a vector holding the class name/index of each sample
        :pred: numpy 1-d array, list or pandas Series, predicted class names/indices corresponding to y.\n
        Must be the same length as y
:out_confus: bool, returns confusion matrix if set True. Default is False
Outputs:
:result: DataFrame containing precision, recall and f1 scores for all labels
:con_mat: DataFrame containing confusion matrix, depends on out_confus
"""
if not any([isinstance(y, np.ndarray), isinstance(y, pd.Series)]):
y = np.array(y)
if not any([isinstance(pred, np.ndarray), isinstance(pred, pd.Series)]):
pred = np.array(pred)
y = y.ravel()
pred = pred.ravel()
y_labels = set(np.unique(y))
p_labels = set(np.unique(pred))
labels = sorted(list(y_labels | p_labels))
con_mat = pd.DataFrame(np.zeros((len(labels), len(labels))).astype(int),
columns = labels, index = labels) #row--> true, col--> pred
con_mat.index.name = 'true'
con_mat.columns.name = 'prediction'
for lab_true in labels:
vy = set(np.where(y == lab_true)[0])
for lab_pred in labels:
vpred = set(np.where(pred == lab_pred)[0])
con_mat[lab_pred].loc[lab_true] = len(vy & vpred)
prec = []
recall = []
f_msr = []
acc = []
spec = []
sum_pred = con_mat.sum()
sum_true = con_mat.T.sum()
total = y.shape[0]
for label in labels:
tn = total - (sum_true.loc[label] + sum_pred.loc[label] - con_mat[label].loc[label])
fp = sum_pred.loc[label] - con_mat[label].loc[label]
if (tn+fp) != 0:
sp = tn / (tn + fp)
else:
sp = 0
spec.append(sp)
if sum_pred.loc[label] != 0:
pr = con_mat[label].loc[label] / sum_pred.loc[label]
else:
pr = 0
prec.append(pr)
if sum_true.loc[label] != 0:
rec = con_mat[label].loc[label] / sum_true.loc[label]
else:
rec = 0
recall.append(rec)
if pr + rec != 0:
f = 2 * pr * rec / (pr + rec)
else:
f = 0
f_msr.append(f)
acc.append(np.nan)
result = pd.DataFrame([spec, prec, recall, f_msr], columns = labels, index = ['specificity', 'precision', 'recall', 'f1_score'])
result.columns = [str(c) for c in result.columns]
avg = result.T.mean()
avg.name = 'average'
result = pd.concat((result, avg), axis = 1, sort = False)
result.columns.name = 'labels'
acc = pd.DataFrame([acc + [np.trace(con_mat.values) / y.shape[0]]], columns = result.columns, index = ['accuracy'])
result = pd.concat((result, acc), axis = 0, sort = False)
result = get_weighted_scores(result, y)
if out_confus:
return result, con_mat
else:
return result
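# Illustrative usage sketch for class_result (not part of the original module). The true and
# predicted label vectors are made up for demonstration.
def _demo_class_result():
    y_true = [0, 0, 1, 1, 2, 2, 2, 1]
    y_pred = [0, 1, 1, 1, 2, 2, 0, 1]
    # returns per-class specificity/precision/recall/f1 (plus average, accuracy and
    # weighted_average columns) and, because out_confus=True, the confusion matrix as well
    result, con_mat = class_result(y_true, y_pred, out_confus=True)
    return result, con_mat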
def rsquare_rmse(y, pred):
"""
This function calculates R-square value (coefficient of determination) and root mean squared error value for regression evaluation.
Inputs:
:y: numpy 1-d array, list or pandas Series, contains true regression labels
:pred: numpy 1-d array, list or pandas Series, prediction values against y. Must be same size as y.
Outputs:
:r_sq: calculated R-square value (coefficient of determination)
:rmse: root mean square error value
"""
if not any([isinstance(y, np.ndarray), isinstance(y, pd.Series)]):
y = np.array(y)
if not any([isinstance(pred, np.ndarray), isinstance(pred, pd.Series)]):
pred = np.array(pred)
y = y.ravel()
pred = pred.ravel()
y_mean = np.mean(y)
r_sq = 1 - np.sum((pred - y) ** 2) / np.sum((y - y_mean) ** 2)
rmse = np.sqrt(np.mean((y - pred) ** 2))
return r_sq, rmse
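# Illustrative usage sketch for rsquare_rmse (not part of the original module); the regression
# values below are made up.
def _demo_rsquare_rmse():
    y_true = [3.0, 2.5, 4.0, 5.5]
    y_pred = [2.8, 2.7, 4.2, 5.0]
    r_sq, rmse = rsquare_rmse(y_true, y_pred)
    print('R-square: %.4f, RMSE: %.4f' % (r_sq, rmse))
    return r_sq, rmse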
def one_hot_encoding(arr, class_label=None, ret_label=False, out_type='ndarray'):
"""
This function converts categorical values into one hot encoded format.
Inputs:
:arr: numpy 1-d array, list or pandas Series, containing all class names/indices
        :class_label: numpy array, list or pandas Series, containing the unique class labels present in 'arr'; default is None
        :ret_label: bool, whether to also return the class labels alongside the encoded data, default is False
        :out_type: {'ndarray', 'dataframe', 'list'}, data type of the output
Outputs:
:label: one hot encoded output data, can be ndarray, list or pandas DataFrame based on 'out_type'
:class_label: names of class labels corresponding to each column of one hot encoded data.\n
It is provided only if ret_label is set as True.
"""
if isinstance(arr, pd.Series): pass
    else: arr = pd.Series(arr)  # api: pandas.Series
"""
Cluster ENCODE signal files using sliding windows
"""
import sys
import numpy as np
import glob
#read file
#provide source and automatically cluster the bed files
folder = sys.argv[1]
dataFiles = glob.glob(folder + '*.bed')
for dataFile in dataFiles:
#
# dataFile = '../../data/histones/hmec/ENCFF154XFN_H3K27ac.bed'
# dataFile = '../../data/histones/hmec/ENCFF065TIH_H3K4me3.bed'
# dataFile = '../../data/histones/hmec/ENCFF291WFP_H3K27me3.bed'
# dataFile = '../../data/histones/hmec/ENCFF336DDM_H3K4me1.bed'
# #dataFile = 'tf_experimentallyValidated.bed'
data = np.loadtxt(dataFile, dtype='object')
#sort the file for this to work
#map chr to numbers first
chrMap = {'chr1': 1, 'chr2' : 2, 'chr3' : 3, 'chr4' : 4, 'chr5' : 5, 'chr6' : 6, 'chr7' : 7,
'chr8' : 8, 'chr9' : 9, 'chr10' : 10, 'chr11' : 11, 'chr12' : 12, 'chr13' : 13,
'chr14' : 14, 'chr15' : 15, 'chr16' : 16, 'chr17' : 17, 'chr18' : 18, 'chr19' : 19,
'chr20' : 20, 'chr21' : 21, 'chr22' : 22, 'chrX' : 23, 'chrY' : 24}
mappedData = []
for mark in data:
if mark[0] not in chrMap:
continue
mappedChr = chrMap[mark[0]]
mappedData.append([mappedChr, int(mark[1]), int(mark[2]), mark[3], mark[4]])
mappedData = np.array(mappedData)
sortedData = mappedData[np.lexsort((mappedData[:,1], mappedData[:,0]))]
data = sortedData
import pandas as pd
    df = pd.DataFrame(mappedData, columns=['chr', 'start', 'end', '.', 'signal'])  # api: pandas.DataFrame