| prompt | completion | api |
|---|---|---|
| string, length 19 to 1.03M | string, length 4 to 2.12k | string, length 8 to 90 |
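Each row pairs a truncated code context (`prompt`) with the pandas call that completes it (`completion`) and the fully qualified API name (`api`); the rows below are shown raw. A minimal sketch of consuming rows with this schema follows; the dataset identifier is a placeholder, not the real one.

```python
# Sketch only: "user/pandas-api-completions" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("user/pandas-api-completions", split="train")
row = ds[0]
full_code = row["prompt"] + row["completion"]  # the completion fills in the masked pandas call
print(row["api"], len(full_code))
```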
# coding=utf-8
import unittest
import numpy as np
import pandas as pd
from clustermatch.utils.data import merge_sources
from .utils import get_data_file
class ReadTomateTest(unittest.TestCase):
def test_merge_sources_using_ps(self):
## Prepare
data_file = get_data_file('ps_2011_2012.csv')
## Run
ps_pro = merge_sources(data_file)[0]
## Validate
assert ps_pro is not None
assert hasattr(ps_pro, 'shape')
assert ps_pro.shape[0] == 10
assert ps_pro.shape[1] == 13
assert ps_pro.notnull().all().all()
# top left
assert ps_pro.round(3).loc['Arom-1', '552'] == 0.000
assert ps_pro.round(3).loc['Arom-1', '553'] == 0.000
assert ps_pro.round(3).loc['Arom-5', '552'] == 0.533
# top right
assert ps_pro.round(3).loc['Arom-1', 'Bigua'] == 0.111
assert ps_pro.round(3).loc['Arom-1', 'Elpida'] == 0.037
assert ps_pro.round(3).loc['Arom-5', 'Elpida'] == 0.296
# bottom right
assert ps_pro.round(3).loc['Jug-4', 'Bigua'] == 0.172
assert ps_pro.round(3).loc['Jug-4', 'Elpida'] == 0.586
assert ps_pro.round(3).loc['Jug-1', 'Elpida'] == 0.000
# bottom left
assert ps_pro.round(3).loc['Jug-4', '553'] == 0.158
assert ps_pro.round(3).loc['Jug-4', '552'] == 0.533
assert ps_pro.round(3).loc['Jug-1', '552'] == 0.000
def test_merge_sources_using_vo(self):
## Prepare
data_file = get_data_file('vo_2011_2012.csv')
## Run
vo_pro = merge_sources(data_file)[0]
## Validate
assert vo_pro is not None
assert hasattr(vo_pro, 'shape')
assert vo_pro.shape[0] == 42
assert vo_pro.shape[1] == 11
assert vo_pro.notnull().all().all()
# top left
assert vo_pro.round(3).loc['UNK 43', '552'] == 5.12
assert vo_pro.round(3).loc['UNK 43', '553'] == 4.77
assert vo_pro.round(3).loc['3mBUTANAL', '552'] == 0.000
# top right
assert vo_pro.round(3).loc['UNK 43', 'Bigua'] == 2.43
assert vo_pro.round(3).loc['UNK 43', 'Elpida'] == 3.40
assert vo_pro.round(3).loc['3mBUTANAL', 'Elpida'] == 1.34
# bottom right
assert vo_pro.round(3).loc['TRANS2HEXENAL', 'Bigua'] == 0.00
assert vo_pro.round(3).loc['TRANS2HEXENAL', 'Elpida'] == 7.11
assert vo_pro.round(3).loc['CIS2HEXENAL', 'Elpida'] == 0.00
# bottom left
assert vo_pro.round(3).loc['TRANS2HEXENAL', '553'] == 6.90
assert vo_pro.round(3).loc['TRANS2HEXENAL', '552'] == 5.40
assert vo_pro.round(3).loc['CIS2HEXENAL', '552'] == 0.000
def test_merge_sources_using_me_with_rep_merge_mean(self):
## Prepare
data_file = get_data_file('me_2011_2012.csv')
## Run
me_pro = merge_sources(data_file, rep_merge=np.mean)[0]
## Validate
assert me_pro is not None
assert hasattr(me_pro, 'shape')
assert me_pro.shape[0] == 89
assert me_pro.shape[1] == 44
# check all the null values
assert pd.isnull(me_pro.loc['NA_2106.37', '3806'])
assert pd.isnull(me_pro.loc['NA_1608.87', '3815'])
assert pd.isnull(me_pro.loc['NA_2106.37', '4748'])
assert pd.isnull(me_pro.loc['Glucoheptonic acid-1.4-lactone', '4748'])
assert pd.isnull(me_pro.loc['NA_2106.37', '560'])
assert pd.isnull(me_pro.loc['Glucoheptonic acid-1.4-lactone', '560'])
# top left
assert me_pro.round(3).loc['serine', '549'] == 19.905
assert me_pro.round(3).loc['serine', '551'] == 13.735
# top right
assert me_pro.round(3).loc['serine', '4751'] == 38.439
assert me_pro.round(3).loc['Ethanolamine', '4751'] == 1.619
# bottom left
assert me_pro.round(3).loc['Sucrose', '549'] == 171.211
assert me_pro.round(3).loc['NA_2627.66', '549'] == 3.853
# bottom right
assert me_pro.round(3).loc['NA_2627.66', '4751'] == 5.018
assert me_pro.round(3).loc['NA_2627.66', '4750'] == 13.353
def test_merge_sources_using_ag(self):
## Prepare
data_file = get_data_file('ag_2011_2012.csv')
## Run
ag_pro = merge_sources(data_file)[0]
## Validate
assert ag_pro is not None
assert hasattr(ag_pro, 'shape')
assert ag_pro.shape[0] == 16
assert ag_pro.shape[1] == 19
# check all the null values
# assert pd.isnull(ag_pro.loc['perim', '549'])
# top left
assert ag_pro.round(3).loc['peso', '549'] == 287.247
assert ag_pro.round(3).loc['peso', '550'] == 189.247
assert ag_pro.round(3).loc['perim', '549'] == 280.336
# top right
assert ag_pro.round(3).loc['peso', '572'] == 10.31
assert ag_pro.round(3).loc['firmeza', '572'] == 1.383
# bottom left
assert ag_pro.round(3).loc['a_cielab', '549'] == 44.870
assert ag_pro.round(3).loc['b_cielab', '549'] == 61.691
# bottom right
assert ag_pro.round(3).loc['b_cielab', '572'] == 57.386
assert ag_pro.round(3).loc['b_cielab', '571'] == 61.842
# Special cases
# all zeros
assert ag_pro.round(3).loc['area_indent', '572'] == 0.000
# values close to zero
assert ag_pro.round(3).loc['area_indent', '571'] == 0.038
def test_merge_sources_using_ap(self):
## Prepare
data_file = get_data_file('ap_2011_2012.csv')
## Run
ap_pro = merge_sources(data_file)[0]
## Validate
assert ap_pro is not None
assert hasattr(ap_pro, 'shape')
assert ap_pro.shape[0] == 7
assert ap_pro.shape[1] == 42
# check all the null values
# assert pd.isnull(ag_pro.loc['perim', '549'])
# top left
assert ap_pro.round(3).loc['Peso', '549'] == 0.532
assert ap_pro.round(3).loc['Peso', '550'] == 0.620
# top right
assert ap_pro.round(3).loc['Peso', 'elpida'] == 0.540
assert ap_pro.round(3).loc['TEAC HID (meq. Trolox %)', 'elpida'] == 0.351
# bottom left
assert ap_pro.round(3).loc['carotenos (mg%)', '549'] == 0.260
assert ap_pro.round(3).loc['LICOP (mg%)', '549'] == 3.969
# bottom right
assert ap_pro.round(3).loc['carotenos (mg%)', 'elpida'] == 0.511
assert ap_pro.round(3).loc['carotenos (mg%)', 'bigua'] == 0.319
# Special cases
# a NaN in the middle
assert ap_pro.round(3).loc['TEAC LIP (meq. Trolox %)', '558'] == 0.029
def test_merge_sources_index_name(self):
## Prepare
data_file = get_data_file('ap_2011_2012.csv')
## Run
ap_pro = merge_sources(data_file)[0]
## Validate
assert ap_pro is not None
assert hasattr(ap_pro, 'index')
assert ap_pro.index.name == 'features'
def test_merge_source_returning_names_using_ag(self):
## Prepare
data_file = get_data_file('ag_2011_2012.csv')
## Run
ag_pro, ag_nom, _ = merge_sources(data_file)
## Validate
assert ag_pro is not None
assert ag_nom is not None
assert len(ag_nom) == 16
assert ag_nom[0] == 'peso'
assert ag_nom[1] == 'firmeza'
assert ag_nom[7] == 'area_indent'
assert ag_nom[14] == 'a_cielab'
assert ag_nom[15] == 'b_cielab'
def test_merge_source_returning_names_using_ap(self):
## Prepare
data_file = get_data_file('ap_2011_2012.csv')
## Run
ap_pro, ap_nom, _ = merge_sources(data_file)
## Validate
assert ap_pro is not None
assert ap_nom is not None
assert len(ap_nom) == 7
assert ap_nom[0] == 'Peso'
assert ap_nom[1] == 'TEAC HID (meq. Trolox %)'
assert ap_nom[2] == 'TEAC LIP (meq. Trolox %)'
assert ap_nom[3] == 'FRAP (meq. Trolox %)'
assert ap_nom[4] == 'FOLIN (mg Ac Galico/100g)'
assert ap_nom[5] == 'LICOP (mg%)'
assert ap_nom[6] == 'carotenos (mg%)'
def test_merge_source_returning_names_using_ap_ps(self):
## Prepare
data_files = [get_data_file('ap_2011_2012.csv'),
get_data_file('ps_2011_2012.csv')]
## Run
pro, nom, _ = merge_sources(data_files)
## Validate
assert pro is not None
assert nom is not None
assert len(nom) == 7 + 10
ap_var_names = ['Peso', 'TEAC HID (meq. Trolox %)', 'TEAC LIP (meq. Trolox %)',
'FRAP (meq. Trolox %)', 'FOLIN (mg Ac Galico/100g)', 'LICOP (mg%)',
'carotenos (mg%)']
if not (ap_var_names == nom[:7] or ap_var_names == nom[-7:]):
self.fail('ap variables not found')
ps_var_names = ['Arom-1', 'Arom-5', 'Sab-1', 'Sab-5', 'Dulz-1', 'Dulz-5', 'Acid-1',
'Acid-5', 'Jug-1', 'Jug-4']
if not (ps_var_names == nom[:10] or ps_var_names == nom[-10:]):
self.fail('ps variables not found')
def test_merge_source_returning_sources_using_ap_ps(self):
## Prepare
data_files = [get_data_file('ap_2011_2012.csv'),
get_data_file('ps_2011_2012.csv')]
## Run
pro, nom, sources = merge_sources(data_files)
## Validate
assert pro is not None
assert nom is not None
assert sources is not None
assert len(sources) == 7 + 10
assert len(set(sources)) == 2 # unique source names
assert 'ps_2011_2012' in sources
assert 'ap_2011_2012' in sources
if sources[0] == 'ps_2011_2012':
assert len(set(sources[:10])) == 1
assert 'ps_2011_2012' in set(sources[:10])
assert len(set(sources[-7:])) == 1
assert 'ap_2011_2012' in set(sources[-7:])
else:
assert len(set(sources[:7])) == 1
assert 'ap_2011_2012' in set(sources[:7])
assert len(set(sources[-10:])) == 1
assert 'ps_2011_2012' in set(sources[-10:])
def test_merge_sources_multiple_using_ps_vo(self):
## Prepare
ps_data_file = get_data_file('ps_2011_2012.csv')
vo_data_file = get_data_file('vo_2011_2012.csv')
fuentes = [ps_data_file, vo_data_file]
## Run
procesado, nombres, _ = merge_sources(fuentes)
## Validate
assert procesado is not None
assert hasattr(procesado, 'shape')
assert procesado.shape[0] == 10 + 42
assert procesado.shape[1] == 13 # total columns, shared ones are counted only once
# ps
assert procesado.round(3).loc['Arom-1', '552'] == 0.00
assert procesado.round(3).loc['Arom-1', '3837'] == 0.00
assert procesado.round(3).loc['Arom-1', '4735'] == 0.063
assert procesado.round(3).loc['Arom-1', '1589'] == 0.231
assert procesado.round(3).loc['Arom-1', 'Bigua'] == 0.111
assert procesado.round(3).loc['Arom-1', 'Elpida'] == 0.037
assert procesado.round(3).loc['Jug-4', '552'] == 0.533 # bottom left
assert procesado.round(3).loc['Jug-4', 'Elpida'] == 0.586 # bottom right
# vo
assert procesado.round(3).loc['UNK 43', '552'] == 5.12
assert procesado.round(3).loc['UNK 43', '3837'] == 3.98
assert pd.isnull(procesado.round(3).loc['UNK 43', '4735'])
assert pd.isnull(procesado.round(3).loc['UNK 43', '1589'])
assert procesado.round(3).loc['UNK 43', 'Bigua'] == 2.430
assert procesado.round(3).loc['UNK 43', 'Elpida'] == 3.400
assert procesado.round(3).loc['TRANS2HEXENAL', '552'] == 5.400 # bottom left
assert procesado.round(3).loc['TRANS2HEXENAL', 'Elpida'] == 7.110 # bottom right
def test_merge_sources_multiple_using_me_ag(self):
## Prepare
me_data_file = get_data_file('me_2011_2012.csv')
ag_data_file = get_data_file('ag_2011_2012.csv')
fuentes = [me_data_file, ag_data_file]
## Run
procesado, nombres, _ = merge_sources(fuentes)
## Validate
assert procesado is not None
assert hasattr(procesado, 'shape')
assert procesado.shape[0] == 89 + 16
assert procesado.shape[1] == 47 # total columns, shared ones are counted only once
# me
## null values
assert pd.isnull(procesado.loc['NA_2106.37', '3806'])
assert pd.isnull(procesado.loc['NA_1608.87', '3815'])
assert pd.isnull(procesado.loc['NA_2106.37', '4748'])
assert pd.isnull(procesado.loc['Glucoheptonic acid-1.4-lactone', '4748'])
assert pd.isnull(procesado.loc['NA_2106.37', '560'])
assert pd.isnull(procesado.loc['Glucoheptonic acid-1.4-lactone', '560'])
## top left
assert procesado.round(3).loc['serine', '549'] == 19.905
assert procesado.round(3).loc['serine', '551'] == 13.735
## top right
assert procesado.round(3).loc['serine', '4751'] == 38.439
assert procesado.round(3).loc['Ethanolamine', '4751'] == 1.619
## bottom left
assert procesado.round(3).loc['Sucrose', '549'] == 171.211
assert procesado.round(3).loc['NA_2627.66', '549'] == 3.853
## bottom right
assert procesado.round(3).loc['NA_2627.66', '4751'] == 5.018
assert procesado.round(3).loc['NA_2627.66', '4750'] == 13.353
# ag
## top left
assert procesado.round(3).loc['peso', '549'] == 287.247
assert procesado.round(3).loc['peso', '550'] == 189.247
assert procesado.round(3).loc['perim', '549'] == 280.336
## top right
assert procesado.round(3).loc['peso', '572'] == 10.31
assert procesado.round(3).loc['firmeza', '572'] == 1.383
## bottom left
assert procesado.round(3).loc['a_cielab', '549'] == 44.870
assert procesado.round(3).loc['b_cielab', '549'] == 61.691
## bottom right
assert procesado.round(3).loc['b_cielab', '572'] == 57.386
assert procesado.round(3).loc['b_cielab', '571'] == 61.842
## all zeros
assert procesado.round(3).loc['area_indent', '572'] == 0.000
## values close to zero
assert procesado.round(3).loc['area_indent', '571'] == 0.038
def test_merge_sources_xls_2008_2009(self):
## Run
procesado = merge_sources(get_data_file('2008-2009.xls'))[0]
## Validate
assert procesado is not None
assert hasattr(procesado, 'shape')
assert procesado.shape[0] == 101 + 26 + 29
# assert procesado.shape[1] == 47 # total columns, shared ones are counted only once
# volatiles
## null values
assert pd.isnull(procesado.loc['4-metil-3-hepten-2-ona', '569'])
assert pd.isnull(procesado.loc['4-metil-3-hepten-2-ona', '3806'])
assert pd.isnull(procesado.loc['1,4-pentadieno', '572'])
assert pd.isnull(procesado.loc['1,4-pentadieno', '3842'])
assert pd.isnull(procesado.loc['1,4-pentadieno', '4618'])  # api: pandas.isnull
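This row targets `pandas.isnull`. As a self-contained, hedged illustration of that API (toy data, not the test fixtures above):

```python
import numpy as np
import pandas as pd

# A missing cell is reported as null by pd.isnull
df = pd.DataFrame({"552": [5.12, np.nan]}, index=["UNK 43", "1,4-pentadieno"])
assert pd.isnull(df.loc["1,4-pentadieno", "552"])
assert not pd.isnull(df.loc["UNK 43", "552"])
```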
import base64
import requests
import json
from google.cloud import pubsub_v1
import pandas as pd
from pandas.core.reshape.concat import concat
from pandas.core.frame import DataFrame
from google.cloud import bigquery
from google.cloud import storage
bq_project_name = "coredata-trial"
bq_dataset_name = "orderbookdataset"
bq_table_name = "orderbookcrypto"
bq_table_full_path = f"""{bq_project_name}.{bq_dataset_name}.{bq_table_name}"""
bq_client = bigquery.Client(bq_project_name)
def write_to_bigquery(message: dict):
errors = bq_client.insert_rows_json(
bq_table_full_path,
message, # Must be a list of objects, even if only 1 row.
)
for error in errors:
print(f"encountered error: {error}")
def store_data_in_bucket(df: bytes):
# Instantiates a client
project_id = "coredata-trial"
client = storage.Client(project_id)
# Creates a new bucket and uploads an object
fname = "orderbook" + pd.to_datetime('now').strftime("%Y-%m-%d-%H-%M")+".json"
bucket = client.bucket("coredatastore001")
blob = bucket.blob(fname)
blob.upload_from_string(
data=df,
content_type='application/json'
)
print(f"Wrote json with pandas with name {blob.name} to the bucket {bucket.name}.")
def covert_ob_to_dataframe_binance(obdata: dict, exchange, symbol) -> DataFrame:
obframes = {side: pd.DataFrame(data=obdata[side], columns=['price', 'quantity'], dtype=float)  # api: pandas.DataFrame
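The function above is cut off mid-expression; the completion suggests building one `pd.DataFrame` per order-book side. A minimal sketch under the assumption that `obdata` holds lists of `[price, quantity]` pairs under `"bids"` and `"asks"` (not taken from the truncated function itself):

```python
import pandas as pd

obdata = {"bids": [["27000.1", "0.5"], ["26999.8", "1.2"]],
          "asks": [["27001.2", "0.3"]]}
obframes = {side: pd.DataFrame(data=obdata[side], columns=["price", "quantity"], dtype=float)
            for side in ["bids", "asks"]}
ob = pd.concat(obframes, names=["side", "level"])  # one frame with a (side, level) MultiIndex
print(ob)
```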
import os
# basic
import numpy as np
import pandas as pd
from sklearn.utils import class_weight
from tqdm import tqdm, trange
import time
import pprint
import datetime
import argparse
from scipy.stats import gmean
import yaml
import shutil
# keras
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
# DIY
import utils_classif
from feat_ext import load_audio_file, get_mel_spectrogram, modify_file_variable_length
from data import get_label_files, DataGeneratorPatch, PatchGeneratorPerFile
from architectures import get_model_crnn_seld_tagger
from eval import Evaluator
import csv
import sys
sys.path.append('../')
from parameters import get_params
from compute_doa_metrics import compute_DOA_metrics
from file_utils import write_metadata_result_file, build_result_dict_from_metadata_array, write_output_result_file
start = time.time()
now = datetime.datetime.now()
print("Current date and time:")
print(str(now))
# =========================================================================================================
# =========================================================================================================
# ==================================================================== ARGUMENTS
parser = argparse.ArgumentParser(description='DCASE2019 Task3')
parser.add_argument('-p', '--params_yaml',
dest='params_yaml',
action='store',
required=False,
type=str)
args = parser.parse_args()
print('\nYaml file with parameters defining the experiment: %s\n' % str(args.params_yaml))
# =========================================================================Parameters, paths and variables
# =========================================================================Parameters, paths and variables
# =========================================================================Parameters, paths and variables
# Read parameters file from yaml passed by argument
params = yaml.load(open(args.params_yaml), Loader=yaml.FullLoader)
params_ctrl = params['ctrl']
params_extract = params['extract']
params_learn = params['learn']
params_loss = params['loss']
params_recog = params['recognizer']
params_crnn = params['crnn']
suffix_in = params['suffix'].get('in')
suffix_out = params['suffix'].get('out')
# determine loss function for stage 1 (or entire training)
if params_loss.get('type') == 'CCE':
params_loss['type'] = 'categorical_crossentropy'
elif params_loss.get('type') == 'MAE':
params_loss['type'] = 'mean_absolute_error'
params_extract['audio_len_samples'] = int(params_extract.get('fs') * params_extract.get('audio_len_s'))
# important for deployment: for the public release, put params_ctrl.get('dataset_path') directly within params_path
path_root_data = params_ctrl.get('dataset_path')
params_path = {'path_to_features': os.path.join(path_root_data, 'features'),
# 'featuredir_dev': 'audio_dev_varup1/',
# 'featuredir_eval': 'audio_eval_varup1/',
'featuredir_dev': 'audio_dev_varup2_64mel/',
'featuredir_eval': 'audio_eval_varup2_64mel/',
# 'featuredir_dev_param': 'audio_dev_param_varup2_64mel/',
# 'featuredir_eval_param': 'audio_eval_param_varup2_64mel/',
'featuredir_dev_param': 'audio_dev_param_Q_varup2_64mel/',
'featuredir_eval_param': 'audio_eval_param_Q_varup2_64mel/',
# 'featuredir_dev': 'audio_dev_varup1_64mel/',
# 'featuredir_eval': 'audio_eval_varup1_64mel/',
'path_to_dataset': path_root_data,
'audiodir_dev': 'wav/dev/',
'audiodir_eval': 'wav/eval/',
# 'audiodir_dev_param': 'wav/dev_param/',
# 'audiodir_eval_param': 'wav/eval_param/',
'audiodir_dev_param': 'wav/dev_param_Q/',
'audiodir_eval_param': 'wav/eval_param_Q/',
'audio_shapedir_dev': 'audio_dev_shapes/',
'audio_shapedir_eval': 'audio_eval_shapes/',
# 'audio_shapedir_dev_param': 'audio_dev_param_shapes/',
# 'audio_shapedir_eval_param': 'audio_eval_param_shapes/',
'audio_shapedir_dev_param': 'audio_dev_param_Q_shapes/',
'audio_shapedir_eval_param': 'audio_eval_param_Q_shapes/',
'gt_files': path_root_data}
if params_extract.get('n_mels') == 40:
params_path['featuredir_dev'] = 'audio_dev_varup2_40mel/'
params_path['featuredir_eval'] = 'audio_eval_varup2_40mel/'
# params_path['featuredir_dev_param'] = 'audio_dev_param_varup2_40mel/'
# params_path['featuredir_eval_param'] = 'audio_eval_param_varup2_40mel/'
params_path['featuredir_dev_param'] = 'audio_dev_param_Q_varup2_40mel/'
params_path['featuredir_eval_param'] = 'audio_eval_param_Q_varup2_40mel/'
elif params_extract.get('n_mels') == 96:
params_path['featuredir_dev'] = 'audio_dev_varup2_96mel/'
params_path['featuredir_eval'] = 'audio_eval_varup2_96mel/'
# params_path['featuredir_dev_param'] = 'audio_dev_param_varup2_96mel/'
# params_path['featuredir_eval_param'] = 'audio_eval_param_varup2_96mel/'
params_path['featuredir_dev_param'] = 'audio_dev_param_Q_varup2_96mel/'
params_path['featuredir_eval_param'] = 'audio_eval_param_Q_varup2_96mel/'
elif params_extract.get('n_mels') == 128:
params_path['featuredir_dev'] = 'audio_dev_varup2_128mel/'
params_path['featuredir_eval'] = 'audio_eval_varup2_128mel/'
# params_path['featuredir_dev_param'] = 'audio_dev_param_varup2_128mel/'
# params_path['featuredir_eval_param'] = 'audio_eval_param_varup2_128mel/'
params_path['featuredir_dev_param'] = 'audio_dev_param_Q_varup2_128mel/'
params_path['featuredir_eval_param'] = 'audio_eval_param_Q_varup2_128mel/'
params_path['featurepath_dev'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_dev'))
params_path['featurepath_eval'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_eval'))
params_path['featurepath_dev_param'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_dev_param'))
params_path['featurepath_eval_param'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_eval_param'))
params_path['audiopath_dev'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_dev'))
params_path['audiopath_eval'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_eval'))
params_path['audiopath_dev_param'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_dev_param'))
params_path['audiopath_eval_param'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_eval_param'))
params_path['audio_shapedir_dev'] = os.path.join(params_path.get('path_to_dataset'),
params_path.get('audio_shapedir_dev'))
params_path['audio_shapedir_eval'] = os.path.join(params_path.get('path_to_dataset'),
params_path.get('audio_shapedir_eval'))
params_path['audio_shapedir_dev_param'] = os.path.join(params_path.get('path_to_dataset'),
params_path.get('audio_shapedir_dev_param'))
params_path['audio_shapedir_eval_param'] = os.path.join(params_path.get('path_to_dataset'),
params_path.get('audio_shapedir_eval_param'))
# ======================================================== SPECIFIC PATHS TO SOME IMPORTANT FILES
# ground truth, load model, save model, predictions, results
params_files = {'gt_eval': os.path.join(params_path.get('gt_files'), 'gt_eval.csv'),
'gt_dev': os.path.join(params_path.get('gt_files'), 'gt_dev.csv')}
path_trained_models = utils_classif.make_sure_isdir('trained_models', params_ctrl.get('output_file'))
params_files['save_model'] = os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' + str(params_ctrl.get('count_trial')) + '.h5')
path_predictions = utils_classif.make_sure_isdir('predictions', params_ctrl.get('output_file'))
params_files['predictions'] = os.path.join(path_predictions, params_ctrl.get('output_file') + '_v' + str(params_ctrl.get('count_trial')) + '.csv')
path_results = utils_classif.make_sure_isdir('logs/results', params_ctrl.get('output_file'))
params_files['results'] = os.path.join(path_results, params_ctrl.get('output_file') + '.pickle')
# params_files['event_durations'] = os.path.join('logs/pics', params_ctrl.get('output_file') + '_event_durations.pickle')
# # ============================================= print all params to keep record in output file
print('\nparams_ctrl=')
pprint.pprint(params_ctrl, width=1, indent=4)
print('params_files=')
pprint.pprint(params_files, width=1, indent=4)
print('params_extract=')
pprint.pprint(params_extract, width=1, indent=4)
print('params_learn=')
pprint.pprint(params_learn, width=1, indent=4)
print('params_loss=')
pprint.pprint(params_loss, width=1, indent=4)
print('params_recog=')
pprint.pprint(params_recog, width=1, indent=4)
print('params_crnn=')
pprint.pprint(params_crnn, width=1, indent=4)
print('\n')
# ============================================================== READ TRAIN and TEST DATA
# ============================================================== READ TRAIN and TEST DATA
# ============================================================== READ TRAIN and TEST DATA
# ============================================================== READ TRAIN and TEST DATA
# aim: lists with all wav files for dev, which includes train/val/test
gt_dev = pd.read_csv(params_files.get('gt_dev'))
splitlist_audio_dev = gt_dev.split.values.tolist()
filelist_audio_dev = gt_dev.fname.values.tolist()
# create dict with ground truth mapping with labels:
# -key: path to wav
# -value: the ground truth label too
file_to_label = {params_path.get('audiopath_dev') + k: v for k, v in zip(gt_dev.fname.values, gt_dev.label.values)}
# ========================================================== CREATE VARS FOR DATASET MANAGEMENT
# list with unique n_classes labels and aso_ids
list_labels = sorted(list(set(gt_dev.label.values)))
# create dicts such that key: value is as follows
# fixed by DCASE
label_to_int = {
'clearthroat': 2,
'cough': 8,
'doorslam': 9,
'drawer': 1,
'keyboard': 6,
'keysDrop': 4,
'knock': 0,
'laughter': 10,
'pageturn': 7,
'phone': 3,
'speech': 5
}
int_to_label = {v: k for k, v in label_to_int.items()}
# create ground truth mapping with categorical values
file_to_label_numeric = {k: label_to_int[v] for k, v in file_to_label.items()}
#
# ========================================================== FEATURE EXTRACTION
# ========================================================== FEATURE EXTRACTION
# ========================================================== FEATURE EXTRACTION
# compute T_F representation
# mel-spectrogram for all files in the dataset and store it
var_lens = {item: [] for item in label_to_int.keys()}
var_lens['overall'] = []
var_lens_dev_param = {}
var_lens_dev_param['overall'] = []
if params_ctrl.get('feat_ext'):
if params_ctrl.get('pipeline') == 'T_F':
n_extracted_dev = 0; n_extracted_te = 0; n_failed_dev = 0; n_failed_te = 0
n_extracted_dev_param = 0; n_failed_dev_param = 0
# only if features have not been extracted, ie
# if folder does not exist, or it exists with less than 80% of the feature files
# create folder and extract features
nb_files_dev = len(filelist_audio_dev)
if not os.path.exists(params_path.get('featurepath_dev')) or \
len(os.listdir(params_path.get('featurepath_dev'))) < nb_files_dev*0.8:
if os.path.exists(params_path.get('featurepath_dev')):
shutil.rmtree(params_path.get('featurepath_dev'))
os.makedirs(params_path.get('featurepath_dev'))
print('\nFeature extraction for dev set (prints enabled). Features dumped in {}.........................'.
format(params_path.get('featurepath_dev')))
for idx, f_name in enumerate(filelist_audio_dev):
f_path = os.path.join(params_path.get('audiopath_dev'), f_name)
if os.path.isfile(f_path) and f_name.endswith('.wav'):
# load entire audio file and modify variable length, if needed
y = load_audio_file(f_path, input_fixed_length=params_extract['audio_len_samples'], params_extract=params_extract)
# keep record of the lengths, per class, for insight
duration_seconds = len(y)/int(params_extract.get('fs'))
var_lens[f_name.split('_')[0]].append(duration_seconds)
var_lens['overall'].append(duration_seconds)
y = modify_file_variable_length(data=y,
input_fixed_length=params_extract['audio_len_samples'],
params_extract=params_extract)
# print('Considered audio length: %6.3f' % (len(y) / params_extract.get('fs')))
# print('%-22s: [%d/%d] of %s' % ('Extracting tr features', (idx + 1), nb_files_tr, f_path))
# compute log-scaled mel spec. row x col = time x freq
mel_spectrogram = get_mel_spectrogram(audio=y, params_extract=params_extract)
# save the T_F rep to a binary file (only the considered length)
utils_classif.save_tensor(var=mel_spectrogram,
out_path=os.path.join(params_path.get('featurepath_dev'),
f_name.replace('.wav', '.data')), suffix='_mel')
# save also label
utils_classif.save_tensor(var=np.array([file_to_label_numeric[f_path]], dtype=float),
out_path=os.path.join(params_path.get('featurepath_dev'),
f_name.replace('.wav', '.data')), suffix='_label')
if os.path.isfile(os.path.join(params_path.get('featurepath_dev'),
f_name.replace('.wav', suffix_in + '.data'))):
n_extracted_dev += 1
print('%-22s: [%d/%d] of %s' % ('Extracted dev features', (idx + 1), nb_files_dev, f_path))
else:
n_failed_dev += 1
print('%-22s: [%d/%d] of %s' % ('FAILING to extract dev features', (idx + 1), nb_files_dev, f_path))
else:
print('%-22s: [%d/%d] of %s' % ('this dev audio is in the csv but not in the folder', (idx + 1), nb_files_dev, f_path))
print('n_extracted_dev: {0} / {1}'.format(n_extracted_dev, nb_files_dev))
print('n_failed_dev: {0} / {1}\n'.format(n_failed_dev, nb_files_dev))
else:
print('Dev set is already extracted in {}'.format(params_path.get('featurepath_dev')))
# do feature extraction for dev_param (outcome of complete parametric frontend)========================================
# do feature extraction for dev_param (outcome of complete parametric frontend)========================================
audio_files_dev_param = [f for f in os.listdir(params_path.get('audiopath_dev_param')) if not f.startswith('.')]
nb_files_dev_param = len(audio_files_dev_param)
if not os.path.exists(params_path.get('featurepath_dev_param')) or \
len(os.listdir(params_path.get('featurepath_dev_param'))) < nb_files_dev_param * 0.8:
if os.path.exists(params_path.get('featurepath_dev_param')):
shutil.rmtree(params_path.get('featurepath_dev_param'))
os.makedirs(params_path.get('featurepath_dev_param'))
print(
'\nFeature extraction for dev set parametric (outcome of parametric frontend). Features dumped in {}.........................'.
format(params_path.get('featurepath_dev_param')))
for idx, f_name in enumerate(audio_files_dev_param):
f_path = os.path.join(params_path.get('audiopath_dev_param'), f_name)
if os.path.isfile(f_path) and f_name.endswith('.wav'):
# load entire audio file and modify variable length, if needed
y = load_audio_file(f_path, input_fixed_length=params_extract['audio_len_samples'],
params_extract=params_extract)
# keep record of the lengths, per class, for insight
duration_seconds = len(y) / int(params_extract.get('fs'))
var_lens_dev_param['overall'].append(duration_seconds)
y = modify_file_variable_length(data=y,
input_fixed_length=params_extract['audio_len_samples'],
params_extract=params_extract)
# print('Considered audio length: %6.3f' % (len(y) / params_extract.get('fs')))
# print('%-22s: [%d/%d] of %s' % ('Extracting tr features', (idx + 1), nb_files_tr, f_path))
# compute log-scaled mel spec. row x col = time x freq
mel_spectrogram = get_mel_spectrogram(audio=y, params_extract=params_extract)
# save the T_F rep to a binary file (only the considered length)
utils_classif.save_tensor(var=mel_spectrogram,
out_path=os.path.join(params_path.get('featurepath_dev_param'),
f_name.replace('.wav', '.data')), suffix='_mel')
if os.path.isfile(os.path.join(params_path.get('featurepath_dev_param'),
f_name.replace('.wav', suffix_in + '.data'))):
n_extracted_dev_param += 1
print('%-22s: [%d/%d] of %s' % ('Extracted dev_param features', (idx + 1), nb_files_dev_param, f_path))
else:
n_failed_dev_param += 1
print('%-22s: [%d/%d] of %s' % (
'FAILING to extract dev_param features', (idx + 1), nb_files_dev_param, f_path))
else:
print('%-22s: [%d/%d] of %s' % (
'this dev_param audio is in the csv but not in the folder', (idx + 1), nb_files_dev_param, f_path))
print('n_extracted_dev_param: {0} / {1}'.format(n_extracted_dev_param, nb_files_dev_param))
print('n_failed_dev_param: {0} / {1}\n'.format(n_failed_dev_param, nb_files_dev_param))
else:
print('Dev_param set is already extracted in {}'.format(params_path.get('featurepath_dev_param')))
# select the subset of training data to consider: all, clean, noisy, noisy_small
# =====================================================================================================================
# =====================================================================================================================
ff_list_dev = [filelist_audio_dev[i].replace('.wav', suffix_in + '.data') for i in range(len(filelist_audio_dev))]
labels_audio_dev = get_label_files(filelist=ff_list_dev,
dire=params_path.get('featurepath_dev'),
suffix_in=suffix_in,
suffix_out=suffix_out
)
print('Number of clips considered as dev set: {0}'.format(len(ff_list_dev)))
print('Number of labels loaded for dev set: {0}'.format(len(labels_audio_dev)))
scalers = [None]*4
# determine the validation setup according to the folds, and perform training / val / test for each fold
for kfo in range(1, 5):
print('\n=========================================================================================================')
print('===Processing fold {} within the x-val setup...'.format(kfo))
print('=========================================================================================================\n')
# x-val setup given by DCASE organizers
if kfo == 1:
splits_tr = [3, 4]
splits_val = [2]
splits_te = [1]
elif kfo == 2:
splits_tr = [4, 1]
splits_val = [3]
splits_te = [2]
elif kfo == 3:
splits_tr = [1, 2]
splits_val = [4]
splits_te = [3]
elif kfo == 4:
splits_tr = [2, 3]
splits_val = [1]
splits_te = [4]
params_ctrl['current_fold'] = kfo
tr_files0 = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_tr[0]]
tr_files1 = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_tr[1]]
tr_files = tr_files0 + tr_files1
val_files = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_val[0]]
te_files = [fname for idx, fname in enumerate(ff_list_dev) if splitlist_audio_dev[idx] == splits_te[0]]
# SC
if len(tr_files) + len(val_files) + len(te_files) != len(ff_list_dev):
print('ERROR: You messed up in x-val setup for fold: {0}'.format(kfo))
print('{} is not {}'.format(len(tr_files) + len(val_files) + len(te_files), len(ff_list_dev)))
# ============================================================BATCH GENERATION
# ============================================================BATCH GENERATION
tr_gen_patch = DataGeneratorPatch(feature_dir=params_path.get('featurepath_dev'),
file_list=tr_files,
params_learn=params_learn,
params_extract=params_extract,
suffix_in='_mel',
suffix_out='_label',
floatx=np.float32
)
# to predict later on on dev_param clips
scalers[kfo-1] = tr_gen_patch.scaler
print("Total number of instances *only* for training: %s" % str(tr_gen_patch.nb_inst_total))
print("Batch_size: %s" % str(tr_gen_patch.batch_size))
print("Number of iterations (batches) in the training subset: %s" % str(tr_gen_patch.nb_iterations))
print("\nShape of training subset: %s" % str(tr_gen_patch.features.shape))
print("Shape of labels in training subset: %s" % str(tr_gen_patch.labels.shape))
# compute class_weigths based on the labels generated
if params_learn.get('mode_class_weight'):
labels_nice = np.reshape(tr_gen_patch.labels, -1) # remove singleton dimension
class_weights = class_weight.compute_class_weight('balanced',
np.unique(labels_nice),
labels_nice)
class_weights_dict = dict(enumerate(class_weights))
else:
class_weights_dict = None
val_gen_patch = DataGeneratorPatch(feature_dir=params_path.get('featurepath_dev'),
file_list=val_files,
params_learn=params_learn,
params_extract=params_extract,
suffix_in='_mel',
suffix_out='_label',
floatx=np.float32,
scaler=tr_gen_patch.scaler
)
print("\nShape of validation subset: %s" % str(val_gen_patch.features.shape))
print("Shape of labels in validation subset: %s" % str(val_gen_patch.labels.shape))
# ============================================================DEFINE AND FIT A MODEL
# ============================================================DEFINE AND FIT A MODEL
tr_loss, val_loss = [0] * params_learn.get('n_epochs'), [0] * params_learn.get('n_epochs')
# ============================================================
if params_ctrl.get('learn'):
if params_learn.get('model') == 'crnn_seld_tagger':
model = get_model_crnn_seld_tagger(params_crnn=params_crnn, params_learn=params_learn,
params_extract=params_extract)
if params_learn.get('stages') == 1:
opt = Adam(lr=params_learn.get('lr'))
model.compile(optimizer=opt, loss=params_loss.get('type'), metrics=['accuracy'])
model.summary()
# callbacks
if params_learn.get('early_stop') == "val_acc":
early_stop = EarlyStopping(monitor='val_acc', patience=params_learn.get('patience'), min_delta=0.001, verbose=1)
elif params_learn.get('early_stop') == "val_loss":
early_stop = EarlyStopping(monitor='val_loss', patience=params_learn.get('patience'), min_delta=0,
verbose=1)
# save one best model for every fold, as needed for submission
params_files['save_model'] = os.path.join(path_trained_models, params_ctrl.get('output_file') + '_v' +
str(params_ctrl.get('count_trial')) + '_f' + str(kfo) + '.h5')
checkpoint = ModelCheckpoint(params_files.get('save_model'), monitor='val_acc', verbose=1, save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=5, verbose=1)
callback_list = [checkpoint, early_stop, reduce_lr]
hist = model.fit_generator(tr_gen_patch,
steps_per_epoch=tr_gen_patch.nb_iterations,
epochs=params_learn.get('n_epochs'),
validation_data=val_gen_patch,
validation_steps=val_gen_patch.nb_iterations,
class_weight=class_weights_dict,
workers=4,
verbose=2,
callbacks=callback_list)
# ==================================================================================================== PREDICT
# ==================================================================================================== PREDICT
print('\nCompute predictions on test split, and save them in csv:==============================================\n')
# to store prediction probabilites
te_preds = np.empty((len(te_files), params_learn.get('n_classes')))
te_gen_patch = PatchGeneratorPerFile(feature_dir=params_path.get('featurepath_dev'),
file_list=te_files,
params_extract=params_extract,
suffix_in='_mel',
floatx=np.float32,
scaler=tr_gen_patch.scaler
)
for i in trange(len(te_files), miniters=int(len(te_files) / 100), ascii=True, desc="Predicting..."):
patches_file = te_gen_patch.get_patches_file()
preds_patch_list = model.predict(patches_file).tolist()
preds_patch = np.array(preds_patch_list)
if params_learn.get('predict_agg') == 'amean':
preds_file = np.mean(preds_patch, axis=0)
elif params_recog.get('aggregate') == 'gmean':
preds_file = gmean(preds_patch, axis=0)
else:
print('unknown aggregation method for prediction')
te_preds[i, :] = preds_file
list_labels = np.array(list_labels)
pred_label_files_int = np.argmax(te_preds, axis=1)
pred_labels = [int_to_label[x] for x in pred_label_files_int]
te_files_wav = [f.replace(suffix_in + '.data', '.wav') for f in te_files]
if not os.path.isfile(params_files.get('predictions')):
# fold 1: create the predictions file
pred = pd.DataFrame(te_files_wav, columns=['fname'])
pred['label'] = pred_labels
pred['label_int'] = pred_label_files_int
pred.to_csv(params_files.get('predictions'), index=False)
del pred
else:
pred = pd.read_csv(params_files.get('predictions'))
old_fname = pred.fname.values.tolist()
old_label = pred.label.values.tolist()
old_label_int = pred.label_int.values.tolist()
new_pred_fname = old_fname + te_files_wav
new_pred_label = old_label + pred_labels
new_pred_label_int = old_label_int + pred_label_files_int.tolist()
del pred
pred = pd.DataFrame(new_pred_fname, columns=['fname'])  # api: pandas.DataFrame
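The script is truncated right after rebuilding `pred`. A sketch of how the accumulated per-fold predictions could be written back to CSV; this is an assumption for illustration, not the author's actual continuation:

```python
import pandas as pd

# toy stand-ins for the accumulated lists built above
new_pred_fname = ["clip_0001.wav", "clip_0002.wav"]
new_pred_label = ["speech", "knock"]
new_pred_label_int = [5, 0]

pred = pd.DataFrame(new_pred_fname, columns=["fname"])
pred["label"] = new_pred_label
pred["label_int"] = new_pred_label_int
pred.to_csv("predictions.csv", index=False)  # placeholder path
```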
from lxml import etree
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
import Bio
from Bio import SeqIO
from pathlib import Path
import glob
#console
from tqdm import tqdm as tqdm
import re
import os
import itertools
#jupyter
#from tqdm import tqdm_notebook as tqdm
#not supported in current tqdm version
#from tqdm.autonotebook import tqdm
#import logging
#logging.getLogger('proteomics_utils').addHandler(logging.NullHandler())
#logger=logging.getLogger('proteomics_utils')
#for cd-hit
import subprocess
from sklearn.metrics import f1_score
import hashlib #for mhcii datasets
from utils.dataset_utils import split_clusters_single,pick_all_members_from_clusters
#######################################################################################################
#Parsing all sorts of protein data
#######################################################################################################
def parse_uniprot_xml(filename,max_entries=0,parse_features=[]):
'''parse uniprot xml file, which contains the full uniprot information (e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.xml.gz)
using custom low-level https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
c.f. for full format https://www.uniprot.org/docs/uniprot.xsd
parse_features: a list of strings specifying the kind of features to be parsed such as "modified residue" for phosphorylation sites etc. (see https://www.uniprot.org/help/mod_res)
(see the xsd file for all possible entries)
'''
context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniprot}entry")
context = iter(context)
rows =[]
for _, elem in tqdm(context):
parse_func_uniprot(elem,rows,parse_features=parse_features)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
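# Usage sketch (hypothetical file name): parse the first 100 entries and also
# collect "modified residue" features, e.g.
# df_sprot = parse_uniprot_xml("uniprot_sprot.xml", max_entries=100,
#                              parse_features=["modified residue"])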
def parse_func_uniprot(elem, rows, parse_features=[]):
'''extracting a single record from uniprot xml'''
seqs = elem.findall("{http://uniprot.org/uniprot}sequence")
sequence=""
#print(seqs)
for s in seqs:
sequence=s.text
#print("sequence",sequence)
if sequence =="" or str(sequence)=="None":
continue
else:
break
#Sequence & fragment
sequence=""
fragment_map = {"single":1, "multiple":2}
fragment = 0
seqs = elem.findall("{http://uniprot.org/uniprot}sequence")
for s in seqs:
if 'fragment' in s.attrib:
fragment = fragment_map[s.attrib["fragment"]]
sequence=s.text
if sequence != "":
break
#print("sequence:",sequence)
#print("fragment:",fragment)
#dataset
dataset=elem.attrib["dataset"]
#accession
accession = ""
accessions = elem.findall("{http://uniprot.org/uniprot}accession")
for a in accessions:
accession=a.text
if accession !="":#primary accession! https://www.uniprot.org/help/accession_numbers!!!
break
#print("accession",accession)
#protein existence (PE in plain text)
proteinexistence_map = {"evidence at protein level":5,"evidence at transcript level":4,"inferred from homology":3,"predicted":2,"uncertain":1}
proteinexistence = -1
accessions = elem.findall("{http://uniprot.org/uniprot}proteinExistence")
for a in accessions:
proteinexistence=proteinexistence_map[a.attrib["type"]]
break
#print("protein existence",proteinexistence)
#name
name = ""
names = elem.findall("{http://uniprot.org/uniprot}name")
for n in names:
name=n.text
break
#print("name",name)
#organism
organism = ""
organisms = elem.findall("{http://uniprot.org/uniprot}organism")
for s in organisms:
s1=s.findall("{http://uniprot.org/uniprot}name")
for s2 in s1:
if(s2.attrib["type"]=='scientific'):
organism=s2.text
break
if organism !="":
break
#print("organism",organism)
#dbReference: PMP,GO,Pfam, EC
ids = elem.findall("{http://uniprot.org/uniprot}dbReference")
pfams = []
gos =[]
ecs = []
pdbs =[]
for i in ids:
#print(i.attrib["id"],i.attrib["type"])
#cf. http://geneontology.org/external2go/uniprotkb_kw2go for Uniprot Keyword<->GO mapping
#http://geneontology.org/ontology/go-basic.obo for List of go terms
#https://www.uniprot.org/help/keywords_vs_go keywords vs. go
if(i.attrib["type"]=="GO"):
tmp1 = i.attrib["id"]
for i2 in i:
if i2.attrib["type"]=="evidence":
tmp2= i2.attrib["value"]
gos.append([int(tmp1[3:]),int(tmp2[4:])]) #first value is go code, second eco evidence ID (see mapping below)
elif(i.attrib["type"]=="Pfam"):
pfams.append(i.attrib["id"])
elif(i.attrib["type"]=="EC"):
ecs.append(i.attrib["id"])
elif(i.attrib["type"]=="PDB"):
pdbs.append(i.attrib["id"])
#print("PMP: ", pmp)
#print("GOs:",gos)
#print("Pfams:",pfam)
#print("ECs:",ecs)
#print("PDBs:",pdbs)
#keyword
keywords = elem.findall("{http://uniprot.org/uniprot}keyword")
keywords_lst = []
#print(keywords)
for k in keywords:
keywords_lst.append(int(k.attrib["id"][-4:]))#remove the KW-
#print("keywords: ",keywords_lst)
#comments = elem.findall("{http://uniprot.org/uniprot}comment")
#comments_lst=[]
##print(comments)
#for c in comments:
# if(c.attrib["type"]=="function"):
# for c1 in c:
# comments_lst.append(c1.text)
#print("function: ",comments_lst)
#ptm etc
if len(parse_features)>0:
ptms=[]
features = elem.findall("{http://uniprot.org/uniprot}feature")
for f in features:
if(f.attrib["type"] in parse_features):#only add features of the requested type
locs=[]
for l in f[0]:
locs.append(int(l.attrib["position"]))
ptms.append([f.attrib["type"],f.attrib["description"] if 'description' in f.attrib else "NaN",locs, f.attrib['evidence'] if 'evidence' in f.attrib else "NaN"])
#print(ptms)
data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":proteinexistence, "fragment":fragment, "organism":organism, "ecs": ecs, "pdbs": pdbs, "pfams" : pfams, "keywords": keywords_lst, "gos": gos, "sequence": sequence}
if len(parse_features)>0:
data_dict["features"]=ptms
#print("all children:")
#for c in elem:
# print(c)
# print(c.tag)
# print(c.attrib)
rows.append(data_dict)
def parse_uniprot_seqio(filename,max_entries=0):
'''parse uniprot xml file using the SeqIO parser (smaller functionality e.g. does not extract evidence codes for GO)'''
sprot = SeqIO.parse(filename, "uniprot-xml")
rows = []
for p in tqdm(sprot):
accession = str(p.name)
name = str(p.id)
dataset = str(p.annotations['dataset'])
organism = str(p.annotations['organism'])
ecs, pdbs, pfams, gos = [],[],[],[]
for ref in p.dbxrefs:
k = ref.split(':')
if k[0] == 'GO':
gos.append(':'.join(k[1:]))
elif k[0] == 'Pfam':
pfams.append(k[1])
elif k[0] == 'EC':
ecs.append(k[1])
elif k[0] == 'PDB':
pdbs.append(k[1:])
if 'keywords' in p.annotations.keys():
keywords = p.annotations['keywords']
else:
keywords = []
sequence = str(p.seq)
row = {
'ID': accession,
'name':name,
'dataset':dataset,
'organism':organism,
'ecs':ecs,
'pdbs':pdbs,
'pfams':pfams,
'keywords':keywords,
'gos':gos,
'sequence':sequence}
rows.append(row)
if(max_entries>0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
def filter_human_proteome(df_sprot):
'''extracts human proteome from swissprot proteines in DataFrame with column organism '''
is_Human = np.char.find(df_sprot.organism.values.astype(str), "Human") !=-1
is_human = np.char.find(df_sprot.organism.values.astype(str), "human") !=-1
is_sapiens = np.char.find(df_sprot.organism.values.astype(str), "sapiens") !=-1
is_Sapiens = np.char.find(df_sprot.organism.values.astype(str), "Sapiens") !=-1
return df_sprot[is_Human|is_human|is_sapiens|is_Sapiens]
def filter_aas(df, exclude_aas=["B","J","X","Z"]):
'''excludes sequences containing exclude_aas: B = D or N, J = I or L, X = unknown, Z = E or Q'''
return df[~df.sequence.apply(lambda x: any([e in x for e in exclude_aas]))]
######################################################################################################
def explode_clusters_df(df_cluster):
'''aux. function to convert cluster dataframe from one row per cluster to one row per ID'''
df=df_cluster.reset_index(level=0)
rows = []
if('repr_accession' in df.columns):#include representative if it exists
_ = df.apply(lambda row: [rows.append([nn,row['entry_id'], row['repr_accession']==nn ]) for nn in row.members], axis=1)
df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID","representative"]).set_index(['ID'])
else:
_ = df.apply(lambda row: [rows.append([nn,row['entry_id']]) for nn in row.members], axis=1)
df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID"]).set_index(['ID'])
return df_exploded
def parse_uniref(filename,max_entries=0,parse_sequence=False, df_selection=None, exploded=True):
'''parse uniref (clustered sequences) xml ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/uniref50.xml.gz unzipped 100GB file
using custom low-level parser https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
max_entries: only return first max_entries entries (0=all)
parse_sequences: return also representative sequence
df_selection: only include entries with accessions that are present in df_selection.index (None keeps all records)
exploded: return one row per ID instead of one row per cluster
c.f. for full format ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/README
'''
#issue with long texts https://stackoverflow.com/questions/30577796/etree-incomplete-child-text
#wait for end rather than start tag
context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniref}entry")
context = iter(context)
rows =[]
for _, elem in tqdm(context):
parse_func_uniref(elem,rows,parse_sequence=parse_sequence, df_selection=df_selection)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("entry_id")
df["num_members"]=df.members.apply(len)
if(exploded):
return explode_clusters_df(df)
return df
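# Usage sketch (hypothetical file name): first 100 UniRef50 clusters, one row per member ID:
# df_uniref = parse_uniref("uniref50.xml", max_entries=100, parse_sequence=False, exploded=True)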
def parse_func_uniref(elem, rows, parse_sequence=False, df_selection=None):
'''extract a single uniref entry'''
#entry ID
entry_id = elem.attrib["id"]
#print("cluster id",entry_id)
#name
name = ""
names = elem.findall("{http://uniprot.org/uniref}name")
for n in names:
name=n.text[9:]
break
#print("cluster name",name)
members=[]
#representative member
repr_accession = ""
repr_sequence =""
repr = elem.findall("{http://uniprot.org/uniref}representativeMember")
for r in repr:
s1=r.findall("{http://uniprot.org/uniref}dbReference")
for s2 in s1:
for s3 in s2:
if s3.attrib["type"]=="UniProtKB accession":
if(repr_accession == ""):
repr_accession = s3.attrib["value"]#pick primary accession
members.append(s3.attrib["value"])
if parse_sequence is True:
s1=r.findall("{http://uniprot.org/uniref}sequence")
for s2 in s1:
repr_sequence = s2.text
if repr_sequence !="":
break
#print("representative member accession:",repr_accession)
#print("representative member sequence:",repr_sequence)
#all members
repr = elem.findall("{http://uniprot.org/uniref}member")
for r in repr:
s1=r.findall("{http://uniprot.org/uniref}dbReference")
for s2 in s1:
for s3 in s2:
if s3.attrib["type"]=="UniProtKB accession":
members.append(s3.attrib["value"]) #add primary and secondary accessions
#print("members", members)
if(not(df_selection is None)): #apply selection filter
members = [y for y in members if y in df_selection.index]
#print("all children")
#for c in elem:
# print(c)
# print(c.tag)
# print(c.attrib)
if(len(members)>0):
data_dict={"entry_id": entry_id, "name": name, "repr_accession":repr_accession, "members":members}
if parse_sequence is True:
data_dict["repr_sequence"]=repr_sequence
rows.append(data_dict)
###########################################################################################################################
#proteins and peptides from fasta
###########################################################################################################################
def parse_uniprot_fasta(fasta_path, max_entries=0):
'''parse uniprot from fasta file (which contains less information than the corresponding xml but is also much smaller e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta)'''
rows=[]
dataset_dict={"sp":"Swiss-Prot","tr":"TrEMBL"}
for seq_record in tqdm(SeqIO.parse(fasta_path, "fasta")):
sid=seq_record.id.split("|")
accession = sid[1]
dataset = dataset_dict[sid[0]]
name = sid[2]
description = seq_record.description
sequence=str(seq_record.seq)
#print(description)
m = re.search('PE=\d', description)
pe=int(m.group(0).split("=")[1])
m = re.search('OS=.* (?=OX=)', description)
organism=m.group(0).split("=")[1].strip()
data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":pe, "organism":organism, "sequence": sequence}
rows.append(data_dict)
if(max_entries > 0 and len(rows)==max_entries):
break
df=pd.DataFrame(rows).set_index("ID")
df['name'] = df.name.astype(str)
df['dataset'] = df.dataset.astype('category')
df['organism'] = df.organism.astype('category')
df['sequence'] = df.sequence.astype(str)
return df
def proteins_from_fasta(fasta_path):
'''load proteins (as seqrecords) from fasta (just redirects)'''
return seqrecords_from_fasta(fasta_path)
def seqrecords_from_fasta(fasta_path):
'''load seqrecords from fasta file'''
seqrecords = list(SeqIO.parse(fasta_path, "fasta"))
return seqrecords
def seqrecords_to_sequences(seqrecords):
'''converts biopythons seqrecords into a plain list of sequences'''
return [str(p.seq) for p in seqrecords]
def sequences_to_fasta(sequences, fasta_path, sequence_id_prefix="s"):
'''save plain list of sequences to fasta'''
with open(fasta_path, "w") as output_handle:
for i,s in tqdm(enumerate(sequences)):
record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(s), id=sequence_id_prefix+str(i), description="")
SeqIO.write(record, output_handle, "fasta")
def df_to_fasta(df, fasta_path):
'''Save column "sequence" from pandas DataFrame to fasta file using the index of the DataFrame as ID. Preserves original IDs in contrast to the function sequences_to_fasta()'''
with open(fasta_path, "w") as output_handle:
for row in df.iterrows():
record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(row[1]["sequence"]), id=str(row[0]), description="")
SeqIO.write(record, output_handle, "fasta")
def sequences_to_df(sequences, sequence_id_prefix="s"):
data = {'ID': [(sequence_id_prefix+str(i) if sequence_id_prefix!="" else i) for i in range(len(sequences))], 'sequence': sequences}
df=pd.DataFrame.from_dict(data)
return df.set_index("ID")
def fasta_to_df(fasta_path):
seqs=SeqIO.parse(fasta_path, "fasta")
res=[]
for s in seqs:
res.append({"ID":s.id,"sequence":str(s.seq)})
return pd.DataFrame(res)
def peptides_from_proteins(protein_seqrecords, miss_cleavage=2,min_length=5,max_length=300):
'''extract peptides from proteins seqrecords by trypsin digestion
min_length: only return peptides of length min_length or greater (0 for all)
max_length: only return peptides of length max_length or smaller (0 for all)
'''
peptides = []
for seq in tqdm(protein_seqrecords):
peps = trypsin_digest(str(seq.seq), miss_cleavage)
peptides.extend(peps)
tmp=list(set(peptides))
if(min_length>0 and max_length>0):
tmp=[t for t in tmp if (len(t)>=min_length and len(t)<=max_length)]
elif(min_length==0 and max_length>0):
tmp=[t for t in tmp if len(t)<=max_length]
elif(min_length>0 and max_length==0):
tmp=[t for t in tmp if len(t)>=min_length]
print("Extracted",len(tmp),"unique peptides.")
return tmp
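# Usage sketch (hypothetical fasta path): tryptic peptides of length 5-300 with up to 2 missed cleavages:
# prots = proteins_from_fasta("proteome.fasta")
# peps = peptides_from_proteins(prots, miss_cleavage=2, min_length=5, max_length=300)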
def trypsin_digest(proseq, miss_cleavage):
'''trypsin digestion of protein seqrecords
TRYPSIN from https://github.com/yafeng/trypsin/blob/master/trypsin.py'''
peptides=[]
cut_sites=[0]
for i in range(0,len(proseq)-1):
if proseq[i]=='K' and proseq[i+1]!='P':
cut_sites.append(i+1)
elif proseq[i]=='R' and proseq[i+1]!='P':
cut_sites.append(i+1)
if cut_sites[-1]!=len(proseq):
cut_sites.append(len(proseq))
if len(cut_sites)>2:
if miss_cleavage==0:
for j in range(0,len(cut_sites)-1):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
elif miss_cleavage==1:
for j in range(0,len(cut_sites)-2):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+2]])
peptides.append(proseq[cut_sites[-2]:cut_sites[-1]])
elif miss_cleavage==2:
for j in range(0,len(cut_sites)-3):
peptides.append(proseq[cut_sites[j]:cut_sites[j+1]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+2]])
peptides.append(proseq[cut_sites[j]:cut_sites[j+3]])
peptides.append(proseq[cut_sites[-3]:cut_sites[-2]])
peptides.append(proseq[cut_sites[-3]:cut_sites[-1]])
peptides.append(proseq[cut_sites[-2]:cut_sites[-1]])
else: #there is no trypsin site in the protein sequence
peptides.append(proseq)
return list(set(peptides))
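# Usage sketch (toy sequence, for illustration only): cuts after K/R unless followed by P,
# here allowing one missed cleavage:
# trypsin_digest("MKWVTFISLLFLFSSAYSRGVFRR", 1)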
###########################################################################
# Processing CD-HIT clusters
###########################################################################
def clusters_df_from_sequence_df(df,threshold=[1.0,0.9,0.5],alignment_coverage=[0.0,0.9,0.8],memory=16000, threads=8, exploded=True, verbose=False):
'''create clusters df from sequence df (using cd hit)
df: dataframe with sequence information
threshold: similarity threshold for clustering (pass a list for hierarchical clustering e.g [1.0, 0.9, 0.5])
alignment_coverage: required minimum coverage of the longer sequence (to mimic uniref https://www.uniprot.org/help/uniref)
memory: limit available memory
threads: limit number of threads
exploded: return exploded view of the dataframe (one row for every member vs. one row for every cluster)
uses CD-HIT for clustering
https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide
copy cd-hit into ~/bin
TODO: extend to psi-cd-hit for thresholds smaller than 0.4
'''
if verbose:
print("Exporting original dataframe as fasta...")
fasta_file = "cdhit.fasta"
df_original_index = list(df.index) #reindex the dataframe since cdhit can only handle 19 letters
df = df.reset_index(drop=True)
df_to_fasta(df, fasta_file)
if(not(isinstance(threshold, list))):
threshold=[threshold]
alignment_coverage=[alignment_coverage]
assert(len(threshold)==len(alignment_coverage))
fasta_files=[]
for i,thr in enumerate(threshold):
if(thr< 0.4):#use psi-cd-hit here
print("thresholds lower than 0.4 require psi-cd-hit.pl (building on BLAST), which is currently not supported")
return pd.DataFrame()
elif(thr<0.5):
wl = 2
elif(thr<0.6):
wl = 3
elif(thr<0.7):
wl = 4
else:
wl = 5
aL = alignment_coverage[i]
#cd-hit -i nr -o nr80 -c 0.8 -n 5
#cd-hit -i nr80 -o nr60 -c 0.6 -n 4
#psi-cd-hit.pl -i nr60 -o nr30 -c 0.3
if verbose:
print("Clustering using cd-hit at threshold", thr, "using wordlength", wl, "and alignment coverage", aL, "...")
fasta_file_new= "cdhit"+str(int(thr*100))+".fasta"
command = "cd-hit -i "+fasta_file+" -o "+fasta_file_new+" -c "+str(thr)+" -n "+str(wl)+" -aL "+str(aL)+" -M "+str(memory)+" -T "+str(threads)
if(verbose):
print(command)
process= subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output, error = process.communicate()
if(verbose):
print(output)
if(error !=""):
print(error)
fasta_files.append(fasta_file)
if(i==len(threshold)-1):
fasta_files.append(fasta_file_new)
fasta_file= fasta_file_new
#join results from all clustering steps
if verbose:
print("Joining results from different clustering steps...")
for i,f in enumerate(reversed(fasta_files[1:])):
if verbose:
print("Processing",f,"...")
if(i==0):
df_clusters = parse_cdhit_clstr(f+".clstr",exploded=False)
else:
df_clusters2 = parse_cdhit_clstr(f+".clstr",exploded=False)
for id,row in df_clusters.iterrows():
members = row['members']
new_members = [list(df_clusters2[df_clusters2.repr_accession==y].members)[0] for y in members]
new_members = [item for sublist in new_members for item in sublist] #flattened
df_clusters.at[id,'members'] = new_members #write back explicitly; assigning to the iterrows copy would not update the dataframe
df_clusters["members"]=df_clusters["members"].apply(lambda x:[df_original_index[int(y)] for y in x])
df_clusters["repr_accession"]=df_clusters["repr_accession"].apply(lambda x:df_original_index[int(x)])
if(exploded):
return explode_clusters_df(df_clusters)
return df_clusters
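# Hypothetical usage sketch (assumes a sequence dataframe `df_seq` compatible with
# df_to_fasta and a cd-hit binary on the PATH; not part of the original code):
#   df_clusters = clusters_df_from_sequence_df(df_seq, threshold=[1.0, 0.9, 0.5],
#                                              alignment_coverage=[0.0, 0.9, 0.8],
#                                              exploded=True, verbose=True)
# In the exploded view, each row maps one original sequence index to its cluster entry_id.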
def parse_cdhit_clstr(filename, exploded=True):
'''Aux. function (used by clusters_df_from_sequence_df) to parse CD-HIT's .clstr output file in a similar way as the uniref data
for the format see https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide#CDHIT
exploded: single row for every ID instead of single for every cluster
'''
def save_cluster(rows,members,representative):
if(len(members)>0):
rows.append({"entry_id":filename[:-6]+"_"+representative, "members":members, "repr_accession":representative})
rows=[]
with open(filename, 'r') as f:
members=[]
representative=""
for l in tqdm(f):
if(l[0]==">"):
save_cluster(rows,members,representative)
members=[]
representative=""
else:
member=(l.split(">")[1]).split("...")[0]
members.append(member)
if "*" in l:
representative = member
save_cluster(rows,members,representative)
df=pd.DataFrame(rows).set_index("entry_id")
if(exploded):
return explode_clusters_df(df)
return df
###########################################################################
# MHC DATA
###########################################################################
######### Helper functions ##########
def _label_binder(data, threshold=500, measurement_column="meas"):
# Drop entries above IC50 > 500nM with inequality < (ambiguous)
to_drop = (( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold))).mean()
if to_drop > 0:
print('Dropping {} % because of ambiguous inequality'.format(to_drop))
data = data[~(( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold)))]
# Labeling
data['label'] = (1* data[measurement_column]<=threshold).astype(int)
return data
def _transform_ic50(data, how="log",max_ic50=50000.0, inequality_offset=True, label_column="meas"):
"""Transform ic50 measurements
how: "log" logarithmic transform, inequality "=" mapped to [0,1], inequality ">" mapped to [2,3], inequality "<" mapped to [4,5]
"norm"
"cap"
"""
x = data[label_column]
if how=="cap":
x = np.minimum(x, 50000)
elif how=="norm":
x = np.minimum(x, 50000)
x = (x - x.mean()) / x.std()
elif how=="log":
# log transform
x = 1 - (np.log(x)/np.log(max_ic50))
x = np.minimum(1.0, np.maximum(0.0,x))
if(inequality_offset):
# add offsets for loss
offsets = pd.Series(data['inequality']).map({'=': 0, '>': 2, '<': 4,}).values
x += offsets
return x
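# Worked example (illustrative): with how="log" and max_ic50=50000, a measurement of 500 nM
# maps to 1 - ln(500)/ln(50000), i.e. about 0.43; values are clipped to [0, 1] before the
# optional inequality offsets (0, 2 or 4) are added.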
def _string_index(data):
# Add prefix letter "a" to the numerical index (such that it is clearly a string in order to avoid later errors).
data["ID"] = data.index
data["ID"] = data["ID"].apply(lambda x: "a"+ str(x))
data = data.set_index(["ID"])
return data
def _format_alleles(x):
if x[:3]=='HLA':
return x[:5]+'-'+x[6:8]+x[9:]
if x[:4]=='Mamu':
return x[:6]+'-'+x[7:]
else:
return x
def _get_allele_ranking(data_dir='.'):
'''
Allele ranking should be the same across different datasets (noMS, withMS) to avoid confusion.
Thus, the ranking is based on the larger withMS dataset
'''
data_dir = Path(data_dir)
curated_withMS_path = data_dir/'data_curated.20180219'/'curated_training_data.with_mass_spec.csv'
df = | pd.read_csv(curated_withMS_path) | pandas.read_csv |
"""
Module containing the Company Class.
Abbreviations used in code:
dfi = input dataframe
dfo = output dataframe
"""
from typing import Literal
import numpy as np
import pandas as pd
from . import config as c
class Company:
"""
Finance Data Class for listed Brazilian Companies.
Attributes
----------
identifier: int or str
A unique identifier used to select a company in the financial
dataset. Either the CVM ID or the Fiscal ID can be used. CVM ID
(regulator ID) must be an integer. Fiscal ID must be a string in
'XX.XXX.XXX/XXXX-XX' format.
"""
def __init__(
self,
identifier: int | str,
acc_method: Literal["consolidated", "separate"] = "consolidated",
acc_unit: float | str = 1.0,
tax_rate: float = 0.34,
):
"""Initialize main variables.
Parameters
----------
identifier: int or str
A unique identifier used to select a company in the financial dataset.
Either the CVM ID or the Fiscal ID can be used.
CVM ID (regulator ID) must be an integer.
Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
acc_method : {'consolidated', 'separate'}, default 'consolidated'
Accounting method used for registering investments in subsidiaries.
acc_unit : float or str, default 1.0
acc_unit is a constant that will divide company account values.
The constant can be a number greater than zero or the strings
{'thousand', 'million', 'billion'}.
tax_rate : float, default 0.34
The 'tax_rate' attribute will be used to calculate some of the
company indicators.
"""
self.set_id(identifier)
self.acc_method = acc_method
self.acc_unit = acc_unit
self.tax_rate = tax_rate
def set_id(self, identifier: int | str):
"""
Set a unique identifier used to select the company in the financial dataset.
Parameters
----------
value: int or str
A unique identifier used to select a company in the financial dataset.
Either the CVM ID or the Fiscal ID can be used.
CVM ID (regulator ID) must be an integer.
Fiscal ID must be a string in 'XX.XXX.XXX/XXXX-XX' format.
Returns
-------
int or str
Raises
------
KeyError
* If passed ``identifier`` not found in as fi.
"""
# Create custom data frame for ID selection
df = (
c.main_df[["cvm_id", "fiscal_id"]]
.drop_duplicates()
.astype({"cvm_id": int, "fiscal_id": str})
)
if identifier in df["cvm_id"].values:
self._cvm_id = identifier
self._fiscal_id = df.loc[df["cvm_id"] == identifier, "fiscal_id"].item()
elif identifier in df["fiscal_id"].values:
self._fiscal_id = identifier
self._cvm_id = df.loc[df["fiscal_id"] == identifier, "cvm_id"].item()
else:
raise KeyError("Company 'identifier' not found in database")
# Only set company data after object identifier validation
self._set_main_data()
@property
def acc_method(self):
"""
Get or set accounting method used for registering investments in
subsidiaries.
Parameters
----------
value : {'consolidated', 'separate'}, default 'consolidated'
Accounting method used for registering investments in subsidiaries.
Returns
-------
str
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._acc_method
@acc_method.setter
def acc_method(self, value: Literal["consolidated", "separate"]):
if value in {"consolidated", "separate"}:
self._acc_method = value
else:
raise ValueError("acc_method expects 'consolidated' or 'separate'")
@property
def acc_unit(self):
"""
Get or set a constant to divide company account values.
Parameters
----------
value : float or str, default 1.0
acc_unit is a constant that will divide company account values.
The constant can be a number greater than zero or the strings
{'thousand', 'million', 'billion'}.
Returns
-------
float
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._acc_unit
@acc_unit.setter
def acc_unit(self, value: float | str):
if value == "thousand":
self._acc_unit = 1_000
elif value == "million":
self._acc_unit = 1_000_000
elif value == "billion":
self._acc_unit = 1_000_000_000
elif isinstance(value, (int, float)) and value > 0:
self._acc_unit = value
else:
raise ValueError("Accounting Unit is invalid")
@property
def tax_rate(self):
"""
Get or set company 'tax_rate' attribute.
Parameters
----------
value : float, default 0.34
'value' will be passed to 'tax_rate' object attribute if
0 <= value <= 1.
Returns
-------
float
Raises
------
ValueError
* If passed ``value`` is invalid.
"""
return self._tax_rate
@tax_rate.setter
def tax_rate(self, value: float):
if 0 <= value <= 1:
self._tax_rate = value
else:
raise ValueError("Company 'tax_rate' value is invalid")
def _set_main_data(self) -> pd.DataFrame:
self._COMP_DF = (
c.main_df.query("cvm_id == @self._cvm_id")
.astype(
{
"co_name": str,
"cvm_id": np.uint32,
"fiscal_id": str,
"report_type": str,
"report_version": str,
"period_reference": "datetime64",
"period_begin": "datetime64",
"period_end": "datetime64",
"period_order": np.int8,
"acc_code": str,
"acc_name": str,
"acc_method": str,
"acc_fixed": bool,
"acc_value": float,
"equity_statement_column": str,
}
)
.sort_values(by="acc_code", ignore_index=True)
)
self._NAME = self._COMP_DF["co_name"].iloc[0]
self._FIRST_ANNUAL = self._COMP_DF.query('report_type == "annual"')[
"period_end"
].min()
self._LAST_ANNUAL = self._COMP_DF.query('report_type == "annual"')[
"period_end"
].max()
self._LAST_QUARTERLY = self._COMP_DF.query('report_type == "quarterly"')[
"period_end"
].max()
def info(self) -> pd.DataFrame:
"""Return dataframe with company info."""
company_info = {
"Name": self._NAME,
"CVM ID": self._cvm_id,
"Fiscal ID (CNPJ)": self._fiscal_id,
"Total Accounting Rows": len(self._COMP_DF.index),
"Selected Tax Rate": self._tax_rate,
"Selected Accounting Method": self._acc_method,
"Selected Accounting Unit": self._acc_unit,
"First Annual Report": self._FIRST_ANNUAL.strftime("%Y-%m-%d"),
"Last Annual Report": self._LAST_ANNUAL.strftime("%Y-%m-%d"),
"Last Quarterly Report": self._LAST_QUARTERLY.strftime("%Y-%m-%d"),
}
df = pd.DataFrame.from_dict(company_info, orient="index", columns=["Values"])
df.index.name = "Company Info"
return df
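# Hypothetical usage sketch (the CVM ID below is illustrative, not taken from the source):
#   co = Company(9999, acc_method="consolidated", acc_unit="million")
#   co.info()                                   # one-column dataframe with company metadata
#   co.report("income", acc_level=3, num_years=5)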
def report(
self,
report_type: str,
acc_level: int | None = None,
num_years: int = 0,
) -> pd.DataFrame:
"""
Return a DataFrame with company selected report type.
This function generates a report representing one of the financial
statements for the company adjusted by the attributes passed and
returns a pandas.DataFrame with this report.
Parameters
----------
report_type : {'assets', 'liabilities_and_equity', 'liabilities',
'equity', 'income', 'cash_flow'}
Report type to be generated.
acc_level : {None, 2, 3, 4}, default None
Detail level to show for account codes.
acc_level = None -> X... (default: show all accounts)
acc_level = 2 -> X.YY (show 2 levels)
acc_level = 3 -> X.YY.ZZ (show 3 levels)
acc_level = 4 -> X.YY.ZZ.WW (show 4 levels)
num_years : int, default 0
Select how many last years to show where 0 -> show all years
Returns
------
pandas.DataFrame
Raises
------
ValueError
* If ``report_type`` attribute is invalid
* If ``acc_level`` attribute is invalid
"""
# Check input arguments.
if acc_level not in {None, 2, 3, 4}:
raise ValueError("acc_level expects None, 2, 3 or 4")
df = self._COMP_DF.query("acc_method == @self._acc_method").copy()
# Change acc_unit only for accounts different from 3.99
df["acc_value"] = np.where(
df["acc_code"].str.startswith("3.99"),
df["acc_value"],
df["acc_value"] / self._acc_unit,
)
# Filter dataframe for selected acc_level
if acc_level:
acc_code_limit = acc_level * 3 - 2 # noqa
df.query("acc_code.str.len() <= @acc_code_limit", inplace=True)
"""
Filter dataframe for selected report_type (report type)
df['acc_code'].str[0].unique() -> [1, 2, 3, 4, 5, 6, 7]
The first part of 'acc_code' is the report type
Table of reports correspondence:
1 -> Balance Sheet - Assets
2 -> Balance Sheet - Liabilities and Shareholders’ Equity
3 -> Income
4 -> Comprehensive Income
5 -> Changes in Equity
6 -> Cash Flow (Indirect Method)
7 -> Added Value
"""
report_types = {
"assets": ["1"],
"cash": ["1.01.01", "1.01.02"],
"current_assets": ["1.01"],
"non_current_assets": ["1.02"],
"liabilities": ["2.01", "2.02"],
"debt": ["2.01.04", "2.02.01"],
"current_liabilities": ["2.01"],
"non_current_liabilities": ["2.02"],
"liabilities_and_equity": ["2"],
"equity": ["2.03"],
"income": ["3"],
# "earnings_per_share": ["3.99.01.01", "3.99.02.01"],
"earnings_per_share": ["3.99"],
"comprehensive_income": ["4"],
"changes_in_equity": ["5"],
"cash_flow": ["6"],
"added_value": ["7"],
}
acc_codes = report_types[report_type]
expression = ""
for count, acc_code in enumerate(acc_codes):
if count > 0:
expression += " or "
expression += f'acc_code.str.startswith("{acc_code}")'
df.query(expression, inplace=True)
# remove earnings per share from the income statement
if report_type == 'income':
df = df[~df['acc_code'].str.startswith("3.99")]
if report_type in {"income", "cash_flow"}:
df = self._calculate_ttm(df)
df.reset_index(drop=True, inplace=True)
report_df = self._make_report(df)
report_df.set_index(keys="acc_code", drop=True, inplace=True)
# Show only selected years
if num_years > 0:
cols = report_df.columns.to_list()
cols = cols[0:2] + cols[-num_years:]
report_df = report_df[cols]
return report_df
def _calculate_ttm(self, dfi: pd.DataFrame) -> pd.DataFrame:
if self._LAST_ANNUAL > self._LAST_QUARTERLY:
return dfi.query('report_type == "annual"').copy()
df1 = dfi.query("period_end == @self._LAST_QUARTERLY").copy()
df1.query("period_begin == period_begin.min()", inplace=True)
df2 = dfi.query("period_reference == @self._LAST_QUARTERLY").copy()
df2.query("period_begin == period_begin.min()", inplace=True)
df2["acc_value"] = -df2["acc_value"]
df3 = dfi.query("period_end == @self._LAST_ANNUAL").copy()
df_ttm = (
pd.concat([df1, df2, df3], ignore_index=True)[["acc_code", "acc_value"]]
.groupby(by="acc_code")
.sum()
.reset_index()
)
df1.drop(columns="acc_value", inplace=True)
df_ttm = pd.merge(df1, df_ttm)
df_ttm["report_type"] = "quarterly"
df_ttm["period_begin"] = self._LAST_QUARTERLY - pd.DateOffset(years=1)
df_annual = dfi.query('report_type == "annual"').copy()
return pd.concat([df_annual, df_ttm], ignore_index=True)
def custom_report(
self,
acc_list: list[str],
num_years: int = 0,
) -> pd.DataFrame:
"""
Return a financial report from custom list of accounting codes
Creates DataFrame object with a custom list of accounting codes
adjusted by function attributes
Parameters
----------
acc_list : list[str]
A list of strings containg accounting codes to be used in report
num_years : int, default 0
Select how many last years to show where 0 -> show all years
Returns
-------
pandas.DataFrame
"""
df_as = self.report("assets")
df_le = self.report("liabilities_and_equity")
df_is = self.report("income")
df_cf = self.report("cash_flow")
dfo = pd.concat([df_as, df_le, df_is, df_cf]).query("acc_code == @acc_list")
# Show only selected years
if num_years > 0:
cols = dfo.columns.to_list()
cols = cols[0:2] + cols[-num_years:]
dfo = dfo[cols]
return dfo
@staticmethod
def _prior_values(s: pd.Series, is_prior: bool) -> pd.Series:
"""Shift row to the right in order to obtain series previous values"""
if is_prior:
arr = s.iloc[:-1].values
return np.append(np.nan, arr)
else:
return s
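# Illustrative example: for a series with values [10, 20, 30] and is_prior=True the helper
# returns [nan, 10, 20], so each return measure is divided by the prior period's book value.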
def indicators(self, num_years: int = 0, is_prior: bool = True) -> pd.DataFrame:
"""
Return company main operating indicators.
Creates DataFrame object with company operating indicators as
described in reference [1]
Parameters
----------
num_years : int, default 0
Select how many last years to show where 0 -> show all years
is_prior : bool, default True
Divide return measurements by book values from the end of the prior
year (see Damodaran reference).
Returns
-------
pandas.Dataframe
References
----------
.. [1] <NAME>, "Return on Capital (ROC), Return on Invested
Capital (ROIC) and Return on Equity (ROE): Measurement and
Implications.", 2007,
https://people.stern.nyu.edu/adamodar/pdfoles/papers/returnmeasures.pdf
https://people.stern.nyu.edu/adamodar/New_Home_Page/datafile/variable.htm
"""
df_as = self.report("assets")
df_le = self.report("liabilities_and_equity")
df_in = self.report("income")
df_cf = self.report("cash_flow")
df = pd.concat([df_as, df_le, df_in, df_cf]).drop(
columns=["acc_fixed", "acc_name"]
)
# Calculate indicators series
revenues = df.loc["3.01"]
gross_profit = df.loc["3.03"]
ebit = df.loc["3.05"]
ebt = df.loc["3.07"]
effective_tax = df.loc["3.08"]
depreciation_amortization = df.loc["6.01.01.04"]
ebitda = ebit + depreciation_amortization
operating_cash_flow = df.loc["6.01"]
# capex = df.loc["6.02"]
net_income = df.loc["3.11"]
total_assets = df.loc["1"]
total_assets_p = self._prior_values(total_assets, is_prior)
equity = df.loc["2.03"]
equity_p = self._prior_values(equity, is_prior)
total_cash = df.loc["1.01.01"] + df.loc["1.01.02"]
current_assets = df.loc["1.01"]
current_liabilities = df.loc["2.01"]
working_capital = current_assets - current_liabilities
total_debt = df.loc["2.01.04"] + df.loc["2.02.01"]
net_debt = total_debt - total_cash
invested_capital = total_debt + equity - total_cash
invested_capital_p = self._prior_values(invested_capital, is_prior)
# Output Dataframe (dfo)
dfo = pd.DataFrame(columns=df.columns)
dfo.loc["revenues"] = revenues
dfo.loc["operating_cash_flow"] = operating_cash_flow
# dfo.loc["capex"] = capex
dfo.loc["ebitda"] = ebitda
dfo.loc["ebit"] = ebit
dfo.loc["ebt"] = ebt
dfo.loc["effective_tax_rate"] = -1 * effective_tax / ebt
dfo.loc["net_income"] = net_income
dfo.loc["total_cash"] = total_cash
dfo.loc["total_debt"] = total_debt
dfo.loc["net_debt"] = net_debt
dfo.loc["working_capital"] = working_capital
dfo.loc["invested_capital"] = invested_capital
dfo.loc["return_on_assets"] = ebit * (1 - self._tax_rate) / total_assets_p
dfo.loc["return_on_capital"] = ebit * (1 - self._tax_rate) / invested_capital_p
dfo.loc["return_on_equity"] = net_income / equity_p
dfo.loc["gross_margin"] = gross_profit / revenues
dfo.loc["ebitda_margin"] = ebitda / revenues
dfo.loc["pre_tax_operating_margin"] = ebit / revenues
dfo.loc["after_tax_operating_margin"] = ebit * (1 - self._tax_rate) / revenues
dfo.loc["net_margin"] = net_income / revenues
dfo.index.name = "Company Financial Indicators"
# Show only the selected number of years
if num_years > 0:
dfo = dfo[dfo.columns[-num_years:]]
# Since all columns are strings representing corporate year, convert them to datetime64
dfo.columns = pd.to_datetime(dfo.columns)
return dfo
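# Hypothetical usage sketch (continuing the illustrative object above):
#   co.indicators(num_years=5)      # rows such as return_on_capital, ebitda_margin, ...
#   co.indicators(is_prior=False)   # divide return measures by same-period book values instead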
def _make_report(self, dfi: pd.DataFrame) -> pd.DataFrame:
# keep only last quarterly fs
if self._LAST_ANNUAL > self._LAST_QUARTERLY:
df = dfi.query('report_type == "annual"').copy()
df.query(
"period_order == -1 or \
period_end == @self._LAST_ANNUAL",
inplace=True,
)
else:
df = dfi.query(
'report_type == "annual" or \
period_end == @self._LAST_QUARTERLY'
).copy()
df.query(
"period_order == -1 or \
period_end == @self._LAST_QUARTERLY or \
period_end == @self._LAST_ANNUAL",
inplace=True,
)
# Create output dataframe with only the index
dfo = df.sort_values(by="period_end", ascending=True)[
["acc_name", "acc_code", "acc_fixed"]
].drop_duplicates(subset="acc_code", ignore_index=True, keep="last")
periods = list(df["period_end"].sort_values().unique())
for period in periods:
df_year = df.query("period_end == @period")[
["acc_value", "acc_code"]
].copy()
period_str = str(np.datetime_as_string(period, unit="D"))
if period == self._LAST_QUARTERLY:
period_str += " (ttm)"
df_year.rename(columns={"acc_value": period_str}, inplace=True)
dfo = | pd.merge(dfo, df_year, how="left", on=["acc_code"]) | pandas.merge |
from flask import Flask, render_template,flash,request
import os
from os import listdir
from os.path import isfile, join
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas as pd
import numpy as np
import json
import pickle
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import scipy
from scipy.stats import iqr
from scipy.interpolate import griddata
from PIL import Image, ImageDraw
from collections import Counter
import itertools
from datetime import date
import matplotlib.pyplot as plt
from lib import toimage
from keras.models import load_model
from keras import backend as K
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
def getting_data():
df = pd.read_csv("data/04_10558.csv", sep='|', engine='python',header=None)
df.columns = ['date', 'sensor','flag','pm10','co2','vocs','noise','temp','humi','co','hcho','pm25','n']
df=df.drop(['flag','co2','vocs','co','hcho','n'], axis=1)
df=df.dropna()
df_corr=df.iloc[:,[2,3,4,5,6]].corr(method ='pearson')
df_corr= df_corr.to_dict(orient='records')
df_corr = json.dumps(df_corr, indent=2)
#scatter data
tmpc=pd.Series(['a', 'b', 'c','d'])
tmpc=tmpc.repeat(360)
tmpc=tmpc[:df.shape[0]]
df['sepcolor'] = tmpc.values
chart_data = df.to_dict(orient='records')
chart_data = json.dumps(chart_data, indent=2)
#result_seoul=pd.DataFrame({"pred":["test.png"]})
#data = {'chart_data': chart_data,'records': records.to_dict(orient='records'),'result_seoul': result_seoul.to_dict(orient='records')}
#data = {'chart_data': chart_data,'records': records.to_dict(orient='records'),'records_inter': records_inter.to_dict(orient='records')}
#numpy
corr_np = np.load("data/pmcorr.npy")
df_corr = | pd.DataFrame(columns=['x','y','corr']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[47]:
import requests # Include HTTP Requests module
from bs4 import BeautifulSoup # Include BS web scraping module
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
# In[48]:
gameID = 'loyola-university-chicago/boxscore/4822'
url = "https://meangreensports.com/sports/mens-basketball/stats/2020-21/" + gameID
r = requests.get(url,verify=False)
soup = BeautifulSoup(r.text, "html.parser")
prds = soup.find_all('section', attrs = {'id':'play-by-play'})
# In[49]:
dfRaw = pd.DataFrame()
for i in prds:
p = 1
T = '00:20:00'
team = ''
action = ''
plr = ''
for prd in i.find_all('div',id=re.compile(r'period')):
for pos in prd.find_all('tr')[1:]:
t = pos.find('th',attrs={'scope':'row'}).text
if re.search(r'\d',t):
T = t
a = pos.find(lambda tag: tag.name == 'td' and
tag.get('class') == ['text-right','hide-on-medium-down']).text.strip()
h = pos.find(lambda tag: tag.name == 'td' and
tag.get('class') == ['hide-on-medium-down']).text.strip()
if len(a)>0:
team = 'A'
action = a
else:
team = 'H'
action = h
try:
plr = action.split(' by ')[1]
except:
print(action)
dfRaw = pd.concat([dfRaw,
pd.DataFrame([[p,T,team,action,plr]],
columns=['Period','Time','Team','ActionRaw','Player'])
])
p += 1
dfRaw['Action'] = dfRaw.ActionRaw.str.extract('([^a-z]{2,})')
# In[50]:
def cleanAction(x):
x = re.sub('^\d+', '', x).lstrip()
x = re.sub('\d+$', '', x).rstrip()
x = re.sub(' by$', '', x).rstrip()
return x
def cleanPeriod(x):
x = int(''.join(filter(str.isdigit, x)))
return x
def cleanPlayer(x):
x = x.lstrip().rstrip()
return x
def getStarters(df):
nprd = df['Period'].max()
periodStart = pd.to_timedelta('00:00:00')
periodEnd = pd.to_timedelta('00:40:00')
if nprd > 2:
n = nprd - 2
while n > 0:
periodEnd += pd.to_timedelta('00:05:00')
n -= 1
lineups2 = df[df.Action.isin(['SUB IN','SUB OUT'])
][['Player','Action','Time','Period','Team']]
linePV = pd.pivot_table(lineups2,index=['Player','Team'],columns='Action',values='Time',aggfunc=np.min).reset_index()
linePV['SUB IN'] = linePV['SUB IN'].fillna(periodStart)
linePV['SUB OUT'] = linePV['SUB OUT'].fillna(periodEnd)
starters = linePV[
(
(linePV['SUB OUT'] < linePV['SUB IN'])
)
|
(
(linePV['SUB IN'] == '00:00:00')
)
][['Team','Player','SUB OUT','SUB IN']]
return list(starters[starters['Team']=='H']['Player']),list(starters[starters['Team']=='A']['Player'])
def getStartersByPeriod(df,p):
periodStart = pd.to_timedelta('00:00:00')
periodEnd = pd.to_timedelta('00:20:00')
if p > 2:
periodEnd = pd.to_timedelta('00:05:00')
lineups2 = df[
df.Action.isin(['SUB IN','SUB OUT'])
][['Player','Action','Time','Period','Team']]
lineups2 = lineups2[lineups2['Period']==p]
linePV = pd.pivot_table(lineups2,index=['Player','Team'],columns='Action',values='Time',aggfunc=np.min).reset_index()
linePV['SUB IN'] = linePV['SUB IN'].fillna(periodStart)
linePV['SUB OUT'] = linePV['SUB OUT'].fillna(periodEnd)
starters = linePV[
(
(linePV['SUB OUT'] < linePV['SUB IN'])
)
|
(
(linePV['SUB IN'] == '00:00:00')
)
][['Team','Player','SUB OUT','SUB IN']]
return list(starters[starters['Team']=='H']['Player']),list(starters[starters['Team']=='A']['Player'])
def extractParens(s):
pat = '\(([^)]+)'
if re.search(pat,s):
s = re.findall(pat, s)[0]
else:
s = ''
return s
def removeParens(x):
return x.split("(")[0]
# In[51]:
try:
dfRaw['Duration'] = pd.to_datetime(dfRaw['Time'].astype(str)).diff().dt.total_seconds().div(-60)
except:
dfRaw['Duration'] = 0
# In[52]:
actValMap = {
'MISS LAYUP':0
, 'REBOUND DEF':0
, 'GOOD JUMPER':2
, 'MISS 3PTR':0
, 'REBOUND OFF':0
, 'GOOD 3PTR':3
, 'ASSIST':0
, 'FOUL':0
, 'GOOD LAYUP':2
, 'BLOCK':0
, 'TIMEOUT 30SEC':0
, 'SUB OUT':0
, 'SUB IN':0
, 'TURNOVER':0
, 'STEAL':0
, 'MISS JUMPER':0
, 'TIMEOUT MEDIA':0
, 'REBOUND DEADB':0
, 'GOOD FT':1
, 'GOOD DUNK':2
, 'MISS FT':0
}
# In[53]:
dfRaw['Action'] = dfRaw['Action'].apply(cleanAction)
#dfRaw['Period'] = dfRaw['Period'].apply(cleanPeriod)#.apply(int)
#dfRaw['Duration'] = df['duration'].apply(int)
dfRaw['ActionValue'] = dfRaw['Action'].map(actValMap).map(int,na_action='ignore')
dfRaw['Time'] = pd.to_timedelta('00:'+dfRaw['Time'])
dfRaw.loc[dfRaw['Period'] <= 2,'Time'] = pd.to_timedelta('00:20:00') - dfRaw.loc[dfRaw['Period'] <= 2,'Time']
dfRaw.loc[dfRaw['Period'] > 2,'Time'] = pd.to_timedelta('00:05:00') - dfRaw.loc[dfRaw['Period'] > 2,'Time']
dfRaw.loc[dfRaw['Period'] == 2,'Time'] += pd.to_timedelta('00:20:00')
# overtime periods start after the 40 minutes of regulation; each OT adds 5 minutes
dfRaw.loc[dfRaw['Period'] == 3,'Time'] += pd.to_timedelta('00:40:00')
dfRaw.loc[dfRaw['Period'] == 4,'Time'] += pd.to_timedelta('00:45:00')
dfRaw.loc[dfRaw['Period'] == 5,'Time'] += pd.to_timedelta('00:50:00')
dfRaw.loc[dfRaw['Period'] == 6,'Time'] += pd.to_timedelta('00:55:00')
dfRaw.loc[dfRaw['Period'] == 7,'Time'] += pd.to_timedelta('01:00:00')
dfRaw.loc[dfRaw['Period'] == 8,'Time'] += pd.to_timedelta('01:05:00')
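# Illustrative check (not from the original script): a play with 12:30 left on the clock in
# period 2 becomes 00:20:00 - 00:12:30 = 00:07:30 elapsed in the half, and the +00:20:00
# offset places it at 00:27:30 of total game time.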
# In[54]:
dfRaw['x'] = dfRaw['Player'].apply(extractParens)
dfRaw['Player'] = dfRaw['Player'].apply(removeParens)
dfRaw['Player'] = dfRaw['Player'].apply(cleanPlayer)
#dfRaw['Duration'] = dfRaw['Duration'].apply(int)
dfRaw.loc[dfRaw.Duration.isna(),'Duration'] = pd.to_timedelta(dfRaw.loc[dfRaw.Duration.isna(),'Time']).dt.total_seconds()#.div(-60)
# In[55]:
dfRaw['seqNo'] = dfRaw['Time'].ne(dfRaw['Time'].shift()).cumsum()
# In[56]:
conditions = [
(dfRaw['ActionValue'] == 1),
(dfRaw['ActionValue'] == 2),
(dfRaw['ActionValue'] == 3),
(dfRaw['Action'].str.contains('miss', case=False) & dfRaw['Action'].str.contains('3')),
(dfRaw['Action'].str.contains('miss', case=False) & ~dfRaw['Action'].str.contains('3') & ~dfRaw['Action'].str.contains('ft', case=False)),
(dfRaw['Action'].str.contains('miss', case=False) & ~dfRaw['Action'].str.contains('3') & dfRaw['Action'].str.contains('ft', case=False))
]
choices = ['FTM', 'FG2', 'FG3','3PA','2PA','FTA']
dfRaw['action_edit1'] = np.select(conditions, choices, default=dfRaw['Action'])
dfRaw['playScore'] = dfRaw['Time'].map(dfRaw.groupby("Time")['ActionValue'].sum())
# In[57]:
def set_pm(df,rosterH,rosterA,debug=False,isHome=True):
HLU,ALU = getStarters(df)
lineupDF = df[df.Action.isin(['SUB IN','SUB OUT'])].copy().reindex(columns=['Time'
,'Action'
,'Player'
,'Team'
,'scoreHome'
,'scoreAway'
,'seqNo'
,'Period'
])
lineupDF = lineupDF.reset_index()
#rosterH = [p for p in lineupDF[lineupDF['team']=='Home']['player'].unique()]
#rosterA = [p for p in lineupDF[lineupDF['team']=='Away']['player'].unique()]
seq = lineupDF.loc[0,'seqNo'].copy()
time = lineupDF.loc[0,'Time']
hSc = lineupDF.loc[0,'scoreHome'].copy()
aSc = lineupDF.loc[0,'scoreAway'].copy()
prd = lineupDF.loc[0,'Period'].copy()
diff = hSc-aSc
away = pd.DataFrame(data={'Lineup':[ALU],'Time':pd.to_timedelta('00:00:00'),'Team':'A','diff':0}).head(1)
home = pd.DataFrame(data={'Lineup':[HLU],'Time':pd.to_timedelta('00:00:00'),'Team':'H','diff':0}).head(1)
h = home.loc[0,'Lineup'].copy()
h.sort()
a = away.loc[0,'Lineup'].copy()
a.sort()
hPlayerPM = {'H':{i:{'curDiff':0, 'pm':0, 'curTime': | pd.to_timedelta('00:00:00') | pandas.to_timedelta |
import chesscom as chess
import chess_match as cm
import pandas as pd
import json
from datetime import datetime
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
DATA_FOLDER = './data'
OUTPUTS_FOLDER = './outputs'
RESULTS_FOLDER = './results'
import re
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
if not isinstance(value, str):
value = str(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = value.decode('utf-8')
value = str(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)
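# Illustrative example: _slugify("Team A vs. Team B!") -> "team-a-vs-team-b"
# (non-alphanumeric characters are stripped and whitespace collapses to single hyphens).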
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-id", dest='match_id', type=str, help="Number of match id")
parser.add_argument("-url", dest='match_url', type=str, help="URL of the match")
parser.add_argument("-N", dest='N_predict', type=int, default=1000, help="Number of trials to predict match resutl")
parser.add_argument("-Nb", dest='N_bias', type=int, default=1000, help="Number of trials to predict match biased resutl")
parser.add_argument("-bias", dest='bias', type=float, default=0.0, help="ELOs bias for Team A")
parser.add_argument("-u", action='store_true', help="Sorce update data from web")
parser.add_argument("-plot", action='store_true', default=False, help="Show plots")
args = parser.parse_args()
# Turn interactive plotting off
if not args.plot:
plt.ioff()
# check folders
if not os.path.exists(DATA_FOLDER):
os.makedirs(DATA_FOLDER)
if not os.path.exists(OUTPUTS_FOLDER):
os.makedirs(OUTPUTS_FOLDER)
if not os.path.exists(OUTPUTS_FOLDER):
os.makedirs(OUTPUTS_FOLDER)
# resolve match_id
if args.match_id:
match_id = int(args.match_id)
elif args.match_url:
match_id = args.match_url
# get data about match
data = chess.get_match_data(match_id)
#print('\n')
match_name = _slugify(chess.get_match_name(match_id))
print('\nMatch info:')
print('\tName:\t{}'.format(chess.get_match_name(match_id)))
teams_names = chess.get_teams_names(match_id)
print('\tTeam A:\t{}'.format(teams_names[0]))
print('\tTeam B:\t{}'.format(teams_names[1]))
# Get ELOs list into matrix (M)
print('\nReading ELOs list')
match_stats_filename = DATA_FOLDER+'/'+match_name+'_match_stats.xlsx'
if os.path.exists(match_stats_filename) and not args.u:
print('\tA backup file was found!')
print('\tLoading from file {} ...'.format(match_stats_filename))
match_stats_list_df = pd.read_excel(match_stats_filename)
match_stats_list_np = match_stats_list_df.to_numpy()
M = np.array([s[2:] for s in match_stats_list_np]).astype(int)
print('\tNote: if you want update backup file, use -u argument.')
print('\tDone!')
else:
print('\tLoading from web ...')
match_stats_list = chess.get_match_elos_list(data, format='list')
print('\tDone!')
print('\tSaving backup file{}'.format(match_stats_filename))
match_stats_list_df = | pd.DataFrame.from_dict(match_stats_list['boards_stats']) | pandas.DataFrame.from_dict |
# import tabula
import pandas as pd
import numpy as np
# !pip install tabula-py
import camelot
import os
import string
import pytz
from datetime import datetime, timezone, timedelta
from tzlocal import get_localzone
from StatusMsg import StatusMsg
from tqdm import tqdm
from urllib.error import HTTPError
import re
import tabula
from tabulate import tabulate
import io
# from datetime import datetime,timedelta
# This program extracts the tables from the PDF files.
# Some preprocessing is needed to convert them to the raw CSV format.
# This has been done for KA and HR for reference.
# a=b
#declare the path of your file
# file_path = r"../INPUT/2021-10-26/KA.pdf"
#Convert your file
# reads all the tables in the PDF
class FileFormatChanged(Exception):
pass
# def getAPData(file_path,date,StateCode):
# table = camelot.read_pdf(file_path,pages='1')
# if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
# os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
# table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode))
# df_districts.columns = df_districts.columns.str.replace("\n","")
# col_dict = {"TotalPositives":"Confirmed","TotalRecovered":"Recovered","TotalDeceased":"Deceased"}
# df_districts.rename(columns=col_dict,inplace=True)
# # df_districts.drop(columns=['S.No','PositivesLast 24 Hrs','TotalActive Cases'],inplace=True)
# df_districts = df_districts[df_districts['District']!="Total AP Cases"]
# df_summary = df_districts
# df_districts = df_districts[:-1]
# df_json = pd.read_json("../DistrictMappingMaster.json")
# dist_map = df_json['Andhra Pradesh'].to_dict()
# df_districts['District'].replace(dist_map,inplace=True)
# df_summary = df_summary.iloc[-1,:]
# # print(df_districts)
# # print(df_summary)
# # a=b
# return df_summary,df_districts
def combine_listItems(list):
combined_items = ' '.join([str(item) for item in list])
return combined_items
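# Illustrative example: combine_listItems(["East", "Godavari"]) -> "East Godavari"
# (list items are joined with single spaces).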
def getAPData(file_path, date, StateCode):
try:
# print(file_path)
file = tabula.read_pdf(file_path,pages=1,stream = True)
# print(file)
table = tabulate(file)
# print(table)
df_districts = pd.read_fwf(io.StringIO(table))
# remove junk on top and reset the index
df_districts.drop(df_districts.head(4).index, inplace=True)
df_districts = df_districts.reset_index()
# remove bottom junk
df_districts.drop(df_districts.tail(2).index, inplace=True)
df_other_cols = df_districts
# print(df_districts)
# remove unnecessary columns
cols = [0, 4, 6]
df_districts.drop(df_districts.columns[cols], axis=1, inplace=True)
# add column names
df_districts.columns = ['S.No','District', 'cumulativeConfirmedNumberForDistrict', 'District_1', 'Cases_2']
df_districts.drop('S.No', axis=1, inplace=True)
new_df = df_districts
# splitting the dataframe
N = 2
splitted_list_df = np.split(df_districts, np.arange(N, len(df_districts.columns), N), axis=1)
part_A = splitted_list_df[0]
part_B = splitted_list_df[1]
# print(type(part_B))
part_B_cols = {"District_1": "District", "Cases_2": "cumulativeConfirmedNumberForDistrict"}
part_B.rename(columns=part_B_cols, inplace=True)
# concatenate two splitted DF's
df_districts = pd.concat([part_A, part_B], ignore_index=True, sort=False)
# print(df_districts)
# base_csv= '../RAWCSV/2022-04-05/myGov/AP_raw.csv'
# base_csv= '../RAWCSV/2022-04-17/myGov/AP_raw.csv'
base_csv= '../RAWCSV/2022-04-19/AP_raw.csv'
df_base_csv = pd.read_csv(base_csv)
# print(df_base_csv)
# df_base_csv.drop(df_base_csv.index[[0,7]],inplace=True)
# df_base_csv = df_base_csv.reset_index(drop=True)
# distri = df_base_csv['District']
# con = df_base_csv['cumulativeConfirmedNumberForDistrict']
# print(con, distri)
# base_csv_forState = '../RAWCSV/2022-04-06/myGov/AP_raw.csv'
base_csv_forState = '../RAWCSV/2022-04-20/myGov/AP_raw.csv'
df_base_csv_forState = pd.read_csv(base_csv_forState)
# df_base_csv_forState.drop(df_base_csv_forState.index[[0,7]],inplace=True)
# df_base_csv_forState = df_base_csv_forState.reset_index(drop=True)
# distri = df_base_csv_forState['District']
# con = df_base_csv_forState['cumulativeConfirmedNumberForDistrict']
# print(con, distri)
for index, row in df_districts.iterrows():
# print(index, row)
cases_col = row['cumulativeConfirmedNumberForDistrict'].split(' ')[1:]
cases_col = list(filter(str.strip, cases_col))
# print(cases_col, len(cases_col))
district_col = row['District'].split(' ')[1:]
district_col = list(filter(str.strip, district_col))
# print(district_col,len(district_col))
if len(district_col) == 1:
s = ''
new_district_col = s.join(district_col)
else:
new_district_col = combine_listItems(district_col)
if len(cases_col) == 1:
s = ''
new_cases_col = s.join(cases_col)
else:
new_cases_col = combine_listItems(cases_col)
df_districts.loc[index, "District"] = new_district_col
# print(type(new_district_col))
df_districts.loc[index, "cumulativeConfirmedNumberForDistrict"] = new_cases_col
# dropping rows having Nan
df_districts.drop(df_districts.index[[13,14,15,16,30,33]],inplace=True)
df_districts = df_districts.reset_index(drop=True)
df_districts['cumulativeConfirmedNumberForDistrict'] =df_districts['cumulativeConfirmedNumberForDistrict'].astype(int)
# df_summary = df_districts
df_districts = df_districts[:-2]
# print(df_districts)
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Andhra Pradesh'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
for index,row in df_districts.iterrows():
filtered_base_df = df_base_csv[df_base_csv['District']==row['District']]
# cumulativeConfirmedNumberForDistrict_value = filtered_base_df['cumulativeConfirmedNumberForDistrict']
# print('printing value .....')
# print(cumulativeConfirmedNumberForDistrict_value)
filtered_base_forState_df= df_base_csv_forState[df_base_csv_forState['District']==row['District']]
if len(filtered_base_df) == 1 and len(filtered_base_forState_df) == 1:
# if len(filtered_base_df) == 1:
# print('printing district names',filtered_district)
cumulative_confirmed_forDistrict = filtered_base_df.iloc[0]['cumulativeConfirmedNumberForDistrict'].astype(int)
# print('cumulative_confirmed_forDistrict',cumulative_confirmed_forDistrict)
df_districts.loc[index, 'cumulativeConfirmedNumberForDistrict'] = cumulative_confirmed_forDistrict+int(row['cumulativeConfirmedNumberForDistrict'])
df_districts['cumulativeDeceasedNumberForDistrict'] = '0'
df_districts['cumulativeRecoveredNumberForDistrict'] = '0'
df_districts['cumulativeTestedNumberForDistrict'] = '0'
df_districts['cumulativeConfirmedNumberForState'] = df_districts['cumulativeConfirmedNumberForDistrict'].sum()
cumulativeDeceasedNumberForState = filtered_base_forState_df.iloc[0]['cumulativeDeceasedNumberForState'].astype(int)
df_districts['cumulativeDeceasedNumberForState'] = cumulativeDeceasedNumberForState
cumulativeRecoveredNumberForState = filtered_base_forState_df.iloc[0]['cumulativeRecoveredNumberForState'].astype(int)
df_districts['cumulativeRecoveredNumberForState'] = cumulativeRecoveredNumberForState
# df_districts['cumulativeTestedNumberForState'] = '33462024'
df_summary = df_districts
# print('printing df districts.....')
# print(df_districts)
# df_summary['cumulativeTestedNumberForState'] = '33462024'
# df_summary['cumulativeTestedNumberForState'] = '33469666'
df_addTest = pd.read_csv("../INPUT/AP_Tested.csv")
print(df_addTest)
try:
df_summary['cumulativeTestedNumberForState'] = df_addTest[df_addTest["Date"] == date]["Cumulative_Tested"].item()
# print(df_summary['Tested'])
except:
print("Please Enter AP Tested values in ../Input/AP_Tested.csv")
raise
df_summary.to_csv("../RAWCSV/{}/{}_raw.csv".format(date, StateCode))
return df_summary, df_districts
except Exception as e:
raise
# print(e)
def getRJData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='1,2')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
df_districts_1 = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode),header=0)
df_districts_2 = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
frames = [df_districts_1,df_districts_2]
df_districts = pd.concat(frames,ignore_index=True)
df_districts.columns = df_districts.columns.str.replace("\n","")
print(df_districts.columns)
#Cumulative Sample
col_dict = {"Unnamed: 2":"Tested", "Cumulative Positive":"Confirmed", "Cumulative Recovered/Discharged":"Recovered","Cumulative Death":"Deceased","CumulativePositive":"Confirmed",
"CumulativeDeath":"Deceased","CumulativeRecovered/ Discharged":"Recovered"}
df_districts.rename(columns=col_dict,inplace=True)
print(df_districts.columns)
# df_districts.drop(columns=['S.No','Today\'s Positive','Today\'sDeath','Today\'sRecovered/ Discharged', 'Active Case'],inplace=True)
df_districts.dropna(how="all",inplace=True)
# print(df_districts)
# a=b
# df_summary = df_districts
# df_districts = df_districts[:-1]
# df_districts = df_districts[:-4]
# print(df_districts)
# a=b
df_summary = df_districts
print(df_districts)
df_districts = df_districts[:-1]
# df_districts.drop(labels=[0,1],axis=0,inplace=True)
# df = df[]
df_districts['District'] = df_districts['District'].str.capitalize()
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Rajasthan'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
# print(df_summary)
# a=b
return df_summary,df_districts
def getKAData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='1,5')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# table[5].to_excel('foo.xlsx')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-5-table-1.csv'.format(date,StateCode),skiprows=3)
df_districts.columns = df_districts.columns.str.replace("\n","")
df_districts['District Name'] = df_districts['District Name'].str.replace("\n","")
df_districts['District Name'] = df_districts['District Name'].str.replace("#","")
df_districts['District Name'] = df_districts['District Name'].str.replace("*","")
df_districts['District Name'] = df_districts['District Name'].replace(r'\s+', ' ', regex=True)
# df_districts = df_districts.replace("nan",np.nan)
print(df_districts.columns)
# a=b
# df_summary = df_districts
# df_districts.columns = df_districts.columns.str.replace("\n","")
for idx in df_districts.index:
print(df_districts["Sl. No"][idx])
# use .loc assignments instead of chained indexing so the writes are unambiguous
if df_districts["Sl. No"][idx] == "21 Mandya":
df_districts.loc[idx, "Sl. No"] = 21
df_districts.loc[idx, "District Name"] = "Mandya"
elif df_districts["Sl. No"][idx] == "22 Mysuru":
df_districts.loc[idx, "Sl. No"] = 22
df_districts.loc[idx, "District Name"] = "Mysuru"
if "Non-Covid" in df_districts.columns[-1]:
col_dict = {"District Name":"District","Total Positives":"Confirmed","Total Discharges":"Recovered","Total Covid Deaths":"Deceased" , df_districts.columns[-1]:"Other"}
else:
col_dict = {"District Name":"District","Total Positives":"Confirmed","Total Discharges":"Recovered","Total Covid Deaths":"Deceased" , df_districts.columns[-2]:"Other"}
df_districts.rename(columns=col_dict,inplace=True)
# print(df_districts.columns)
# df_districts.drop(columns=['Sl. No','Today’s Positives','Today’s Discharges','Total Active Cases','Today’s Reported Covid Deaths','Death due to Non-Covid reasons#'],inplace=True)
df_districts.dropna(how="all",inplace=True)
# print(df_districts)
# a=b
# a=b
for col in df_districts.columns:
df_districts[col] = df_districts[col].astype(str).str.replace("*","")
# df_districts.dropna(inplace=True)
# print(df_districts)
# a=b
df_summary = df_districts[df_districts["Sl. No"] == "Total"].iloc[0]
# df_summary = df_districts[df_districts["District"] == "Total"].iloc[0]
# print(df_summary)
# a=b
df_districts = df_districts[ | pd.to_numeric(df_districts['Sl. No'], errors='coerce') | pandas.to_numeric |
import os
from copy import deepcopy
from datetime import datetime
from dateutil.parser import parse as parse_to_datetime
import dateutil
import numpy as np
import pandas as pd
np.seterr(divide='ignore')
from sklearn.utils import class_weight
from sklearn.cluster import KMeans, MeanShift, DBSCAN
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer, StandardScaler
from sklearn.metrics import (
classification_report, confusion_matrix,
silhouette_score, homogeneity_score, completeness_score, v_measure_score,
auc, plot_roc_curve
)
from sklearn.model_selection import (
KFold, StratifiedKFold, cross_val_score,
GridSearchCV,
train_test_split
)
from xgboost import XGBClassifier
LUCKY_NUMBER = 6969
classifier = XGBClassifier(objective='multi:softprob', n_jobs=11)
parameters = {
'max_depth': range (2, 7, 1),
'n_estimators': range(60, 90, 10),
'learning_rate': [0.01, 0.05, 0.1]
}
HP_searcher = GridSearchCV(
estimator=classifier,
param_grid=parameters,
scoring='f1_macro',
cv=10,
verbose=True
)
def to_datetime(text: str):
try:
dt_format = str(parse_to_datetime(text))
dt_object = datetime.strptime(dt_format,'%Y-%m-%d %H:%M:%S')
except Exception:
return None
return dt_object
def month_to_quarter(month: int) -> int:
if 1 <= month <= 3:
return 1
elif 4 <= month <= 6:
return 2
elif 7 <= month <= 9:
return 3
elif 10 <= month <= 12:
return 4
else:
raise ValueError(f'input must be between 1 and 12')
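# Illustrative example: month_to_quarter(7) -> 3, while month_to_quarter(13) raises ValueError.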
def load_csv(filepath: str) -> pd.DataFrame:
if not os.path.isfile(filepath):
raise FileNotFoundError(f"Cannot find {filepath}")
return pd.read_csv(filepath)
def find_best_k_clustering(x: np.array,
max_clusters: int=10,
max_iterations: int=1000,
n_samples: int=169,
lucky_number: int=LUCKY_NUMBER,
verbose: bool=False):
scores = {}
for k in range(2, max_clusters-1):
if k < 2:
continue
try:
kmeans = KMeans(n_clusters=k, max_iter=max_iterations, random_state=lucky_number).fit(x)
except:
continue
# scores[k] = kmeans.inertia_
try:
scores[k] = silhouette_score(x, kmeans.labels_, metric='euclidean', sample_size=n_samples)
except ValueError:
continue
best_k = max(scores, key=scores.get)
if verbose:
print(f"Best k is {best_k}")
return best_k
def reorder_cluster(cluster_field_name: str,
target_field_name: str,
df: pd.DataFrame,
ascending: bool=True):
new_cluster_field_name = 'new_' + cluster_field_name
df_new = df.groupby(cluster_field_name)[target_field_name].mean().reset_index()
df_new = df_new.sort_values(by=target_field_name, ascending=ascending).reset_index(drop=True)
df_new['index'] = df_new.index
df_final = pd.merge(df, df_new[[cluster_field_name, 'index']], on=cluster_field_name)
df_final = df_final.drop([cluster_field_name], axis=1)
df_final = df_final.rename(columns={"index": cluster_field_name})
df_final[cluster_field_name] = df_final[cluster_field_name] + 1
return df_final
def cdf(x):
"""
Cumulative Density Function (with epsilon)
"""
x = np.sort(x)
u, c = np.unique(x, return_counts=True)
n = len(x)
y = (np.cumsum(c)-0.5) / n
def interpolate_(x_):
y_interp = np.interp(x_, u, y, left=0.0, right=1.0)
return y_interp
return interpolate_
def cumulative_kl(x, y, fraction: float=0.5):
"""
Cumulative Method to calculate Kullback–Leibler divergence
"""
dx = np.diff(np.sort(np.unique(x)))
dy = np.diff(np.sort(np.unique(y)))
ex = np.min(dx)
ey = np.min(dy)
e = np.min([ex, ey]) * fraction
n = len(x)
P = cdf(x)
Q = cdf(y)
divergence = (1./n) * np.sum(np.log((P(x)-P(x-e)) / (Q(x)-Q(x-e)+1e-11)))
return np.abs(divergence)
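# Hedged illustration (not from the source): for two samples drawn from the same distribution
# the estimate should be close to zero, e.g.
#   x = np.random.normal(0, 1, 1000); y = np.random.normal(0, 1, 1000)
#   cumulative_kl(x, y)   # small value near 0
# while samples from clearly shifted distributions give a noticeably larger value.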
def preprocess_for_classifier(df: pd.DataFrame, target_name: str,
id_cols: list=[], train_size: float=0.69):
# Split to train and validation
dset, X, Y = dict(), dict(), dict()
# If the minimum number of groups for any class less than 2
try:
dset['train'], dset['test'] = train_test_split(df, train_size=train_size, stratify=df[target_name])
for ds_name, ds in dset.items():
Y[ds_name] = ds[target_name]
X[ds_name] = ds.copy()
X[ds_name].drop(columns=id_cols+[target_name], errors='ignore', inplace=True)
except:
X['train'] = df.copy()
X['train'].drop(columns=id_cols+[target_name], errors='ignore', inplace=True)
X['test'] = X['train'].copy()
Y['train'] = df[target_name]
Y['test'] = Y['train'].copy()
# Compute class weights for target
target_weights = Y['train']
target_classes = target_weights.unique()
class_weights = list(
class_weight.compute_class_weight('balanced', target_classes, target_weights)
)
target_weights = target_weights.map({clss_i+1: clss_w for clss_i, clss_w in enumerate(class_weights)})
return X, Y, target_weights
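# Hypothetical usage sketch (column names are illustrative): combine the split with the
# grid search defined above; GridSearchCV forwards fit kwargs to the XGBClassifier.
#   X, Y, w = preprocess_for_classifier(df, target_name="segment", id_cols=["customer_id"])
#   HP_searcher.fit(X["train"], Y["train"], sample_weight=w)
#   preds = HP_searcher.best_estimator_.predict(X["test"])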
def visualize_results(classifier, X, Y):
results = classifier.evals_result()
epochs = len(results['validation_0']['mlogloss'])
x_axis = range(0, epochs)
viz_df = pd.DataFrame(classifier.feature_importances_,
index=X['train'].columns,
columns=['feature_importance'])
viz_df.sort_values(by=['feature_importance'], inplace=True)
# viz_df[viz_df.feature_importance>0.011].plot(kind='barh', alpha=0.75)
print('\n\nAccuracy of XGB classifier on training: {:.2f}'
.format(classifier.score(X['train'], Y['train'])))
y_pred = classifier.predict(X['train'])
print(classification_report(Y['train'], y_pred))
print('\n\nAccuracy of XGB classifier on testing: {:.2f}'
.format(classifier.score(X['test'], Y['test'])))
y_pred = classifier.predict(X['test'])
print(classification_report(Y['test'], y_pred))
def filter_opposite_features(df: pd.DataFrame, verbose: bool=False):
features_before = list(df.columns)
features_after = deepcopy(features_before)
feature_id = 0
while feature_id < len(features_after):
feature_1 = features_after[feature_id]
if verbose:
print(f"Checking {feature_1}")
feature_id += 1
is_separable = False
for op in [' = ', ' - ', ' + ', ' > ', ' < ']:
if op in feature_1:
is_separable = True
break
if not is_separable:
continue
obj_a, obj_b = feature_1.split(op)
feature_2 = obj_b + op + obj_a
if feature_2 in features_after:
if verbose:
print(f"Remove {feature_2} because of oppositing {feature_1}")
features_after.remove(feature_2)
features_removed = list(set(features_before).difference(set(features_after)))
if verbose:
print(f"\n\n\nFeatures removed:\n\t", features_removed)
return features_removed
def check_features_diverged(DF_1: pd.DataFrame,
DF_2: pd.DataFrame,
exclude_columns: list=[],
include_nan_divergence: bool=False,
threshold: float=0.69,
mode: str='min',
verbose: bool=False) -> list:
len_1, len_2 = len(DF_1), len(DF_2)
if len_1 > len_2:
DF_1 = DF_1.sample(len_2)
elif len_2 > len_1:
DF_2 = DF_2.sample(len_1)
diverged_features = dict()
common_features = set.intersection(set(list(DF_1.columns)), set(list(DF_2.columns)))
common_features = [f for f in list(common_features) if f not in exclude_columns]
for col in common_features:
div_xy = cumulative_kl(DF_1[col], DF_2[col])
div_yx = cumulative_kl(DF_2[col], DF_1[col])
if mode == 'max':
div = max(div_xy, div_yx)
elif mode == 'min':
div = min(div_xy, div_yx)
else:
div = (div_xy + div_yx) / 2
if verbose:
print(f'\n{col}\n\t{div_xy}\n\t{div_yx}\n\t{div}\n')
if div > threshold:
diverged_features[col] = div
if include_nan_divergence and np.isnan(div):
diverged_features[col] = div
diverged_features_df = | pd.DataFrame.from_dict(diverged_features, orient='index', columns=['KL_divergence']) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
df=pd.read_csv('train_HK6lq50.csv')
def train_data_preprocess(df,train,test):
df['trainee_engagement_rating'].fillna(value=1.0,inplace=True)
df['isage_null']=0
df.isage_null[df.age.isnull()]=1
df['age'].fillna(value=0,inplace=True)
#new cols actual_programs_enrolled and total_test_taken
total=train.append(test)
unique_trainee=pd.DataFrame(total.trainee_id.value_counts())
unique_trainee['trainee_id']=unique_trainee.index
value=[]
for i in unique_trainee.trainee_id:
value.append(len(total[total.trainee_id==i].program_id.unique()))
unique_trainee['actual_programs_enrolled']=value
dic1=dict(zip(unique_trainee['trainee_id'],unique_trainee['actual_programs_enrolled']))
df['actual_programs_enrolled']=df['trainee_id'].map(dic1).astype(int)
value=[]
for i in unique_trainee.trainee_id:
value.append(len(total[total.trainee_id==i].test_id.unique()))
unique_trainee['total_test_taken']=value
dic2=dict(zip(unique_trainee['trainee_id'],unique_trainee['total_test_taken']))
df['total_test_taken']=df['trainee_id'].map(dic2).astype(int)
#new col total_trainee_in_each_test
unique_test=pd.DataFrame(total.test_id.value_counts())
unique_test['test_id']=unique_test.index
value=[]
for i in unique_test.test_id:
value.append(len(total[total.test_id==i].trainee_id.unique()))
unique_test['total_trainee_in_each_test']=value
dic3=dict(zip(unique_test['test_id'],unique_test['total_trainee_in_each_test']))
df['total_trainee_in_each_test']=df['test_id'].map(dic3).astype(int)
#LABEL ENCODING
test_type=sorted(df['test_type'].unique())
test_type_mapping=dict(zip(test_type,range(1,len(test_type)+1)))
df['test_type_val']=df['test_type'].map(test_type_mapping).astype(int)
df.drop('test_type',axis=1,inplace=True)
program_type=sorted(df['program_type'].unique())
program_type_mapping=dict(zip(program_type,range(1,len(program_type)+1)))
df['program_type_val']=df['program_type'].map(program_type_mapping).astype(int)
df.drop('program_type',axis=1,inplace=True)
program_id=sorted(df['program_id'].unique())
program_id_mapping=dict(zip(program_id,range(1,len(program_id)+1)))
df['program_id_val']=df['program_id'].map(program_id_mapping).astype(int)
#df.drop('program_id',axis=1,inplace=True)
difficulty_level=['easy','intermediate','hard','vary hard']
difficulty_level_mapping=dict(zip(difficulty_level,range(1,len(difficulty_level)+1)))
df['difficulty_level_val']=df['difficulty_level'].map(difficulty_level_mapping).astype(int)
df.drop('difficulty_level',axis=1,inplace=True)
education=['No Qualification','High School Diploma','Matriculation','Bachelors','Masters']
educationmapping=dict(zip(education,range(1,len(education)+1)))
df['education_val']=df['education'].map(educationmapping).astype(int)
df.drop('education',axis=1,inplace=True)
is_handicapped=sorted(df['is_handicapped'].unique())
is_handicappedmapping=dict(zip(is_handicapped,range(1,len(is_handicapped)+1)))
df['is_handicapped_val']=df['is_handicapped'].map(is_handicappedmapping).astype(int)
df.drop('is_handicapped',axis=1,inplace=True)
#creating new program_id group based on is_pass percentage
df['new_program_id_group']=pd.DataFrame(df['program_id'])
df.loc[(df.new_program_id_group=='X_1')|(df.new_program_id_group=='X_3'),'new_program_id_group']=1
df.loc[(df.new_program_id_group=='Y_1')|(df.new_program_id_group=='Y_2')|(df.new_program_id_group=='Y_3')|(df.new_program_id_group=='Y_4')|(df.new_program_id_group=='X_2'),'new_program_id_group']=2
df.loc[(df.new_program_id_group=='Z_1')|(df.new_program_id_group=='Z_2')|(df.new_program_id_group=='Z_3')|(df.new_program_id_group=='T_2')|(df.new_program_id_group=='T_3')|(df.new_program_id_group=='T_4'),'new_program_id_group']=3
df.loc[(df.new_program_id_group=='U_1'),'new_program_id_group']=4
df.loc[(df.new_program_id_group=='V_1')|(df.new_program_id_group=='U_2'),'new_program_id_group']=5
df.loc[(df.new_program_id_group=='V_3')|(df.new_program_id_group=='S_2')|(df.new_program_id_group=='V_4')|(df.new_program_id_group=='V_2'),'new_program_id_group']=6
df.loc[(df.new_program_id_group=='T_1')|(df.new_program_id_group=='S_1'),'new_program_id_group']=7
df.drop('program_id',axis=1,inplace=True)
#creating col test_id and rating category together
train=pd.read_csv('train_HK6lq50.csv')
test=pd.read_csv('test_2nAIblo.csv')
total=train.append(test)
count=0
total['test_id_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.test_id==b),'test_id_and_rating']=count
dic=dict(zip(total['id'],total['test_id_and_rating']))
df['test_id_and_rating']=df['id'].map(dic)
count=0
total['test_id_and_education']=0
for a in total.education.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.education==a)&(total.test_id==b),'test_id_and_education']=count
dic=dict(zip(total['id'],total['test_id_and_education']))
df['test_id_and_education']=df['id'].map(dic)
count=0
total['program_type_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.program_type.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.program_type==b),'program_type_and_rating']=count
dic=dict(zip(total['id'],total['program_type_and_rating']))
df['program_type_and_rating']=df['id'].map(dic)
#grouping of test_id_and_rating
c=pd.crosstab(df.test_id_and_rating,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.30),'id_group']=1
c_pct.loc[(c_pct.id_group>=.30)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.50),'id_group']=3
c_pct.loc[(c_pct.id_group>=.50)&(c_pct.id_group<.60),'id_group']=4
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.70),'id_group']=5
c_pct.loc[(c_pct.id_group>=.70)&(c_pct.id_group<.80),'id_group']=6
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<.90),'id_group']=7
c_pct.loc[(c_pct.id_group>=.90)&(c_pct.id_group<1),'id_group']=8
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic4=dic['id_group']
df['test_id_and_rating_group']=df['test_id_and_rating'].map(dic4).astype(int)
#grouping of program_type_and_rating
c=pd.crosstab(df.program_type_and_rating,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.30),'id_group']=1
c_pct.loc[(c_pct.id_group>=.30)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.50),'id_group']=3
c_pct.loc[(c_pct.id_group>=.50)&(c_pct.id_group<.60),'id_group']=4
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.70),'id_group']=5
c_pct.loc[(c_pct.id_group>=.70)&(c_pct.id_group<.80),'id_group']=6
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<.90),'id_group']=7
c_pct.loc[(c_pct.id_group>=.90)&(c_pct.id_group<1),'id_group']=8
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic41=dic['id_group']
df['program_type_and_rating_group']=df['program_type_and_rating'].map(dic41).astype(int)
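# The chained .loc assignments above bin pass rates into 10-percentage-point
# buckets. pd.cut expresses the same binning more compactly; a sketch of the
# technique on a toy series, not a replacement for the pipeline above.
import numpy as np
import pandas as pd

pass_rate = pd.Series([0.22, 0.47, 0.91])
bins = np.arange(0.2, 1.01, 0.1)                       # edges 0.2, 0.3, ..., 1.0
binned = pd.cut(pass_rate, bins=bins, labels=list(range(1, len(bins))), right=False)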
#col avg_rating by test_id
total=train.append(test)
c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c['avg_rating']=(c[1.0]+2*c[2.0]+3*c[3.0]+4*c[4.0]+5*c[5.0])/(c[1.0]+c[2.0]+c[3.0]+c[4.0]+c[5.0])
c['test_id']=c.index
dic5=dict(zip(c['test_id'],c['avg_rating']))
df['avg_rating']=df['test_id'].map(dic5)
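# The crosstab-weighted sum above is simply the mean engagement rating per
# test_id; groupby().mean() computes the same quantity directly. Toy sketch:
import pandas as pd

toy = pd.DataFrame({'test_id': [10, 10, 12], 'trainee_engagement_rating': [1.0, 3.0, 5.0]})
avg_rating_by_test = toy.groupby('test_id')['trainee_engagement_rating'].mean()
# mapping this Series onto df['test_id'] would attach it the same way dic5 is used above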
#rating_diff(count(1.0+2.0+3.0)-count(4.0+5.0))
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c=pd.crosstab(df.test_id,df.trainee_engagement_rating)
c['rating_diff_test_id']=c[1.0]+c[2.0]-c[4.0]-c[5.0]+c[3.0]
c['test_id']=c.index
dic6=dict(zip(c['test_id'],c['rating_diff_test_id']))
df['rating_diff_test_id']=df['test_id'].map(dic6)
#col avg_rating by trainee_id
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c=pd.crosstab(df.trainee_id,df.trainee_engagement_rating)
c['avg_rating_trainee_id']=(c[1.0]+2*c[2.0]+3*c[3.0]+4*c[4.0]+5*c[5.0])/(c[1.0]+c[2.0]+c[3.0]+c[4.0]+c[5.0])
c['trainee_id']=c.index
dic7=dict(zip(c['trainee_id'],c['avg_rating_trainee_id']))
df['avg_rating_trainee_id']=df['trainee_id'].map(dic7)
#is_pass_diff wrt trainee_engagement_rating
c=pd.crosstab(df.trainee_engagement_rating,df.is_pass)
c['trainee_engagement_rating']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_rating']=c['pass']-c['fail']
dic8=dict(zip(c['trainee_engagement_rating'],c['is_pass_diff_rating']))
df['is_pass_diff_rating']=df['trainee_engagement_rating'].map(dic8).astype(int)
#is_pass_diff wrt total_programs_enrolled
c= | pd.crosstab(df.total_programs_enrolled,df.is_pass) | pandas.crosstab |
from __future__ import annotations
import pytest
from pandas.errors import ParserWarning
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
to_datetime,
)
import pandas._testing as tm
from pandas.io.xml import read_xml
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
@pytest.fixture(
params=[None, {"book": ["category", "title", "author", "year", "price"]}]
)
def iterparse(request):
return request.param
def read_xml_iterparse(data, **kwargs):
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write(data)
return read_xml(path, **kwargs)
xml_types = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
</row>
</data>"""
xml_dates = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
<date>2020-01-01</date>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
<date>2021-01-01</date>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
<date>2022-01-01</date>
</row>
</data>"""
# DTYPE
def test_dtype_single_str(parser):
df_result = read_xml(xml_types, dtype={"degrees": "str"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"degrees": "str"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": [4.0, float("nan"), 3.0],
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_all_str(parser):
df_result = read_xml(xml_dates, dtype="string", parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
dtype="string",
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": ["4.0", None, "3.0"],
"date": ["2020-01-01", "2021-01-01", "2022-01-01"],
},
dtype="string",
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_with_names(parser):
df_result = read_xml(
xml_dates,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
parser=parser,
)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"Col1": ["square", "circle", "triangle"],
"Col2": Series(["00360", "00360", "00180"]).astype("string"),
"Col3": Series([4.0, float("nan"), 3.0]).astype("Int64"),
"Col4": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_nullable_int(parser):
df_result = read_xml(xml_types, dtype={"sides": "Int64"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"sides": "Int64"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": Series([4.0, float("nan"), 3.0]).astype("Int64"),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_float(parser):
df_result = | read_xml(xml_types, dtype={"degrees": "float"}, parser=parser) | pandas.io.xml.read_xml |
import pandas as pd
def get_produced_wind_power(file_path):
df = | pd.read_html(file_path) | pandas.read_html |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/5/18 20:40
Desc: Shanghai Stock Exchange - Products - Stock Options - Option Risk Indicators
"""
import requests
import pandas as pd
def option_risk_indicator_sse(date: str = "20220516") -> pd.DataFrame:
"""
    Shanghai Stock Exchange - Products - Stock Options - Option Risk Indicators
    http://www.sse.com.cn/assortment/options/risk/
    :param date: date; data available from 20150209 onwards
    :type date: str
    :return: option risk indicators
:rtype: pandas.DataFrame
"""
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"isPagination": "false",
"trade_date": date,
"sqlId": "SSE_ZQPZ_YSP_GGQQZSXT_YSHQ_QQFXZB_DATE_L",
"contractSymbol": "",
"_": "1652877575590",
}
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df[
[
"TRADE_DATE",
"SECURITY_ID",
"CONTRACT_ID",
"CONTRACT_SYMBOL",
"DELTA_VALUE",
"THETA_VALUE",
"GAMMA_VALUE",
"VEGA_VALUE",
"RHO_VALUE",
"IMPLC_VOLATLTY",
]
]
temp_df["TRADE_DATE"] = pd.to_datetime(temp_df["TRADE_DATE"]).dt.date
temp_df["DELTA_VALUE"] = pd.to_numeric(temp_df["DELTA_VALUE"])
temp_df["THETA_VALUE"] = | pd.to_numeric(temp_df["THETA_VALUE"]) | pandas.to_numeric |
import random
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
import pickle
from bots.epsilonbot import EpsilonBot
from bots.gammabot import GammaBot
from bots.markovbot import MarkovBot
from collections import Counter
from utils import emoji_to_text, evaluate, win_rate_fn, log_game
from flask import Flask, request, render_template
app = Flask(__name__)
# capture results for win rates
results_easy = []
results_medium = []
results_hard = []
# home page
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/easy', methods=['GET', 'POST'])
def easy():
game = {}
bot = EpsilonBot()
if request.method == 'POST':
player_throw = emoji_to_text(request.form['player_throw'])
bot_throw = bot.throw(game)
result = evaluate(player_throw, bot_throw)
results_easy.append(result)
win_rate = win_rate_fn(results_easy)
game["W"] = Counter(results_easy)["win"]
game["L"] = Counter(results_easy)["lose"]
game['result'] = result
game['player'] = player_throw
game['bot'] = bot_throw
game["win_rate"] = round(win_rate*100,2)
return render_template('easy.html', game=game)
@app.route('/medium', methods=['GET', 'POST'])
def medium():
game = {}
bot = GammaBot()
if request.method == 'POST':
player_throw = emoji_to_text(request.form['player_throw'])
bot_throw = bot.throw(player_throw)
result = evaluate(player_throw, bot_throw)
results_medium.append(result)
win_rate = win_rate_fn(results_medium)
game["W"] = Counter(results_medium)["win"]
game["L"] = Counter(results_medium)["lose"]
game['result'] = result
game['player'] = player_throw
game['bot'] = bot_throw
game["win_rate"] = round(win_rate*100,2)
return render_template('medium.html', game=game)
# game setup for markovbot
# spin up random game 1
options = ['rock', 'paper', 'scissors']
player = np.random.choice(options)
bot = np.random.choice(options)
game = {'W': 0, 'L': 0, 'result': None, 'player': player, 'bot': bot, 'win_rate': 0}
# save the outcomes and play
memory = pd.DataFrame({ "outcome": [], "next_play": []})
memory.to_csv('data/memory.csv', index=False)
del memory
# save the updated transition matrix
memory_transition_prob = pd.DataFrame({
'paperpaper': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'paperrock': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'paperscissors': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'rockpaper': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'rockrock': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'rockscissors': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'scissorspaper': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'scissorsrock': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3},
'scissorsscissors': {'paper': 1/3, 'rock': 1/3,'scissors': 1/3}})
memory_transition_prob.to_csv("data/memory_transition_prob.csv",index=False)
del memory_transition_prob
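# Sketch of how such a transition matrix could be updated after each round:
# bump the probability of the observed (previous state -> next throw) cell and
# renormalise that column. This only illustrates the idea -- the real update
# logic lives in MarkovBot, which is not shown here.
def update_transition_matrix(probs, prev_state, next_throw, learning_rate=0.1):
    probs.loc[next_throw, prev_state] += learning_rate
    probs[prev_state] = probs[prev_state] / probs[prev_state].sum()
    return probs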
@app.route('/hard', methods=['GET', 'POST'])
def hard():
# do not render first dummy game's html
game_count = 0
memory_transition_prob = | pd.read_csv('data/memory_transition_prob.csv') | pandas.read_csv |
"""
Gather metadata for all assemblies in the dataset
and output to assemblies.csv
"""
import argparse
import os
import logging
import re
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
data_path = os.path.join(os.getcwd(), 'data')
sequences_path = os.path.join(data_path, 'sequences')
ncbi_assembly_summary_path = os.path.join(data_path, 'NCBI_assembly_summary.txt')
arc_metadata_path = os.path.join(data_path, 'archaea/DB_Archaea95_update012020.info.txt')
bac_metadata_path = os.path.join(data_path, 'bacteria/DB_BACT95_HCLUST0.5/DB_BACT95_HCLUST0.5.info.txt')
output_file = os.path.join(data_path, 'assemblies.csv')
logger.info('Loading NCBI assembly summary')
ncbi_summary_df = pd.read_csv(
ncbi_assembly_summary_path,
sep='\t',
skiprows=1,
).set_index('assembly_accession')
logger.info('Loading Archea metadata file')
arc_metadata_df = read_metadata_file(arc_metadata_path)
logger.info('Loading Bacteria metadata file')
bac_metadata_df = read_metadata_file(bac_metadata_path)
logger.info('Concatenating metadata files')
metadata_df = pd.concat(
[arc_metadata_df, bac_metadata_df],
ignore_index=True,
).set_index('assembly_accession')
logger.info('Merging metadata files')
output_df = pd.merge(
ncbi_summary_df[[
'taxid',
'species_taxid',
'organism_name',
'assembly_level',
]],
metadata_df[[
'domain',
'phylum',
'class',
'order',
'family',
'genus',
'species',
'strain',
]],
how='inner',
on='assembly_accession',
)
output_df['taxid'] = pd.to_numeric(output_df['taxid'])
output_df['species_taxid'] = | pd.to_numeric(output_df['species_taxid']) | pandas.to_numeric |
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:46:57 2020
@author: reideej1
:DESCRIPTION: Evaluate coaching data for the last 50 years of college football
- the goal is to determine how coaches who struggle in their first 3 years
fare over time at the same program
:REQUIRES: scrape_sports_reference.py located in: cfbAnalysis\src\data
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import datetime
import glob
import os
import numpy as np
import pandas as pd
import pathlib
import time
import tqdm
from src.data.scrape_sports_reference import *
#==============================================================================
# Reference Variable Declaration
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def renameSchool(df, name_var):
'''
    Purpose: Rename a school/university to a standard name as specified in
        the file `references/names_pictures_ncaa.csv`
Inputs
------
df : Pandas Dataframe
DataFrame containing a school-name variable for which the names
need to be standardized
name_var : string
Name of the variable which is to be renamed/standardized
Outputs
-------
list(row)[0] : string
Standardized version of the school's name based on the first value
in the row in the file `school_abbreviations.csv`
'''
# read in school name information
df_school_names = pd.read_csv(r'references\names_pictures_ncaa.csv')
# convert the dataframe to a dictionary such that the keys are the
# optional spelling of each school and the value is the standardized
# name of the school
dict_school_names = {}
for index, row in df_school_names.iterrows():
# isolate the alternative name columns
names = row[[x for x in row.index if 'Name' in x]]
# convert the row to a list that doesn't include NaN values
list_names = [x for x in names.values.tolist() if str(x) != 'nan']
# add the nickname to the team names as an alternative name
nickname = row['Nickname']
list_names_nicknames = list_names.copy()
for name in list_names:
list_names_nicknames.append(name + ' ' + nickname)
# extract the standardized team name
name_standardized = row['Team']
# add the standardized name
list_names_nicknames.append(name_standardized)
# add the nickname to the standardized name
list_names_nicknames.append(name_standardized + ' ' + nickname)
# for every alternative spelling of the team, set the value to be
# the standardized name
for name_alternate in list_names_nicknames:
dict_school_names[name_alternate] = name_standardized
# df[name_var] = df[name_var].apply(
# lambda x: dict_school_names[x] if str(x) != 'nan' else '')
df[name_var] = df[name_var].apply(
lambda x: rename_school_helper(x, dict_school_names))
return df
def rename_school_helper(name_school, dict_school_names):
try:
if str(name_school) != 'nan':
return dict_school_names[name_school]
else:
return ''
except:
print(f'School not found in school abbreviations .csv file: {name_school} ')
return name_school
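# The two functions above boil down to a dictionary lookup with a fall-through
# for unknown names. A toy illustration of that pattern (the lookup table here
# is made up, not the contents of references\names_pictures_ncaa.csv):
import pandas as pd

toy_lookup = {'Nebraska Cornhuskers': 'Nebraska', 'Ohio St.': 'Ohio State'}
toy_df = pd.DataFrame({'School': ['Nebraska Cornhuskers', 'Ohio St.', 'Unknown U']})
toy_df['School'] = toy_df['School'].apply(lambda x: toy_lookup.get(x, x))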
def create_coach_dataframe(df_schools):
'''
Purpose: Given historic school data, create a dataframe of coaches and
their performance data on a year-by-year basis
Inputs
------
df_schools : Pandas DataFrame
Contains year-by-year results for each school (with coaches' names)
Outputs
-------
df_coaches : Pandas DataFrame
A dataframe containing all historic season data from a coaching perspective
'''
# Create a dictionary that assigns each school to its current conference
df_conf = df_schools.groupby(['School', 'Conf']).head(1).groupby('School').head(1).reset_index(drop = True)
df_conf = df_conf[['School', 'Conf']]
df_conf['Power5'] = df_conf.apply(lambda row: True if row['Conf'] in [
'SEC', 'Pac-12', 'Big 12', 'ACC', 'Big Ten'] else False, axis = 1)
df_conf = df_conf.set_index('School')
dict_conf = df_conf.to_dict(orient = 'index')
# Create a coaching dataframe by iterating over every year for every school
list_coaches = []
for index, row in df_schools.iterrows():
# handle every coach that coached that season
for coach in row['Coach(es)'].split(', '):
dict_coach_year = {}
dict_coach_year['coach'] = coach.split(' (')[0].strip()
dict_coach_year['year'] = row['Year']
dict_coach_year['school'] = row['School']
dict_coach_year['ranking_pre'] = row['AP_Pre']
dict_coach_year['ranking_high'] = row['AP_High']
dict_coach_year['ranking_post'] = row['AP_Post']
dict_coach_year['ranked_pre'] = not pd.isna(row['AP_Pre'])
dict_coach_year['ranked_post'] = not pd.isna(row['AP_Post'])
try:
dict_coach_year['ranked_top_10'] = row['AP_Post'] <= 10
except:
print(row['AP_Post'])
dict_coach_year['ranked_top_5'] = row['AP_Post'] <= 5
# handle bowl games
if pd.isna(row['Bowl']):
dict_coach_year['bowl'] = False
dict_coach_year['bowl_name'] = ''
dict_coach_year['bowl_win'] = False
else:
                dict_coach_year['bowl'] = True
                dict_coach_year['bowl_name'] = row['Bowl'].split('-')[0]
                # default to False so bowl losses get an explicit value
                dict_coach_year['bowl_win'] = False
                if '-' in str(row['Bowl']):
                    try:
                        if row['Bowl'].split('-')[1] == 'W':
                            dict_coach_year['bowl_win'] = True
except:
print(row['Bowl'])
# handle wins and losses
if len(coach.split('(')[1].split('-')) > 2:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
dict_coach_year['T'] = coach.split('(')[1].split('-')[2].strip(')')
else:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
# assign conference information
dict_coach_year['conf'] = dict_conf[row['School']]['Conf']
dict_coach_year['power5'] = dict_conf[row['School']]['Power5']
list_coaches.append(dict_coach_year)
# Convert list to DataFrame
df_coaches = pd.DataFrame(list_coaches)
    # Convert all NaN ties to 0
df_coaches['T'] = df_coaches['T'].fillna(0)
# Identify all unique coaches in the dataframe
list_coaches = list(df_coaches['coach'].unique())
# Cast Win and Loss columns to ints
df_coaches['W'] = df_coaches['W'].astype('int')
df_coaches['L'] = df_coaches['L'].astype('int')
df_coaches['T'] = df_coaches['T'].astype('int')
# Add a column for games coached in the season
df_coaches['GP'] = df_coaches.apply(lambda row: row['W'] + row['L'] + row['T'], axis = 1)
return df_coaches
def add_coach_metadata(df_stint):
'''
Purpose: Iterate over a coach's historic data and tabulate totals on a
year-by-year basis
Inputs
------
df_stint : Pandas DataFrame
Contains year-by-year results for a coach
** Note: This is continuous years only. Breaks in coaching stints
are treated as separate coaching histories **
Outputs
-------
df_coach : Pandas DataFrame
Coaching data with updated year-by-year totals
'''
df_coach = df_stint.copy()
# 1. Year # at school
df_coach['season'] = list(range(1,len(df_coach)+1))
# 2. Cumulative games coached at school (on a year-by-year basis)
df_coach['cum_GP'] = df_coach['GP'].cumsum(axis = 0)
# 3. Cumulative wins at school (on a year-by-year basis)
df_coach['cum_W'] = df_coach['W'].cumsum(axis = 0)
# 4. Cumulative losses at school (on a year-by-year basis)
df_coach['cum_L'] = df_coach['L'].cumsum(axis = 0)
# 5. Cumulative ties at school (on a year-by-year basis)
df_coach['cum_T'] = df_coach['T'].cumsum(axis = 0)
# 6. Cumulative Win Pct at school (on a year-by-year basis)
if len(df_coach) == 1:
if int(df_coach['cum_GP']) == 0:
df_coach['cum_win_pct'] = 0
else:
df_coach['cum_win_pct'] = df_coach.apply(lambda row: row['cum_W'] / row['cum_GP'] if row['cum_GP'] != 0 else 0, axis = 1)
else:
df_coach['cum_win_pct'] = df_coach.apply(lambda row: row['cum_W'] / row['cum_GP'] if row['cum_GP'] != 0 else 0, axis = 1)
# 7. Total bowl games at school
df_coach['total_bowl'] = df_coach['bowl'].sum(axis = 0)
# 8. Total bowl wins at school
df_coach['total_bowl_win'] = df_coach['bowl_win'].sum(axis = 0)
# 9. Total AP Preseason rankings
df_coach['total_ranked_pre'] = df_coach['ranked_pre'].sum(axis = 0)
# 10. Total AP Postseason rankings
df_coach['total_ranked_post'] = df_coach['ranked_post'].sum(axis = 0)
# 11. Total Top 10 finishes
df_coach['total_top_10'] = df_coach['ranked_top_10'].sum(axis = 0)
# 12. Total Top 5 finishes
df_coach['total_top_5'] = df_coach['ranked_top_5'].sum(axis = 0)
# 13. Total Seasons Coached at School
df_coach['total_seasons'] = df_coach.iloc[len(df_coach)-1]['season']
# 14. Total Games Coached at School
df_coach['total_games'] = df_coach.iloc[len(df_coach)-1]['cum_GP']
# 15. Total Wins at School
df_coach['total_wins'] = df_coach.iloc[len(df_coach)-1]['cum_W']
# 16. Total Losses at School
df_coach['total_losses'] = df_coach.iloc[len(df_coach)-1]['cum_L']
# 17. Total Win Pct at School
df_coach['total_win_pct'] = df_coach.iloc[len(df_coach)-1]['cum_win_pct']
return df_coach
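# The year-by-year totals above all follow the same cumulative-sum pattern.
# A minimal illustration on a toy stint (column names mirror the ones used above):
import pandas as pd

toy_stint = pd.DataFrame({'W': [9, 10, 12], 'L': [4, 3, 1], 'T': [0, 0, 0]})
toy_stint['GP'] = toy_stint[['W', 'L', 'T']].sum(axis=1)
toy_stint['cum_W'] = toy_stint['W'].cumsum()
toy_stint['cum_GP'] = toy_stint['GP'].cumsum()
toy_stint['cum_win_pct'] = toy_stint['cum_W'] / toy_stint['cum_GP']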
def calculate_year_by_year(df_coaches):
'''
Purpose: Given the data for coaches in a historical perspective, iterate
through their coaching stints and calculate year-by-year totals in an
effor to understand their progress over time
Inputs
------
df_coaches : Pandas DataFrame
A dataframe containing all historic season data from a coaching perspective
Outputs
-------
df_yr_by_yr : Pandas DataFrame
Coaching data with updated year-by-year totals separated by stints
at schools in each coach's career
'''
# make an empty dataframe for storing new coach info
df_yr_by_yr = pd.DataFrame()
# Coach-by-coach --> Year by year, determine the following:
gps = df_coaches.groupby(['coach', 'school'])
for combo, df_coach in tqdm.tqdm(gps):
# sort the dataframe by earliest year to latest
df_coach = df_coach.sort_values(by = 'year')
# look for gaps in years
num_stints = 1
list_stint_end = []
list_years = list(df_coach['year'])
for num_ele in list(range(0,len(list_years))):
if (num_ele == 0):
pass
else:
if list_years[num_ele] - list_years[num_ele-1] > 1:
# print(f"Gap detected for coach: {df_coach.iloc[0]['coach']}")
# print(f" -- Gap between {list_years[num_ele]} and {list_years[num_ele-1]}")
list_stint_end.append(list_years[num_ele-1])
num_stints = num_stints + 1
# handle coaches with multiple stints
if num_stints >= 2:
for stint_count in list(range(0,num_stints)):
# split the coaches data into stints
if stint_count == 0:
year_stint_end = list_stint_end[stint_count]
df_stint = df_coach[df_coach['year'] <= year_stint_end]
elif stint_count < num_stints-1:
year_stint_end = list_stint_end[stint_count]
year_stint_end_prev = list_stint_end[stint_count-1]
df_stint = df_coach[df_coach['year'] <= year_stint_end]
df_stint = df_stint[df_stint['year'] > year_stint_end_prev]
else:
year_stint_end_prev = list_stint_end[stint_count-1]
df_stint = df_coach[df_coach['year'] > year_stint_end_prev]
# process the data on a year by year basis
df_stint = add_coach_metadata(df_stint)
# Add coach dataframe to overall dataframe
if len(df_yr_by_yr) == 0:
df_yr_by_yr = df_stint.copy()
else:
df_yr_by_yr = df_yr_by_yr.append(df_stint)
else:
# process the data on a year by year basis
df_coach = add_coach_metadata(df_coach)
# Add coach dataframe to overall dataframe
if len(df_yr_by_yr) == 0:
df_yr_by_yr = df_coach.copy()
else:
df_yr_by_yr = df_yr_by_yr.append(df_coach)
# reset dataframe index
df_yr_by_yr = df_yr_by_yr.reset_index(drop = True)
return df_yr_by_yr
def create_week_by_week_dataframe(df_all_games, df_schools, games_sf):
'''
Purpose: Combine the week-by-week results for each school with the
end-of-year school/coach information to create a week-by-week
dataframe detailing who coached each team when. This will facilitate
analysis of coaching tenures.
Inputs
------
df_all_games : Pandas DataFrame
Contains week-by-week results for each school
df_schools : Pandas DataFrame
Contains year-by-year results for each school (with coaches' names)
games_sf : int
Scott Frost's current number of games
Outputs
-------
df_engineered : Pandas DataFrame
A dataframe containing all historic week-by-week results infused
with coaches' names
'''
# standardize team names
df_all_games = renameSchool(df_all_games, 'School')
df_all_games = renameSchool(df_all_games, 'Opponent')
df_schools = renameSchool(df_schools, 'School')
# merge data together
df_coaches = pd.merge(df_all_games,
df_schools[['School', 'Year', 'Conf', 'Conf_W', 'Conf_L',
'Conf_T', 'AP_Pre', 'AP_High', 'AP_Post',
'Coach(es)', 'Bowl']],
how = 'left',
on = ['School', 'Year'])
# rename columns
df_coaches = df_coaches.rename(columns = {'Conf_x':'Conf_Opp', 'Conf_y':'Conf'})
# sort dataframe to ensure no issues with groupby
df_coaches = df_coaches.sort_values(by = ['School', 'Year', 'G'])
# Break out coaches on a week-by-week basis
list_coaches = []
table_coaches = pd.DataFrame(columns = ['School', 'Year', 'Coach', 'Games'])
for school, grp in tqdm.tqdm(df_coaches.groupby(['School', 'Year'])):
dict_coaches = {}
# Handle Utah 2003
if school[0] == 'Utah' and school[1] == 2004:
dict_coaches['Urban Meyer'] = 12
# Handle Utah St. 2021
elif school[0] == 'Utah St.' and school[1] == 2021:
coach_name = '<NAME>'
coach_games = grp['G'].count()
dict_coaches[coach_name] = coach_games
# Handle USC 2021
elif school[0] == 'USC' and school[1] == 2021:
dict_coaches['C<NAME>'] = 2
dict_coaches['<NAME>'] = len(grp) - 2
# handle every coach that coached that season for that team
else:
# for every coach a team has, calculate how many games they coached that season
for coach in grp['Coach(es)'].iloc[0].split(', '):
coach_name = coach.split(' (')[0]
coach_record = coach.split(' (')[1].replace(')','')
# first attempt to account for ties in a coaches' record
try:
coach_games = int(coach_record.split('-')[0]) + int(coach_record.split('-')[1]) + int(coach_record.split('-')[2])
# otherwise assume they only have wins-losses in their record
except:
coach_games = int(coach_record.split('-')[0]) + int(coach_record.split('-')[1])
dict_coaches[coach_name] = coach_games
# add coaches to master list
num_games = 0
for coach in dict_coaches.keys():
list_coaches = list_coaches + ([coach] * dict_coaches[coach])
table_coaches = table_coaches.append(pd.DataFrame(
[[school[0], school[1], coach, dict_coaches[coach]]],
columns = ['School', 'Year', 'Coach', 'Games']))
num_games = dict_coaches[coach] + num_games
if num_games != len(grp):
print('oops!')
break
df_coaches['Coach'] = list_coaches
# test for any values of "coach" that weren't in the original data
for index, row in tqdm.tqdm(df_coaches.iterrows()):
if not pd.isna(row['Coach(es)']):
if row['Coach'] not in row['Coach(es)']:
print(f"{row['Coach']} not found in {row['Coach(es)']}")
# add power5 status to dataframe
df_school_info = pd.read_csv(r'references\names_pictures_ncaa.csv')
df_school_info = df_school_info.rename(columns = {'Team':'School'})
df_coaches = pd.merge(df_coaches, df_school_info[['School', 'Power5']], how = 'left', on = 'School')
df_school_info = df_school_info.rename(columns = {'School':'Opponent', 'Power5':'Power5_Opp'})
df_coaches = pd.merge(df_coaches, df_school_info[['Opponent', 'Power5_Opp']], how = 'left', on = 'Opponent')
# rename columns
df_coaches = df_coaches.rename(columns = {'G':'Week',
'Year':'Season',
'Opp':'Pts_Opp',
'Cum_W':'W_Sn',
'Cum_L':'L_Sn',
'T':'T_Sn'})
# add opponent's record for the year to the table
df_team_records = pd.merge(df_coaches[['Season', 'Opponent']],
df_schools[['School', 'Year', 'Overall_Pct', 'Conf_Pct']],
left_on = ['Season', 'Opponent'],
right_on = ['Year', 'School'])
df_team_records = df_team_records.drop_duplicates()
df_team_records = df_team_records[['Season', 'School', 'Overall_Pct', 'Conf_Pct']]
df_team_records = df_team_records.rename(columns = {'Overall_Pct':'Win_Pct_Opp',
'Conf_Pct':'Win_Pct_Conf_Opp',
'School':'Opponent'})
df_coaches = pd.merge(df_coaches, df_team_records, how = 'left', on = ['Season', 'Opponent'])
# add flag if opponent's overall record was > .500
df_coaches['Opp_Winning_Record'] = list(df_coaches.apply(
lambda row: True if row['Win_Pct_Opp'] > .5 else False, axis = 1))
# add flag if opponent's conference record was > .500
df_coaches['Opp_Conf_Winning_Record'] = list(df_coaches.apply(
lambda row: True if row['Win_Pct_Conf_Opp'] > .5 else False, axis = 1))
# reorder columns
df_coaches = df_coaches[['Season', 'Week', 'Date', 'Day', 'Rank', 'School',
'Coach', 'Conf', 'Power5', 'Home_Away', 'Rank_Opp', 'Opponent',
'Conf_Opp', 'Power5_Opp', 'Win_Pct_Opp', 'Opp_Winning_Record',
'Win_Pct_Conf_Opp', 'Opp_Conf_Winning_Record',
'Result', 'Pts', 'Pts_Opp', 'W_Sn',
'L_Sn', 'T_Sn', 'AP_Pre', 'AP_High', 'AP_Post',
'Notes', 'Bowl', 'url_boxscore']]
    # Engineer variables for each coach's stint/tenure at a given school
df_engineered = pd.DataFrame()
for index, grp in tqdm.tqdm(df_coaches.groupby(['School', 'Coach'])):
if len(df_engineered) == 0:
df_engineered = add_tenure_features(grp, games_sf)
else:
df_engineered = df_engineered.append(add_tenure_features(grp, games_sf))
return df_engineered
def add_tenure_features(df_coach, games_sf):
'''
    Purpose: Manage the engineering of features across a coach's tenure at
        a given school (while also accounting for those coaches who have
multiple coaching stints/tenures at the same school)
Inputs
------
df_coach : Pandas DataFrame
Contains data for all seasons a coach has coached at a given school
games_sf : int
Scott Frost's current number of games
Outputs
-------
df_coach_eng : Pandas DataFrame
Contains input data with newly engineered features that span the
whole coaching tenure, not just seasons
'''
# Step 1. Identify if the coach's dataframe has multiple stints
# (i.e. gaps in years between tenures at the same school)
num_stints = 1
list_stint_end = []
list_years = list(df_coach['Season'])
for num_ele in list(range(0,len(list_years))):
if (num_ele == 0):
pass
else:
if list_years[num_ele] - list_years[num_ele-1] > 1:
# print(f"Gap detected for coach: {df_coach.iloc[0]['coach']}")
# print(f" -- Gap between {list_years[num_ele]} and {list_years[num_ele-1]}")
list_stint_end.append(list_years[num_ele-1])
num_stints = num_stints + 1
# Step 2.A. Handle coaches with multiple stints (i.e. gaps in years)
if num_stints >= 2:
df_coach_eng = pd.DataFrame()
for stint_count in list(range(0,num_stints)):
# handle the first coaching stint
if stint_count == 0:
year_stint_end = list_stint_end[stint_count]
df_stint = df_coach[df_coach['Season'] <= year_stint_end].copy()
# handle coaching stints 2 through num_stints - 1
elif stint_count < num_stints-1:
year_stint_end = list_stint_end[stint_count]
year_stint_end_prev = list_stint_end[stint_count-1]
df_stint = df_coach[df_coach['Season'] <= year_stint_end].copy()
df_stint = df_stint[df_stint['Season'] > year_stint_end_prev].copy()
# handle the last coaching stint
else:
year_stint_end_prev = list_stint_end[stint_count-1]
df_stint = df_coach[df_coach['Season'] > year_stint_end_prev].copy()
# engineer new features and add to coach's tenure dataframe
if len(df_coach_eng) == 0:
df_coach_eng = engineer_stint_features(df_stint, games_sf)
else:
df_coach_eng = df_coach_eng.append(engineer_stint_features(df_stint, games_sf))
# print(f"Coach: {df_stint['Coach'].iloc[0]}, Games: {len(df_stint)}")
# Step 2.B. Handle coaches with only a single stint at the respective school
else:
df_coach_eng = engineer_stint_features(df_coach, games_sf)
return df_coach_eng
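# The gap detection above walks the list of seasons manually. An equivalent,
# more compact way to label stints is to cumulative-sum the year gaps; shown
# here on a toy series as a sketch, not as a change to the functions above.
import pandas as pd

seasons = pd.Series([2003, 2004, 2005, 2010, 2011])   # gap between 2005 and 2010
stint_id = seasons.diff().gt(1).cumsum() + 1            # 1, 1, 1, 2, 2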
def engineer_stint_features(df_tenure, games_sf):
'''
Purpose: Engineer features across a coach's tenure at a given school
Inputs
------
df_tenure : Pandas DataFrame
Contains data for all seasons in a tenure for a given coach/school combo
games_sf : int
Scott Frost's current number of games
Outputs
-------
df_tenure : Pandas DataFrame
Contains input data with newly engineered features
'''
# df_tenure = df_coaches[(df_coaches['School'] == 'Nebraska') & (df_coaches['Coach'] == '<NAME>')].copy()
# df_tenure = df_coaches[(df_coaches['School'] == 'Nebraska') & (df_coaches['Coach'] == '<NAME>')].copy()
# df_tenure = df_coaches[(df_coaches['School'] == 'Nebraska') & (df_coaches['Coach'] == '<NAME>')].copy()
# 0. Total seasons
row_counts = list(df_tenure.Season.value_counts())
list_seasons = []
for idx in range(0,len(row_counts)):
list_seasons = list_seasons + ([idx+1] * row_counts[idx])
df_tenure['Sn'] = list_seasons
# 1. Total games
df_tenure['G'] = list(range(1,len(df_tenure)+1))
# 2. Total wins
df_tenure['W'] = df_tenure.Result.eq('W').cumsum()
# 3. Total losses
df_tenure['L'] = df_tenure.Result.eq('L').cumsum()
# 4. Total ties
df_tenure['T'] = df_tenure.Result.eq('T').cumsum()
df_tenure['T'] = df_tenure['T'].fillna(0)
# 5. Win Pct.
if (len(df_tenure) == 1) and (int(df_tenure['G']) == 0):
df_tenure['Win_Pct'] = 0
else:
df_tenure['Win_Pct'] = df_tenure.apply(lambda row: row['W'] / row['G']
if row['G'] != 0 else 0, axis = 1)
# 6. Create conference win/loss flag
list_conf_flag = []
for index, row in df_tenure.iterrows():
if (row['Result'] == 'W') and (row['Conf'] == row['Conf_Opp']):
list_conf_flag.append('W')
elif (row['Result'] == 'L') and (row['Conf'] == row['Conf_Opp']):
list_conf_flag.append('L')
elif (row['Result'] == 'T') and (row['Conf'] == row['Conf_Opp']):
list_conf_flag.append('T')
else:
list_conf_flag.append('')
df_tenure['Result_Conf'] = list_conf_flag
# 7. Total conference games
df_tenure['G_Conf'] = df_tenure.Result_Conf.ne('').cumsum()
# 8. Total conference wins
df_tenure['W_Conf'] = df_tenure.Result_Conf.eq('W').cumsum()
# 9. Total conference losses
df_tenure['L_Conf'] = df_tenure.Result_Conf.eq('L').cumsum()
# 10. Total conference ties
df_tenure['T_Conf'] = df_tenure.Result_Conf.eq('T').cumsum()
# 11. Conference Win Pct.
df_tenure['Win_Pct_Conf'] = df_tenure.apply(
lambda row: row['W_Conf'] / row['G_Conf'] if row['G_Conf'] != 0 else 0, axis = 1)
# if (len(df_tenure) == 1) and (int(df_tenure['G_Conf']) == 0):
# df_tenure['Win_Pct_Conf'] = 0
# else:
# df_tenure['Win_Pct_Conf'] = df_tenure.apply(lambda row: row['W_Conf'] / row['G_Conf']
# if row['G_Conf'] != 0 else 0, axis = 1)
# 12. Create top 25 opponent win/loss flag
list_top25_results = []
for index, row in df_tenure.iterrows():
if (row['Result'] == 'W') and (~np.isnan(row['Rank_Opp'])):
list_top25_results.append('W')
elif (row['Result'] == 'L') and (~np.isnan(row['Rank_Opp'])):
list_top25_results.append('L')
elif (row['Result'] == 'T') and (~np.isnan(row['Rank_Opp'])):
list_top25_results.append('T')
else:
list_top25_results.append('')
df_tenure['Result_Top25_Opp'] = list_top25_results
# 13. Wins vs. AP Top-25
df_tenure['W_vs_Rank'] = df_tenure.Result_Top25_Opp.eq('W').cumsum()
# 14. Losses vs. AP Top-25
df_tenure['L_vs_Rank'] = df_tenure.Result_Top25_Opp.eq('L').cumsum()
# 15. Ties vs AP Top-25
df_tenure['T_vs_Rank'] = df_tenure.Result_Top25_Opp.eq('T').cumsum()
# 16. Win Pct. vs AP Top-25
df_tenure['Win_Pct_vs_Rank'] = df_tenure.apply(
lambda row: row['W_vs_Rank'] / (row['W_vs_Rank'] + row['L_vs_Rank'] + row['T_vs_Rank'])
if (row['W_vs_Rank'] + row['L_vs_Rank'] + row['T_vs_Rank']) != 0 else 0, axis = 1)
# 17. Total bowl games
df_tenure['Bowl_G'] = df_tenure.Notes.str.contains('Bowl').eq(True).cumsum()
# 18. Create bowl win/loss flag
list_bowl_results = []
for index, row in df_tenure.iterrows():
if (row['Result'] == 'W') and ('Bowl' in str(row['Notes'])):
list_bowl_results.append('W')
elif (row['Result'] == 'L') and ('Bowl' in str(row['Notes'])):
list_bowl_results.append('L')
elif (row['Result'] == 'T') and ('Bowl' in str(row['Notes'])):
list_bowl_results.append('T')
else:
list_bowl_results.append('')
df_tenure['Result_Bowl'] = list_bowl_results
# 19. Bowl Wins
df_tenure['Bowl_W'] = df_tenure.Result_Bowl.eq('W').cumsum()
# 20. Bowl Losses
df_tenure['Bowl_L'] = df_tenure.Result_Bowl.eq('L').cumsum()
# 21. Bowl Ties
df_tenure['Bowl_T'] = df_tenure.Result_Bowl.eq('T').cumsum()
# 22. Bowl Win Pct.
df_tenure['Win_Pct_Bowl'] = df_tenure.apply(
lambda row: row['Bowl_W'] / (row['Bowl_W'] + row['Bowl_L'] + row['Bowl_T'])
if (row['Bowl_W'] + row['Bowl_L'] + row['Bowl_T']) != 0 else 0, axis = 1)
# 23. Calculate # of seasons with pre-post season AP Top 25 rankings
list_AP_Pre_counts = []
list_AP_Post_25_counts = []
list_AP_Post_10_counts = []
list_AP_Post_5_counts = []
list_game_counts = []
for season, grp in df_tenure.groupby('Season'):
list_AP_Pre_counts = list_AP_Pre_counts + [1 if ~np.isnan(grp.AP_Pre.iloc[0]) else 0]
list_AP_Post_25_counts = list_AP_Post_25_counts + [1 if grp.AP_Post.iloc[0] <= 25 else 0]
list_AP_Post_10_counts = list_AP_Post_10_counts + [1 if grp.AP_Post.iloc[0] <= 10 else 0]
list_AP_Post_5_counts = list_AP_Post_5_counts + [1 if grp.AP_Post.iloc[0] <= 5 else 0]
list_game_counts = list_game_counts + [len(grp)]
series_AP_Pre_counts = | pd.Series(list_AP_Pre_counts) | pandas.Series |
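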
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import streamlit as st
from sklearn.ensemble import RandomForestRegressor
class Predicoes:
def __init__(self, options):
self.month_names_missing = ['July', 'August', 'September', 'October', 'November', 'December']
self.month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December']
self.y = options
self.range_dates = [f'{mes + 1:0>2}/{2017 + ano}' for ano in range(5) for mes in range(12)]
self.start_date, self.end_date = st.sidebar.select_slider('Intervalo de datas', options=self.range_dates,
value=('01/2020', '03/2021'))
self.qtds = st.sidebar.selectbox('Quantidade', options=self.y, index=1)
def filter_dates(self, df):
start_year = int(self.start_date[3:])
start_month = int(self.start_date[:2])
end_year = int(self.end_date[3:])
end_month = int(self.end_date[:2])
years = list()
for y in range(end_year - start_year + 1):
years.append(f'{start_year + y}')
months = list()
qtd_months = (len(years) * 12) - (start_month - 1) - (12 - end_month)
for m in range(qtd_months):
months.append(f'{self.month_names[(start_month + m - 1) % 12]}')
return self.__calc_intervalo_meses_ano(df, months, years)
def __calc_intervalo_meses_ano(self, df, months, years):
months_ = months
        list_months = [None] * len(years)  # placeholder values, overwritten below
cont = 0
i = 0
for y in years:
months_ = months_[cont:]
cont = 0
for m in months_:
cont += 1
if m == 'December':
break
list_months[i] = months_[0:cont]
i += 1
i = 0
df_ = df[(df['Ano'].isin([years[i]])) & (df['Mês'].isin(list_months[i]))]
if len(years) > 1:
for y in years[1:]:
i += 1
df_1 = df[(df['Ano'].isin([y])) & (df['Mês'].isin(list_months[i]))]
frames = [df_, df_1]
df_ = pd.concat(frames)
return df_
def sort(self, df):
sorter_index = dict(zip(self.month_names, range(len(self.month_names))))
df['mes_rank'] = df['Mês'].map(sorter_index)
df = df.sort_values(['Ano', 'mes_rank'])
return df.drop(columns=['mes_rank'])
def create_plot(self, df, by, title):
fig = go.Figure()
for faixa in df[by].unique():
data = df[df[by] == faixa]
meses = [f'{t[0][:3]} {t[1]}' for t in zip(data['Mês'].values, data['Ano'].values)]
fig.add_trace(go.Scatter(x=meses, y=data[self.qtds].values, mode='lines+markers', name=faixa))
fig.update_layout(title_text=title)
return fig
def predict(self, df, df_new, X_train, X_test):
Y_train = df[self.y]
regressor = RandomForestRegressor()
regressor.fit(X_train, Y_train)
previsoes = regressor.predict(X_test)
previsoes = pd.DataFrame(previsoes)
previsoes.columns = self.y
for c in previsoes.columns:
previsoes[c] = previsoes[c].astype(int)
previsoes = pd.concat([df_new, previsoes], axis=1)
return pd.concat([df, previsoes])
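# The sort() method above orders rows chronologically by mapping month names to
# an integer rank. An ordered Categorical achieves the same ordering; sketch
# only, not a change to the class above.
import pandas as pd

month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
               'August', 'September', 'October', 'November', 'December']
toy = pd.DataFrame({'Ano': ['2020', '2020'], 'Mês': ['March', 'January']})
toy['Mês'] = pd.Categorical(toy['Mês'], categories=month_names, ordered=True)
toy = toy.sort_values(['Ano', 'Mês'])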
class PredicoesPessoasIdade(Predicoes):
def __init__(self, dfPessoas, options):
super().__init__(options)
self.x = ['Mês', 'Ano', 'Faixa Etária']
dfPessoas['data_inversa'] = pd.to_datetime(dfPessoas['data_inversa'])
self.df = dfPessoas.copy()
self.__agg_data(dfPessoas)
self.__agg_new_data()
def __agg_data(self, dfPessoas):
self.df.loc[dfPessoas['idade'] >= 0, 'idade'] = 'Criança'
self.df.loc[dfPessoas['idade'] >= 13, 'idade'] = 'Jovem'
self.df.loc[dfPessoas['idade'] >= 25, 'idade'] = 'Adulto'
self.df.loc[dfPessoas['idade'] >= 60, 'idade'] = 'Idoso'
self.df = self.df.groupby([self.df['data_inversa'].dt.strftime('%B'), self.df['data_inversa'].dt.strftime('%Y'), 'idade'])
self.df = self.df.agg({'id': 'nunique', 'pesid': 'count', 'ilesos': 'sum', 'feridos_leves': 'sum', 'feridos_graves': 'sum', 'mortos': 'sum'})
self.df.index.names = self.x
self.df.columns = self.y
self.df = self.df.reset_index()
def __agg_new_data(self):
faixa_etaria_unique = self.df['Faixa Etária'].unique().tolist()
mesh = np.array(np.meshgrid(self.month_names_missing, ['2020'], faixa_etaria_unique)).T.reshape(-1, 3)
df_new2020 = pd.DataFrame(mesh, columns=['Mês', 'Ano', 'Faixa Etária'])
mesh = np.array(np.meshgrid(self.month_names, ['2021'], faixa_etaria_unique)).T.reshape(-1, 3)
df_new2021 = | pd.DataFrame(mesh, columns=['Mês', 'Ano', 'Faixa Etária']) | pandas.DataFrame |
import scipy.io.wavfile as wav
from python_speech_features import mfcc
import numpy as np
import os
import pandas as pd
CLASSICAL_DIR = "C:\\Users\\<NAME>\\Music\\Classical\\"
METAL_DIR = "C:\\Users\\<NAME>\\Music\\Metal\\"
JAZZ_DIR = "C:\\Users\\<NAME>\\Music\\Jazz\\"
POP_DIR = "C:\\Users\\<NAME>\\Music\\Pop\\"
PATH = "E:\\git\\python_speech_features\\covariance\\"
x = [CLASSICAL_DIR, METAL_DIR, JAZZ_DIR, POP_DIR]
t = 100
columns = ['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5', 'Feature6', 'Feature7', 'Feature8', 'Feature9',
'Feature10', 'Feature11', 'Feature12', 'Feature13']
dataset = []
genre = []
for i in x:
if i == CLASSICAL_DIR:
for index in range(0, t):
genre.append(0)
file_name = "classical.000"+str(index).zfill(2)
file = file_name+".wav"
(rate, signal) = wav.read(CLASSICAL_DIR+file)
mfcc_feat = mfcc(signal, rate)
cov = np.cov(mfcc_feat, rowvar=0)
mean = np.mean(mfcc_feat, axis=0)
# if not os.path.exists(PATH+file_name):
# os.makedirs(PATH+file_name)
pd.DataFrame(cov).to_csv(PATH+"classical"+str(index)+'.csv', index=False, header=False)
dataset.append(mean)
elif i == METAL_DIR:
for index in range(0, t):
genre.append(1)
file_name = "metal.000" + str(index).zfill(2)
file = file_name + ".wav"
(rate, signal) = wav.read(METAL_DIR + file)
mfcc_feat = mfcc(signal, rate)
cov = np.cov(mfcc_feat, rowvar=0)
mean = np.mean(mfcc_feat, axis=0)
# if not os.path.exists(PATH+file_name):
# os.makedirs(PATH+file_name)
| pd.DataFrame(cov) | pandas.DataFrame |
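# The classical and metal branches above repeat the same MFCC -> covariance ->
# mean pipeline. A sketch of how it could be factored into one helper (the
# function name and its use are illustrative, not part of the original script):
def extract_genre_features(directory, genre_name, count=100):
    means = []
    for index in range(count):
        file = genre_name + ".000" + str(index).zfill(2) + ".wav"
        rate, signal = wav.read(directory + file)
        feats = mfcc(signal, rate)
        pd.DataFrame(np.cov(feats, rowvar=0)).to_csv(
            PATH + genre_name + str(index) + '.csv', index=False, header=False)
        means.append(np.mean(feats, axis=0))
    return means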
import sys
import configparser
import time
import re
import pandas as pd
from pandas import Series,DataFrame
# Count passwords that may contain a date (those with a run of 4 or more consecutive digits)
def countProbPasswd(passwdList):
df = []
for i in range(len(passwdList)):
passwd = str(passwdList[i])
struc = ""
for ch in passwd:
if ch.isdigit():
struc += 'D'
elif ch.isalpha():
struc += 'L'
else:
struc += 'S'
char = struc[0]
c = 1
stri = struc[1:]
res = ''
for j in stri:
if j == char:
c += 1
else:
res += char
res += str(c)
char = j
c = 1
res += char
res += str(c)
#r'D[4-9]|D\d{2}'
if re.search(r'D[4-9]|D\d{2}', res):
df.append(passwd)
return df
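# Quick check of the structure encoding used above: 'abc1984!' becomes the
# run-length string 'L3D4S1', whose 'D4' run matches r'D[4-9]|D\d{2}', while
# 'password' (L8) and 'qwerty12' (L6D2) do not qualify.
example = countProbPasswd(['abc1984!', 'password', 'qwerty12'])
# example == ['abc1984!']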
# Count passwords containing numeric dates - Yahoo
# Passwords matching the patterns below are written under the date_passwd/Yahoo directory
def analysisDate_Yahoo(data):
lis1 = []
lis2 = []
lis3 = []
lis4 = []
lis5 = []
lis6 = []
lis7 = []
lis8 = []
lis9 = []
datePasswd = {'yyyy':0,'yyyymm':0,'yyyymmdd':0,'mmddyyyy':0,'ddmmyyyy':0,'yymmdd':0,'mmddyy':0,'ddmmyy':0,'mmdd':0}
for i in data:
        # Patterns are checked from longest to shortest; a password matching
        # several patterns is only counted under the first pattern checked.
        # For example, 19800205 is classified as yyyy-mm-dd rather than yyyy
#yyyy-mm-dd
if re.search(r'(19\d{2}|20\d{2})(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['yyyymmdd'] += 1
lis3.append(i)
continue
#mm-dd-yyyy
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])(19\d{2}|20\d{2})',i):
datePasswd['mmddyyyy'] += 1
lis4.append(i)
continue
#dd-mm-yyyy
if re.search(r'(0[1-9]|[1-2][0-9]|3[0-1])(0[1-9]|1[0-2])(19\d{2}|20\d{2})',i):
datePasswd['ddmmyyyy'] += 1
lis5.append(i)
continue
#yy-mm-dd
if re.search(r'[0-9][0-9](0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['yymmdd'] += 1
lis6.append(i)
continue
#mm-dd-yy
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])[0-9][0-9]',i):
datePasswd['mmddyy'] += 1
lis7.append(i)
continue
#dd-mm-yy
if re.search(r'(0[1-9]|[1-2][0-9]|3[0-1])(0[1-9]|1[0-2])[0-9][0-9]',i):
datePasswd['ddmmyy'] += 1
lis8.append(i)
continue
#yyyy-mm
if re.search(r'(19\d{2}|20\d{2})(0[1-9]|1[0-2])',i):
datePasswd['yyyymm'] += 1
lis2.append(i)
continue
        #yyyy 1900-2099
if re.search(r'19\d{2}|20\d{2}',i):
datePasswd['yyyy'] += 1
lis1.append(i)
continue
#mm-dd
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['mmdd'] += 1
lis9.append(i)
continue
pd.Series(lis1).to_csv('date_passwd/Yahoo/yyyy.csv')
pd.Series(lis2).to_csv('date_passwd/Yahoo/yyyymm.csv')
pd.Series(lis3).to_csv('date_passwd/Yahoo/yyyymmdd.csv')
pd.Series(lis4).to_csv('date_passwd/Yahoo/mmddyyyy.csv')
pd.Series(lis5).to_csv('date_passwd/Yahoo/ddmmyyyy.csv')
pd.Series(lis6).to_csv('date_passwd/Yahoo/yymmdd.csv')
pd.Series(lis7).to_csv('date_passwd/Yahoo/mmddyy.csv')
pd.Series(lis8).to_csv('date_passwd/Yahoo/ddmmyy.csv')
| pd.Series(lis9) | pandas.Series |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), True),
# Rent share of income by population density
import pandas as pd
import numpy as np
import array
file = pd.read_csv('county_rent_mort_inc_units_1yr.csv')
import pandas as pd
import numpy as np
from metacash.account import Account
from metacash.transactions import Transactions
class TimestampSampler:
def __init__(self, ts_index):
self.index = ts_index
    def add_noise_uniform(self, d):
        # assumed semantics: shift each timestamp by a uniform random offset in [0, d],
        # where `d` is anything pandas.Timedelta accepts (e.g. "2D", "6H")
        seconds = np.random.uniform(0.0, 1.0, len(self.index)) * pd.Timedelta(d).total_seconds()
        self.index = self.index + pd.to_timedelta(seconds, unit="s")
        return self
    def add_noise_normal(self, d):
        # assumed semantics: shift each timestamp by |N(0, d/2)|, truncated to [0, d]
        draws = np.clip(np.abs(np.random.normal(0.0, 0.5, len(self.index))), 0.0, 1.0)
        self.index = self.index + pd.to_timedelta(draws * pd.Timedelta(d).total_seconds(), unit="s")
        return self
@classmethod
def date_range(cls, *args, **kwargs):
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html
return cls(pd.date_range(*args, **kwargs).round("D"))
def sample(self):
return self.index.copy()
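# Minimal usage sketch (illustrative only; the start date and frequency below are
# made-up arguments, not values taken from this project):
#   ts = TimestampSampler.date_range("2021-01-01", periods=12, freq="MS")
#   ts.sample()  # -> a copy of the underlying DatetimeIndex, rounded to whole days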
class AmountSampler:
def __init__(self, amount, noise=None):
self.amount = amount
self.noise = noise
def sample(self):
# normal distrib mu=0 stddev=1/2: 95% values within -1,1
if self.noise is not None:
noise_sample = self.noise(self.amount)
else:
noise_sample = 0
return self.amount + noise_sample
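# Minimal usage sketch (illustrative only; the noise callable is an assumed example,
# following the convention that `noise` receives the base amount):
#   sampler = AmountSampler(100.0, noise=lambda amount: np.random.normal(0.0, 0.5) * amount)
#   sampler.sample()  # -> 100.0 plus a noise term proportional to the amount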
class TransactionsSampler:
def __init__(self, ts_sampler, amount_sampler, description):
self.ts_sampler = ts_sampler
self.amount_sampler = amount_sampler
self.description = description
def sample(self):
index = self.ts_sampler.sample()
        df = pd.DataFrame(columns=Transactions.columns)
import pytest
import numpy as np
import pandas as pd
EXP_IDX = pd.MultiIndex(levels=[['model_a'], ['scen_a', 'scen_b']],
codes=[[0, 0], [0, 1]], names=['model', 'scenario'])
def test_set_meta_no_name(test_df):
idx = pd.MultiIndex(levels=[['a_scenario'], ['a_model'], ['some_region']],
codes=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx)
pytest.raises(ValueError, test_df.set_meta, s)
def test_set_meta_as_named_series(test_df):
idx = pd.MultiIndex(levels=[['scen_a'], ['model_a'], ['some_region']],
codes=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx, name='meta_values')
test_df.set_meta(s)
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(test_df['meta_values'], exp)
def test_set_meta_as_unnamed_series(test_df):
idx = pd.MultiIndex(levels=[['scen_a'], ['model_a'], ['some_region']],
codes=[[0], [0], [0]],
names=['scenario', 'model', 'region'])
s = pd.Series(data=[0.3], index=idx)
test_df.set_meta(s, name='meta_values')
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(test_df['meta_values'], exp)
def test_set_meta_non_unique_index_fail(test_df):
idx = pd.MultiIndex(levels=[['model_a'], ['scen_a'], ['reg_a', 'reg_b']],
codes=[[0, 0], [0, 0], [0, 1]],
names=['model', 'scenario', 'region'])
s = pd.Series([0.4, 0.5], idx)
pytest.raises(ValueError, test_df.set_meta, s)
def test_set_meta_non_existing_index_fail(test_df):
idx = pd.MultiIndex(levels=[['model_a', 'fail_model'],
['scen_a', 'fail_scenario']],
codes=[[0, 1], [0, 1]], names=['model', 'scenario'])
s = pd.Series([0.4, 0.5], idx)
pytest.raises(ValueError, test_df.set_meta, s)
def test_set_meta_by_df(test_df):
df = pd.DataFrame([
['model_a', 'scen_a', 'some_region', 1],
], columns=['model', 'scenario', 'region', 'col'])
test_df.set_meta(meta=0.3, name='meta_values', index=df)
exp = pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values')
pd.testing.assert_series_equal(test_df['meta_values'], exp)
def test_set_meta_as_series(test_df):
s = pd.Series([0.3, 0.4])
test_df.set_meta(s, 'meta_series')
exp = pd.Series(data=[0.3, 0.4], index=EXP_IDX, name='meta_series')
pd.testing.assert_series_equal(test_df['meta_series'], exp)
def test_set_meta_as_int(test_df):
test_df.set_meta(3.2, 'meta_int')
exp = pd.Series(data=[3.2, 3.2], index=EXP_IDX, name='meta_int')
obs = test_df['meta_int']
pd.testing.assert_series_equal(obs, exp)
def test_set_meta_as_str(test_df):
test_df.set_meta('testing', name='meta_str')
exp = pd.Series(data=['testing'] * 2, index=EXP_IDX, name='meta_str')
pd.testing.assert_series_equal(test_df['meta_str'], exp)
def test_set_meta_as_str_list(test_df):
test_df.set_meta(['testing', 'testing2'], name='category')
obs = test_df.filter(category='testing')
assert obs['scenario'].unique() == 'scen_a'
def test_set_meta_as_str_by_index(test_df):
idx = pd.MultiIndex(levels=[['model_a'], ['scen_a']],
codes=[[0], [0]], names=['model', 'scenario'])
test_df.set_meta('foo', 'meta_str', idx)
exp = pd.Series(data=['foo', None], index=EXP_IDX, name='meta_str')
pd.testing.assert_series_equal(test_df['meta_str'], exp)
def test_set_meta_from_data(test_df):
test_df.set_meta_from_data('pe_2005', variable='Primary Energy', year=2005)
exp = pd.Series(data=[1., 2.], index=EXP_IDX, name='pe_2005')
pd.testing.assert_series_equal(test_df['pe_2005'], exp)
def test_set_meta_from_data_max(test_df):
test_df.set_meta_from_data('pe_max_yr', variable='Primary Energy',
method=np.max)
exp = pd.Series(data=[6., 7.], index=EXP_IDX, name='pe_max_yr')
    pd.testing.assert_series_equal(test_df['pe_max_yr'], exp)
"""
Process results
This script processes results for the final report of SCOOP
"""
# ==============================================================================
# Imports
# ==============================================================================
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import rc
rc("font", **{"family": "serif", "serif": ["Times"]})
rc("text", usetex=True)
from glob import glob
import re
# ==============================================================================
# Constants
# ==============================================================================
ddir_lst = ["data/eu4dpfmix_mpr0.csv", "data/eu4dpfmix.csv"]
# ==============================================================================
# Functions
# ==============================================================================
def column_generator(data_frame):
"""
Create supplementary columns
"""
str_splt = data_frame["Cycle"].split("-")
veh_id = int(str_splt[0])
mpr = float(str_splt[2].split("_")[0])
flow = float(str_splt[3].split("_")[0])
distance = int(str_splt[4].split(".dri")[0])
return pd.Series([veh_id, mpr, flow, distance])
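# Illustrative example of the cycle-name convention column_generator assumes
# (the file name below is made up, not an actual cycle from the dataset):
#   row = pd.Series({"Cycle": "1-eu4dpfmix-10_mpr-1200_flow-500.dri"})
#   column_generator(row)  # -> pd.Series([1, 10.0, 1200.0, 500]), i.e. veh_id, mpr, flow, distance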
def create_columns(data_frame, function):
"""
Apply function to dataframe
"""
fields = ["veh_id", "mpr", "flow", "distance"]
data_frame[fields] = data_frame.apply(function, axis=1)
return data_frame
def refer_to_mpr(data_frame, field, new_field):
"""
Refer to MPR 0 %
"""
# Create reference data_frame
reference = data_frame[data_frame["mpr"].eq(0)]
reference = pd.concat([reference] * 5).reset_index()
reference = reference.drop("index", axis=1)
# Compute difference
diff_df = reference[field] - data_frame[field]
# diff_df = diff_df.reset_index()
data_frame[new_field] = (diff_df.divide(reference[field])) * 100
# Round for results
data_frame = data_frame.round(3)
return data_frame
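# Worked example of the referencing convention (made-up numbers): if the MPR 0 %
# baseline row has CO2_TP == 150 and another row has CO2_TP == 135, the new field is
# (150 - 135) / 150 * 100 = 10.0, so a positive value means a 10 % reduction relative
# to the 0 % market-penetration-rate baseline.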
def plot_var(
data_frame,
x_var="flow",
y_var="CO_TP",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="CO2 %",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
x_size=5,
y_size=7.5,
transpose=False,
):
"""
Plot variables
"""
pivoter = data_frame[pivot].unique()
N = len(pivoter)
if transpose:
n, m = 1, N
else:
m, n = 1, N
fig, axes = plt.subplots(m, n, figsize=(x_size * N, y_size), sharey=True)
for pvt, ax in zip(pivoter, axes):
flt = data_frame[pivot].eq(pvt)
df = data_frame[flt]
df.pivot_table(
index=x_var, columns=label_var, values=y_var, aggfunc="mean"
).plot(kind="bar", ax=ax, grid=True)
ax.set_xlabel(x_label, fontdict=fnt_size)
ax.set_ylabel(y_label, fontdict=fnt_size)
ax.set_title(t_label + str(pvt), fontdict=fnt_size)
ax.legend(legends)
return fig, axes
def plot_co2perc(data_frame):
"""
Create Dataframe CO2 % Data vs flow
"""
figco2, axco2 = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="CO2 %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in CO$_2$ [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figco2, axco2
def plot_co2(data_frame):
"""
Create Dataframe CO2 consumption vs flow
"""
figco2, axco2 = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="CO2_TP",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="CO$_2$ [g/km]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
for ax in axco2:
ax.set(ylim=(120, 160))
return figco2, axco2
def plot_ttt(data_frame):
"""
Plot absolute Total Travel Time vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="totalTT",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="Total Travel Time [s]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_tttprc(data_frame):
"""
Plot Change Total Travel Time % vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="totTT %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in Total TT [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_tttd(data_frame):
"""
Plot Absolute Total Travel Time vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="totalTT",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label="Total Travel Time [s]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_tttdprc(data_frame):
"""
Plot Change Total Travel Time % vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="totTT %",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label=r"Change in Total TT [\%]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mtt(data_frame):
"""
Plot absolute Avg Travel Time vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="meanTT",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="Avg. Travel Time [s]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mttperc(data_frame):
"""
Plot Change Avg Travel Time % vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="avgTT %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in Avg. TT [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mttd(data_frame):
"""
Plot Absolute Total Travel Time vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="meanTT",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label="Average Travel Time [s]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_mttdprc(data_frame):
"""
Plot Change Total Travel Time % vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="avgTT %",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label=r"Change in Avg. TT [\%]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttc(data_frame):
"""
Plot time to Colission vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="timetC",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label="Time To Collision [s]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttcprc(data_frame):
"""
Plot Change Total Travel Time % vs flow
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="flow",
y_var="timeTC %",
label_var="mpr",
pivot="distance",
x_label="Flow [veh/m]",
y_label=r"Change in Time to Collision [\%]",
t_label="Distance [m]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttcd(data_frame):
"""
Plot Absolute Total Travel Time vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="timetC",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label="Time To Collision [s]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_ttcdprc(data_frame):
"""
Plot Change Total Travel Time % vs distance
"""
figtt, axtt = plot_var(
data_frame=data_frame,
x_var="distance",
y_var="timeTC %",
label_var="mpr",
pivot="flow",
x_label="Distance [m]",
y_label=r"Change in Time to Collision [\%]",
t_label="Flow [veh/h]: ",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
)
return figtt, axtt
def plot_hwy(data_frame):
"""
Plot spacing vs time
"""
fighwy, axhwy = plot_var(
data_frame=data_frame,
x_var="time",
y_var="hwy",
label_var="mpr",
pivot="flow",
x_label=" Time [hh:mm]",
y_label="Headway space [m]",
t_label="Flow [veh/h]",
legends=[r"0 \%", r"10 \%", r"20 \%", r"30 \%", r"40 \%"],
fnt_size={"fontsize": 16},
x_size=7.5,
transpose=True,
)
return fighwy, axhwy
# ==============================================================================
# Processing
# ==============================================================================
# CO2
# ==============================================================================
# Import csv files
dflst = [pd.read_csv(file) for file in ddir_lst]
import os, sys, fnmatch, random, json, logging, argparse
import pandas as pd
import numpy as np
from collections import OrderedDict
import datetime as dt
from datetime import datetime
from dateutil import parser
from IPython.display import display, Markdown, HTML, clear_output, display_html
from operator import itemgetter
from jinja2 import Template
from urllib.parse import urlparse
from urllib.request import urlopen
import scipy.stats as stats
from scipy.stats import ttest_ind, ttest_rel, ttest_1samp
from scipy.stats import chi2, chi2_contingency
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.graphics.gofplots import qqplot
from scipy.stats import boxcox, shapiro, gaussian_kde
import sqlalchemy
from sqlalchemy import create_engine, Column, Integer, String, Enum
import matplotlib
import matplotlib.pyplot as plt
from termcolor import colored
import seaborn as sns
sns.set_context('talk')
sns.set_style('white')
import hvplot
import hvplot.pandas
import holoviews as hv
from holoviews import opts
import panel as pn
pn.extension()
from io import StringIO
from bokeh.io import show, curdoc
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from bokeh.models.filters import CustomJSFilter
from bokeh.layouts import column, row, WidgetBox, gridplot
from bokeh.palettes import Category10_10, Category20_16, Category20_20, Category20
from bokeh.models import Column, CDSView, CustomJS, CategoricalColorMapper, ColumnDataSource, HoverTool, Panel, MultiSelect
from bokeh.models.widgets import CheckboxGroup, CheckboxButtonGroup, Slider, RangeSlider, Tabs, TableColumn, DataTable
import warnings
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.simplefilter("ignore")
from src.Config import Config
from src.analysis.feature_engineer import Feature_Engineer
from src.analysis.statistical_analysis import Statistic_Analysis
class Logger(object):
info = print
warning = print
error = print
critical = print
class JSON_Datetime_Encoder(json.JSONEncoder):
def default(self, obj):
        if isinstance(obj, (dt.date, dt.datetime)):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
class Analysis(Feature_Engineer, Statistic_Analysis):
data = {}
json = JSON_Datetime_Encoder()
def __init__(self, customer_id=["*"], suffix="", logger=Logger()):
self.customer_id = customer_id
self.suffix = suffix
self.logger = logger
@staticmethod
def style(p):
# # Title
p.title.align = "center"
p.title.text_font_size = "16pt"
p.title.text_font = "serif"
# # Axis Titles
p.xaxis.axis_label_text_font_size = "12pt"
p.xaxis.axis_label_text_font_style = "bold"
p.yaxis.axis_label_text_font_size = "12t"
p.yaxis.axis_label_text_font_style = "bold"
# # Tick Labels
p.xaxis.major_label_text_font_size = "10pt"
p.yaxis.major_label_text_font_size = "10pt"
return p
@staticmethod
def get_template(path):
if bool(urlparse(path).netloc):
from urllib.request import urlopen
return urlopen(path).read().decode('utf8')
return open(path).read()
@staticmethod
def vars(types=[], wc_vars=[], qreturn_dict=False):
""" Return list of variable names
Acquire the right features from dataframe to be input into model.
Featurs will be acquired based the value "predictive" in the VARS dictionary.
Parameters
----------
types : str
VARS name on type of features
Returns
-------
Features with predictive == True in self.VARS
"""
if types==None:
types = [V for V in Config.VARS]
selected_vars = []
for t in types:
for d in Config.VARS[t]:
if not d.get('predictive'):
continue
if len(wc_vars) != 0:
matched_vars = fnmatch.filter(wc_vars, d['var'])
if qreturn_dict:
for v in matched_vars:
dd = d.copy()
dd['var'] = v
if not dd in selected_vars:
selected_vars.append(dd)
else:
for v in matched_vars:
if not v in selected_vars:
selected_vars.append(v)
else:
if qreturn_dict and not d in selected_vars:
selected_vars.append(d)
else:
if not d['var'] in selected_vars:
selected_vars.append(d['var'])
return selected_vars
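    # Illustrative sketch of the Config.VARS structure that vars() assumes; the group
    # and variable names below are hypothetical, not the real configuration:
    #   Config.VARS = {
    #       "Customer": [
    #           {"var": "order_hour_of_day", "predictive": True},
    #           {"var": "aisle", "predictive": False},
    #       ]
    #   }
    #   Analysis.vars(["Customer"])                     # -> ["order_hour_of_day"]
    #   Analysis.vars(["Customer"], qreturn_dict=True)  # -> [{"var": "order_hour_of_day", "predictive": True}]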
def dump_json(self, obj):
return json.dumps(obj, cls=JSON_Datetime_Encoder)
def read_file(self, fname, source_type=Config.ANALYSIS_CONFIG["FILE_TYPE"]):
"""Read in files, focusing on csv files.
Parameters
----------
fname : [type]
[description]
source_type : str, optional
[description], by default "single"
Returns
-------
[type]
[description]
"""
if source_type == "single":
try:
fname = "{}.csv".format(os.path.join(self.FILES["DATA_LOCAL"], fname))
data = pd.read_csv(fname, error_bad_lines=False)
if data.size == 0:
self.logger.warning("no data found in file {}".format(fname))
if self.logger.warning == print:
exit()
except FileNotFoundError:
self.logger.critical("file {} is not found ...".format(fname))
if self.logger.critical == print:
exit()
elif source_type == "multiple":
csv_ext = [".csv"]
data = pd.DataFrame()
for root, dirs, files in os.walk(os.path.join(self.FILES["DATA_LOCAL"])):
for filename in files:
if filename.endswith(tuple(csv_ext)):
df_temp = pd.read_csv(os.path.join(root, filename))
data = pd.concat([data, df_temp], axis=0, sort=True)
else:
self.logger.error("Please select only 'single' or 'multiple' ...")
return data
def get_data(self):
self.logger.info("Reading in data ...")
self.logger.info(" Loading order dataframe ...")
self.data["order_df"] = self.read_file(fname="orders", source_type="single")
self.logger.info(" Loading order prior dataframe ...")
self.data["order_prior_df"] = self.read_file(fname="order_products__prior", source_type="single")
self.logger.info(" Loading products dataframe ...")
self.data["products_df"] = self.read_file(fname="products", source_type="single")
self.logger.info(" Loading departments dataframe ...")
self.data["departments_df"] = self.read_file(fname="departments", source_type="single")
self.logger.info(" Loading aisles dataframe ...")
self.data["aisles_df"] = self.read_file(fname="aisles", source_type="single")
self.logger.info(" Merging dataframe ...")
self.logger.info(" on Orders-Prior & Orders:")
self.data["customer_data"] = pd.merge(self.data["order_prior_df"], self.data["order_df"], on=["order_id"], how="left")
self.logger.info(" on Customer Orders & Products:")
self.data["customer_data"] = pd.merge(self.data["customer_data"], self.data["products_df"], on=["product_id"], how="left")
self.logger.info(" on Customer Orders & Aisle:")
self.data["customer_data"] = pd.merge(self.data["customer_data"], self.data["aisles_df"], on=["aisle_id"], how="left")
self.logger.info(" on Customer Orders & Departments:")
self.data["customer_data"] = pd.merge(self.data["customer_data"], self.data["departments_df"], on=["department_id"], how="left")
self.logger.info(" Data Processing ...")
self.logger.info(" renaming columns ...")
self.data["customer_data"] = self.data["customer_data"].rename(columns=self.COLUMN_RENAME["CUSTOMER"])
self.data["customer_data"]["user_id"] = self.data["customer_data"]["user_id"].astype(str)
        # # Randomly select 20 customers for the analysis, since the bokeh palettes used here support at most 20 categories
self.available_group = list(self.data["customer_data"]['user_id'].unique())
self.available_group = random.sample(self.available_group, k=20)
# # Data correlation
self.logger.info(" Generating data correlation dataframe for feature dependency ...")
self.data["data_correlation_df"] = self.data["customer_data"][self.vars(["Customer"], self.data["customer_data"].columns)]
if self.QDEBUG:
fname = os.path.join(self.FILES["OUTPUT_PATH"], "{}{}.csv".format(self.FILES["DATA_CORRELATION"], self.suffix))
self.data["data_correlation_df"].to_csv(fname)
fname = os.path.join(self.FILES["OUTPUT_PATH"], "{}{}.csv".format(self.FILES["CUSTOMER_DATA"], self.suffix))
self.data["customer_data"].to_csv(fname)
self.logger.info("done.")
def feature_engineering(self):
self.logger.info("Creating new feature ...")
self.logger.info(" Creating number of products ordered across each days ...")
self.data['day_peak_df'] = self.data['customer_data'].groupby(['order_id', 'order_day_of_week'])['order_number'].count().reset_index().rename(columns={'order_number': 'count'})
self.data["day_peak_df"]["peak_day"] = np.where(self.data['day_peak_df']['order_day_of_week'] <= 1, 1, 0)
self.logger.info(" Creating number of products ordered across each hours ...")
self.data['time_peak_df'] = self.data['customer_data'].groupby(['order_id', 'order_hour_of_day'])['order_number'].count().reset_index().rename(columns={'order_number': 'count'})
self.data["time_peak_df"]["peak_time"] = np.where((self.data["time_peak_df"]['order_hour_of_day'] >= self.ANALYSIS_CONFIG["PEAK_DAY_FROM"]) &
(self.data["time_peak_df"]['order_hour_of_day'] <= self.ANALYSIS_CONFIG["PEAK_DAY_TO"]), 1, 0)
# # RFM features
self.logger.info(" Creating number of products ordered in each order ...")
num_products = self.data["customer_data"].groupby(['order_id'])['product_id'].count().reset_index().rename(columns={'product_id':'num_products'})
self.data["customer_data"] = pd.merge(self.data["customer_data"], num_products, on='order_id', how='left')
self.logger.info(" Creating peak day categorical feature ...")
self.data["customer_data"]['peak_day'] = np.where(self.data["customer_data"]['order_day_of_week'] <= 1, 1, 0)
self.logger.info(" Creating peak time categorical feature (from 10 - 16) ...")
self.data["customer_data"]['peak_time'] = np.where((self.data["customer_data"]['order_hour_of_day'] >= self.ANALYSIS_CONFIG["PEAK_DAY_FROM"]) &
(self.data["customer_data"]['order_hour_of_day'] <= self.ANALYSIS_CONFIG["PEAK_DAY_TO"]), 1, 0)
self.logger.info(" Creating number of orders per customer, peak day rate, median hour, peak time rate, mean lag days since last order, mean number of products ...")
num_orders = self.data["customer_data"].groupby(['user_id'])['order_number'].max()
peakday_rate = round(self.data["customer_data"].groupby(["user_id"])['peak_day'].mean(), 2)
med_hour = round(self.data["customer_data"].groupby('user_id')['order_hour_of_day'].median(), 0)
peaktime_rate = round(self.data["customer_data"].groupby(['user_id'])['peak_time'].mean(), 2)
mean_lag_days = round(self.data["customer_data"].groupby(['user_id'])['days_since_last_order'].mean(), 0)
mean_num_products = round(self.data["customer_data"].groupby('user_id')['num_products'].mean(), 0)
self.data['features'] = pd.concat([num_orders, peakday_rate, med_hour, peaktime_rate, mean_lag_days, mean_num_products], axis=1)
self.data['features'].columns = self.ANALYSIS_CONFIG["FEATURES_COL"]
self.data['features'] = self.data['features'].reset_index()
self.data["customer_data"] = | pd.merge(self.data["customer_data"], self.data['features'], on='user_id', how='left') | pandas.merge |
"""arbin res-type data files"""
import os
import sys
import tempfile
import shutil
import logging
import platform
import warnings
import time
import numpy as np
import pandas as pd
from cellpy.readers.core import (
FileID,
Cell,
check64bit,
humanize_bytes,
xldate_as_datetime,
)
from cellpy.parameters.internal_settings import HeaderDict, get_headers_normal
from cellpy.readers.instruments.mixin import Loader, MINIMUM_SELECTION
from cellpy import prms
DEBUG_MODE = prms.Reader.diagnostics
ALLOW_MULTI_TEST_FILE = False
# Select odbc module
ODBC = prms._odbc
SEARCH_FOR_ODBC_DRIVERS = prms._search_for_odbc_driver
use_subprocess = prms.Instruments.Arbin.use_subprocess
detect_subprocess_need = prms.Instruments.Arbin.detect_subprocess_need
# Finding out some stuff about the platform (TODO: refactor to mixin)
is_posix = False
is_macos = False
if os.name == "posix":
is_posix = True
current_platform = platform.system()
if current_platform == "Darwin":
is_macos = True
if DEBUG_MODE:
logging.debug("DEBUG_MODE")
logging.debug(f"ODBC: {ODBC}")
logging.debug(f"SEARCH_FOR_ODBC_DRIVERS: {SEARCH_FOR_ODBC_DRIVERS}")
logging.debug(f"use_subprocess: {use_subprocess}")
logging.debug(f"detect_subprocess_need: {detect_subprocess_need}")
logging.debug(f"current_platform: {current_platform}")
# TODO: refactor to mixin
if detect_subprocess_need:
logging.debug("detect_subprocess_need is True: checking versions")
python_version, os_version = platform.architecture()
if python_version == "64bit" and prms.Instruments.Arbin.office_version == "32bit":
logging.debug(
"python 64bit and office 32bit -> " "setting use_subprocess to True"
)
use_subprocess = True
if use_subprocess and not is_posix:
# The windows users most likely have a strange custom path to mdbtools etc.
logging.debug(
"using subprocess (most lilkely mdbtools) " "on non-posix (most likely windows)"
)
if not prms.Instruments.Arbin.sub_process_path:
sub_process_path = str(prms._sub_process_path)
else:
sub_process_path = str(prms.Instruments.Arbin.sub_process_path)
if is_posix:
sub_process_path = "mdb-export"
try:
driver_dll = prms.Instruments.Arbin.odbc_driver
except AttributeError:
driver_dll = None
# TODO: deprecate ado
use_ado = False
if ODBC == "ado":
use_ado = True
logging.debug("Trying to use adodbapi as ado loader")
try:
import adodbapi as dbloader # http://adodbapi.sourceforge.net/
except ImportError:
use_ado = False
if not use_ado:
if ODBC == "pyodbc":
try:
import pyodbc as dbloader
except ImportError:
warnings.warn("COULD NOT LOAD DBLOADER!", ImportWarning)
dbloader = None
elif ODBC == "pypyodbc":
try:
import pypyodbc as dbloader
except ImportError:
warnings.warn("COULD NOT LOAD DBLOADER!", ImportWarning)
dbloader = None
if DEBUG_MODE:
logging.debug(f"dbloader: {dbloader}")
# Names of the tables in the .res db that is used by cellpy
TABLE_NAMES = {
"normal": "Channel_Normal_Table",
"global": "Global_Table",
"statistic": "Channel_Statistic_Table",
"aux_global": "Aux_Global_Data_Table",
"aux": "Auxiliary_Table",
}
summary_headers_renaming_dict = {
"test_id_txt": "Test_ID",
"data_point_txt": "Data_Point",
"vmax_on_cycle_txt": "Vmax_On_Cycle",
"charge_time_txt": "Charge_Time",
"discharge_time_txt": "Discharge_Time",
}
normal_headers_renaming_dict = {
"aci_phase_angle_txt": "ACI_Phase_Angle",
"ref_aci_phase_angle_txt": "Reference_ACI_Phase_Angle",
"ac_impedance_txt": "AC_Impedance",
"ref_ac_impedance_txt": "Reference_AC_Impedance",
"charge_capacity_txt": "Charge_Capacity",
"charge_energy_txt": "Charge_Energy",
"current_txt": "Current",
"cycle_index_txt": "Cycle_Index",
"data_point_txt": "Data_Point",
"datetime_txt": "DateTime",
"discharge_capacity_txt": "Discharge_Capacity",
"discharge_energy_txt": "Discharge_Energy",
"internal_resistance_txt": "Internal_Resistance",
"is_fc_data_txt": "Is_FC_Data",
"step_index_txt": "Step_Index",
"sub_step_index_txt": "Sub_Step_Index", # new
"step_time_txt": "Step_Time",
"sub_step_time_txt": "Sub_Step_Time", # new
"test_id_txt": "Test_ID",
"test_time_txt": "Test_Time",
"voltage_txt": "Voltage",
"ref_voltage_txt": "Reference_Voltage", # new
"dv_dt_txt": "dV/dt",
"frequency_txt": "Frequency", # new
"amplitude_txt": "Amplitude", # new
}
class ArbinLoader(Loader):
""" Class for loading arbin-data from res-files.
Implemented Cellpy params (prms.Instruments.Arbin):
max_res_filesize
chunk_size
max_chunks
use_subprocess
detect_subprocess_need
sub_process_path
office_version
SQL_server
"""
def __init__(self):
"""initiates the ArbinLoader class"""
# could use __init__(self, cellpydata_object) and
# set self.logger = cellpydata_object.logger etc.
# then remember to include that as prm in "out of class" functions
# self.prms = prms
self.logger = logging.getLogger(__name__)
# use the following prm to limit to loading only
# one cycle or from cycle>x to cycle<x+n
# prms.Reader["limit_loaded_cycles"] = [cycle from, cycle to]
self.arbin_headers_normal = (
self.get_headers_normal()
) # the column headers defined by Arbin
self.cellpy_headers_normal = (
get_headers_normal()
) # the column headers defined by cellpy
self.arbin_headers_global = self.get_headers_global()
self.arbin_headers_aux_global = self.get_headers_aux_global()
self.arbin_headers_aux = self.get_headers_aux()
self.current_chunk = 0 # use this to set chunks to load
@staticmethod
def get_raw_units():
raw_units = dict()
raw_units["current"] = 1.0 # A
raw_units["charge"] = 1.0 # Ah
raw_units["mass"] = 0.001 # g
return raw_units
@staticmethod
def get_headers_normal():
"""Defines the so-called normal column headings for Arbin .res-files"""
headers = HeaderDict()
# - normal (raw-data) column headings (specific for Arbin)
headers["aci_phase_angle_txt"] = "ACI_Phase_Angle"
headers["ref_aci_phase_angle_txt"] = "Reference_ACI_Phase_Angle"
headers["ac_impedance_txt"] = "AC_Impedance"
headers["ref_ac_impedance_txt"] = "Reference_AC_Impedance" # new
headers["charge_capacity_txt"] = "Charge_Capacity"
headers["charge_energy_txt"] = "Charge_Energy"
headers["current_txt"] = "Current"
headers["cycle_index_txt"] = "Cycle_Index"
headers["data_point_txt"] = "Data_Point"
headers["datetime_txt"] = "DateTime"
headers["discharge_capacity_txt"] = "Discharge_Capacity"
headers["discharge_energy_txt"] = "Discharge_Energy"
headers["internal_resistance_txt"] = "Internal_Resistance"
headers["is_fc_data_txt"] = "Is_FC_Data"
headers["step_index_txt"] = "Step_Index"
headers["sub_step_index_txt"] = "Sub_Step_Index" # new
headers["step_time_txt"] = "Step_Time"
headers["sub_step_time_txt"] = "Sub_Step_Time" # new
headers["test_id_txt"] = "Test_ID"
headers["test_time_txt"] = "Test_Time"
headers["voltage_txt"] = "Voltage"
headers["ref_voltage_txt"] = "Reference_Voltage" # new
headers["dv_dt_txt"] = "dV/dt"
headers["frequency_txt"] = "Frequency" # new
headers["amplitude_txt"] = "Amplitude" # new
return headers
@staticmethod
def get_headers_aux():
"""Defines the so-called auxiliary table column headings for Arbin .res-files"""
headers = HeaderDict()
# - aux column headings (specific for Arbin)
headers["test_id_txt"] = "Test_ID"
headers["data_point_txt"] = "Data_Point"
headers["aux_index_txt"] = "Auxiliary_Index"
headers["data_type_txt"] = "Data_Type"
headers["x_value_txt"] = "X"
headers["x_dt_value"] = "dX_dt"
return headers
@staticmethod
def get_headers_aux_global():
"""Defines the so-called auxiliary global column headings for Arbin .res-files"""
headers = HeaderDict()
# - aux global column headings (specific for Arbin)
headers["channel_index_txt"] = "Channel_Index"
headers["aux_index_txt"] = "Auxiliary_Index"
headers["data_type_txt"] = "Data_Type"
headers["aux_name_txt"] = "Nickname"
headers["aux_unit_txt"] = "Unit"
return headers
@staticmethod
def get_headers_global():
"""Defines the so-called global column headings for Arbin .res-files"""
headers = HeaderDict()
# - global column headings (specific for Arbin)
headers["applications_path_txt"] = "Applications_Path"
headers["channel_index_txt"] = "Channel_Index"
headers["channel_number_txt"] = "Channel_Number"
headers["channel_type_txt"] = "Channel_Type"
headers["comments_txt"] = "Comments"
headers["creator_txt"] = "Creator"
headers["daq_index_txt"] = "DAQ_Index"
headers["item_id_txt"] = "Item_ID"
headers["log_aux_data_flag_txt"] = "Log_Aux_Data_Flag"
headers["log_chanstat_data_flag_txt"] = "Log_ChanStat_Data_Flag"
headers["log_event_data_flag_txt"] = "Log_Event_Data_Flag"
headers["log_smart_battery_data_flag_txt"] = "Log_Smart_Battery_Data_Flag"
headers["mapped_aux_conc_cnumber_txt"] = "Mapped_Aux_Conc_CNumber"
headers["mapped_aux_di_cnumber_txt"] = "Mapped_Aux_DI_CNumber"
headers["mapped_aux_do_cnumber_txt"] = "Mapped_Aux_DO_CNumber"
headers["mapped_aux_flow_rate_cnumber_txt"] = "Mapped_Aux_Flow_Rate_CNumber"
headers["mapped_aux_ph_number_txt"] = "Mapped_Aux_PH_Number"
headers["mapped_aux_pressure_number_txt"] = "Mapped_Aux_Pressure_Number"
headers["mapped_aux_temperature_number_txt"] = "Mapped_Aux_Temperature_Number"
headers["mapped_aux_voltage_number_txt"] = "Mapped_Aux_Voltage_Number"
headers[
"schedule_file_name_txt"
] = "Schedule_File_Name" # KEEP FOR CELLPY FILE FORMAT
headers["start_datetime_txt"] = "Start_DateTime"
headers["test_id_txt"] = "Test_ID" # KEEP FOR CELLPY FILE FORMAT
headers["test_name_txt"] = "Test_Name" # KEEP FOR CELLPY FILE FORMAT
return headers
@staticmethod
def get_raw_limits():
raw_limits = dict()
raw_limits["current_hard"] = 0.000_000_000_000_1
raw_limits["current_soft"] = 0.000_01
raw_limits["stable_current_hard"] = 2.0
raw_limits["stable_current_soft"] = 4.0
raw_limits["stable_voltage_hard"] = 2.0
raw_limits["stable_voltage_soft"] = 4.0
raw_limits["stable_charge_hard"] = 0.001
raw_limits["stable_charge_soft"] = 5.0
raw_limits["ir_change"] = 0.00001
return raw_limits
def _get_res_connector(self, temp_filename):
if use_ado:
is64bit_python = check64bit(current_system="python")
if is64bit_python:
constr = (
"Provider=Microsoft.ACE.OLEDB.12.0; Data Source=%s" % temp_filename
)
else:
constr = (
"Provider=Microsoft.Jet.OLEDB.4.0; Data Source=%s" % temp_filename
)
return constr
if SEARCH_FOR_ODBC_DRIVERS:
logging.debug("Searching for odbc drivers")
try:
drivers = [
driver
for driver in dbloader.drivers()
if "Microsoft Access Driver" in driver
]
logging.debug(f"Found these: {drivers}")
driver = drivers[0]
except IndexError as e:
logging.debug(
"Unfortunately, it seems the " "list of drivers is emtpy."
)
logging.debug("Use driver-name from config (if existing).")
driver = driver_dll
if is_macos:
driver = "/usr/local/lib/libmdbodbc.dylib"
else:
if not driver:
print(
"\nCould not find any odbc-drivers suitable "
"for .res-type files. "
"Check out the homepage of pydobc for info on "
"installing drivers"
)
print(
"One solution that might work is downloading "
"the Microsoft Access database engine (in correct"
" bytes (32 or 64)) "
"from:\n"
"https://www.microsoft.com/en-us/download/"
"details.aspx?id=13255"
)
print(
"Or install mdbtools and set it up "
"(check the cellpy docs for help)"
)
print("\n")
else:
logging.debug("Using driver dll from config file")
logging.debug(f"driver dll: {driver}")
self.logger.debug(f"odbc constr: {driver}")
else:
is64bit_python = check64bit(current_system="python")
if is64bit_python:
driver = "{Microsoft Access Driver (*.mdb, *.accdb)}"
else:
driver = "Microsoft Access Driver (*.mdb)"
self.logger.debug("odbc constr: {}".format(driver))
constr = "Driver=%s;Dbq=%s" % (driver, temp_filename)
logging.debug(f"constr: {constr}")
return constr
def _clean_up_loadres(self, cur, conn, filename):
if cur is not None:
cur.close() # adodbapi
if conn is not None:
conn.close() # adodbapi
if os.path.isfile(filename):
try:
os.remove(filename)
except WindowsError as e:
self.logger.warning("could not remove tmp-file\n%s %s" % (filename, e))
def _post_process(self, data):
fix_datetime = True
set_index = True
rename_headers = True
# TODO: insert post-processing and div tests here
# - check dtypes
# Remark that we also set index during saving the file to hdf5 if
# it is not set.
if rename_headers:
columns = {}
for key in self.arbin_headers_normal:
old_header = normal_headers_renaming_dict[key]
new_header = self.cellpy_headers_normal[key]
columns[old_header] = new_header
data.raw.rename(index=str, columns=columns, inplace=True)
try:
# TODO: check if summary df is existing (to only check if it is
# empty will give an error later!)
columns = {}
for key, old_header in summary_headers_renaming_dict.items():
try:
columns[old_header] = self.cellpy_headers_normal[key]
except KeyError:
columns[old_header] = old_header.lower()
data.summary.rename(index=str, columns=columns, inplace=True)
except Exception as e:
logging.debug(f"Could not rename summary df ::\n{e}")
if fix_datetime:
h_datetime = self.cellpy_headers_normal.datetime_txt
logging.debug("converting to datetime format")
# print(data.raw.columns)
data.raw[h_datetime] = data.raw[h_datetime].apply(
xldate_as_datetime, option="to_datetime"
)
h_datetime = h_datetime
if h_datetime in data.summary:
data.summary[h_datetime] = data.summary[h_datetime].apply(
xldate_as_datetime, option="to_datetime"
)
if set_index:
hdr_data_point = self.cellpy_headers_normal.data_point_txt
if data.raw.index.name != hdr_data_point:
data.raw = data.raw.set_index(hdr_data_point, drop=False)
return data
def _inspect(self, run_data):
"""Inspect the file -> reports to log (debug)"""
if not any([DEBUG_MODE]):
return run_data
if DEBUG_MODE:
checked_rundata = []
for data in run_data:
new_cols = data.raw.columns
for col in self.arbin_headers_normal:
if col not in new_cols:
logging.debug(f"Missing col: {col}")
# data.raw[col] = np.nan
checked_rundata.append(data)
return checked_rundata
def _iterdump(self, file_name, headers=None):
"""
Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame
"""
if headers is None:
headers = ["Discharge_Capacity", "Charge_Capacity"]
step_txt = self.arbin_headers_normal.step_index_txt
point_txt = self.arbin_headers_normal.data_point_txt
cycle_txt = self.arbin_headers_normal.cycle_index_txt
self.logger.debug("iterating through file: %s" % file_name)
if not os.path.isfile(file_name):
print("Missing file_\n %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.info(txt)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
constr = self._get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("constr str: %s" % constr)
# --------- read global-data ------------------------------------
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
global_data_df = pd.read_sql_query(sql, conn)
# col_names = list(global_data_df.columns.values)
self.logger.debug("sql statement: %s" % sql)
tests = global_data_df[self.arbin_headers_normal.test_id_txt]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
self.logger.debug("only selecting first test")
test_no = 0
self.logger.debug("setting data for test number %i" % test_no)
loaded_from = file_name
# fid = FileID(file_name)
start_datetime = global_data_df[
self.arbin_headers_global["start_datetime_txt"]
][test_no]
test_ID = int(
global_data_df[self.arbin_headers_normal.test_id_txt][test_no]
) # OBS
test_name = global_data_df[self.arbin_headers_global["test_name_txt"]][test_no]
# --------- read raw-data (normal-data) -------------------------
self.logger.debug("reading raw-data")
columns = ["Data_Point", "Step_Index", "Cycle_Index"]
columns.extend(headers)
columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_normal
sql_3 = "where %s=%s " % (self.arbin_headers_normal.test_id_txt, test_ID)
sql_5 = "order by %s" % self.arbin_headers_normal.data_point_txt
import time
info_list = []
info_header = ["cycle", "row_count", "start_point", "end_point"]
info_header.extend(headers)
self.logger.info(" ".join(info_header))
self.logger.info("-------------------------------------------------")
for cycle_number in range(1, 2000):
t1 = time.time()
self.logger.debug("picking cycle %i" % cycle_number)
sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
self.logger.debug("sql statement: %s" % sql)
normal_df = pd.read_sql_query(sql, conn)
t2 = time.time()
dt = t2 - t1
self.logger.debug("time: %f" % dt)
if normal_df.empty:
self.logger.debug("reached the end")
break
row_count, _ = normal_df.shape
start_point = normal_df[point_txt].min()
end_point = normal_df[point_txt].max()
last = normal_df.iloc[-1, :]
step_list = [cycle_number, row_count, start_point, end_point]
step_list.extend([last[x] for x in headers])
info_list.append(step_list)
self._clean_up_loadres(None, conn, temp_filename)
info_dict = pd.DataFrame(info_list, columns=info_header)
return info_dict
def investigate(self, file_name):
"""Investigate a .res file.
Args:
file_name: name of the file
Returns: dictionary with div. stats and info.
"""
step_txt = self.arbin_headers_normal.step_index_txt
point_txt = self.arbin_headers_normal.data_point_txt
cycle_txt = self.arbin_headers_normal.cycle_index_txt
self.logger.debug("investigating file: %s" % file_name)
if not os.path.isfile(file_name):
print("Missing file_\n %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.info(txt)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
constr = self._get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("constr str: %s" % constr)
# --------- read global-data ------------------------------------
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
global_data_df = pd.read_sql_query(sql, conn)
# col_names = list(global_data_df.columns.values)
self.logger.debug("sql statement: %s" % sql)
tests = global_data_df[self.arbin_headers_normal.test_id_txt]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
self.logger.debug("only selecting first test")
test_no = 0
self.logger.debug("setting data for test number %i" % test_no)
loaded_from = file_name
# fid = FileID(file_name)
start_datetime = global_data_df[
self.arbin_headers_global["start_datetime_txt"]
][test_no]
test_ID = int(
global_data_df[self.arbin_headers_normal.test_id_txt][test_no]
) # OBS
test_name = global_data_df[self.arbin_headers_global["test_name_txt"]][test_no]
# --------- read raw-data (normal-data) -------------------------
self.logger.debug("reading raw-data")
columns = ["Data_Point", "Step_Index", "Cycle_Index"]
columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_normal
sql_3 = "where %s=%s " % (self.arbin_headers_normal.test_id_txt, test_ID)
sql_5 = "order by %s" % self.arbin_headers_normal.data_point_txt
import time
info_list = []
info_header = ["cycle", "step", "row_count", "start_point", "end_point"]
self.logger.info(" ".join(info_header))
self.logger.info("-------------------------------------------------")
for cycle_number in range(1, 2000):
t1 = time.time()
self.logger.debug("picking cycle %i" % cycle_number)
sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
self.logger.debug("sql statement: %s" % sql)
normal_df = pd.read_sql_query(sql, conn)
t2 = time.time()
dt = t2 - t1
self.logger.debug("time: %f" % dt)
if normal_df.empty:
self.logger.debug("reached the end")
break
row_count, _ = normal_df.shape
steps = normal_df[self.arbin_headers_normal.step_index_txt].unique()
txt = "cycle %i: %i [" % (cycle_number, row_count)
for step in steps:
self.logger.debug(" step: %i" % step)
step_df = normal_df.loc[normal_df[step_txt] == step]
step_row_count, _ = step_df.shape
start_point = step_df[point_txt].min()
end_point = step_df[point_txt].max()
txt += " %i-(%i)" % (step, step_row_count)
step_list = [cycle_number, step, step_row_count, start_point, end_point]
info_list.append(step_list)
txt += "]"
self.logger.info(txt)
self._clean_up_loadres(None, conn, temp_filename)
info_dict = pd.DataFrame(info_list, columns=info_header)
return info_dict
def repair(self, file_name):
"""try to repair a broken/corrupted file"""
        raise NotImplementedError
def dump(self, file_name, path):
"""Dumps the raw file to an intermediate hdf5 file.
This method can be used if the raw file is too difficult to load and it
is likely that it is more efficient to convert it to an hdf5 format
and then load it using the `from_intermediate_file` function.
Args:
file_name: name of the raw file
path: path to where to store the intermediate hdf5 file (optional)
Returns:
full path to stored intermediate hdf5 file
information about the raw file (needed by the
`from_intermediate_file` function)
"""
# information = None # contains information needed by the from_
# intermediate_file reader
# full_path = None
# return full_path, information
        raise NotImplementedError
def _query_table(self, table_name, conn, sql=None):
self.logger.debug(f"reading {table_name}")
if sql is None:
sql = f"select * from {table_name}"
self.logger.debug(f"sql statement: {sql}")
df = pd.read_sql_query(sql, conn)
return df
def _make_name_from_frame(self, df, aux_index, data_type, dx_dt=False):
df_names = df.loc[
(df[self.arbin_headers_aux_global.aux_index_txt] == aux_index)
& (df[self.arbin_headers_aux_global.data_type_txt] == data_type),
:,
]
unit = df_names[self.arbin_headers_aux_global.aux_unit_txt].values[0]
nick = (
df_names[self.arbin_headers_aux_global.aux_name_txt].values[0] or aux_index
)
if dx_dt:
name = f"aux_d_{nick}_dt_u_d{unit}_dt"
else:
name = f"aux_{nick}_u_{unit}"
return name
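    # Illustrative example of the generated auxiliary column names, assuming an aux
    # channel whose Aux_Global_Data_Table row has Nickname "T1" and Unit "C":
    #   dx_dt=False -> "aux_T1_u_C"
    #   dx_dt=True  -> "aux_d_T1_dt_u_dC_dt"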
def _loader_win(
self,
file_name,
temp_filename,
*args,
bad_steps=None,
dataset_number=None,
data_points=None,
**kwargs,
):
new_tests = []
conn = None
table_name_global = TABLE_NAMES["global"]
table_name_aux_global = TABLE_NAMES["aux_global"]
table_name_aux = TABLE_NAMES["aux"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
if DEBUG_MODE:
time_0 = time.time()
constr = self._get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("reading global data table")
self.logger.debug(f"constr str: {constr}")
global_data_df = self._query_table(table_name=table_name_global, conn=conn)
tests = global_data_df[self.arbin_headers_normal.test_id_txt]
number_of_sets = len(tests)
self.logger.debug(f"number of datasets: {number_of_sets}")
if dataset_number is not None:
self.logger.info(f"Dataset number given: {dataset_number}")
self.logger.info(f"Available dataset numbers: {tests}")
test_nos = [dataset_number]
else:
test_nos = range(number_of_sets)
for counter, test_no in enumerate(test_nos):
if counter > 0:
self.logger.warning("** WARNING ** MULTI-TEST-FILE (not recommended)")
if not ALLOW_MULTI_TEST_FILE:
break
data = self._init_data(file_name, global_data_df, test_no)
test_id = data.test_ID
self.logger.debug("reading raw-data")
# --------- read raw-data (normal-data) ------------------------
length_of_test, normal_df = self._load_res_normal_table(
conn, test_id, bad_steps, data_points
)
# --------- read auxiliary data (aux-data) ---------------------
normal_df = self._load_win_res_auxiliary_table(
conn, normal_df, table_name_aux, table_name_aux_global, test_id
)
# --------- read stats-data (summary-data) ---------------------
sql = "select * from %s where %s=%s order by %s" % (
table_name_stats,
self.arbin_headers_normal.test_id_txt,
data.test_ID,
self.arbin_headers_normal.data_point_txt,
)
summary_df = pd.read_sql_query(sql, conn)
if summary_df.empty and prms.Reader.use_cellpy_stat_file:
txt = "\nCould not find any summary (stats-file)!"
txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
logging.debug(txt)
# TODO: Enforce creating a summary df or modify renaming summary df (post process part)
# normal_df = normal_df.set_index("Data_Point")
data.summary = summary_df
if DEBUG_MODE:
mem_usage = normal_df.memory_usage()
logging.debug(
f"memory usage for "
f"loaded data: \n{mem_usage}"
f"\ntotal: {humanize_bytes(mem_usage.sum())}"
)
logging.debug(f"time used: {(time.time() - time_0):2.4f} s")
data.raw = normal_df
data.raw_data_files_length.append(length_of_test)
data = self._post_process(data)
data = self.identify_last_data_point(data)
new_tests.append(data)
return new_tests
def _load_win_res_auxiliary_table(
self, conn, normal_df, table_name_aux, table_name_aux_global, test_id
):
aux_global_data_df = self._query_table(table_name_aux_global, conn)
if not aux_global_data_df.empty:
aux_df = self._get_aux_df(conn, test_id, table_name_aux)
aux_df, aux_global_data_df = self._aux_to_wide(aux_df, aux_global_data_df)
aux_df = self._rename_aux_cols(aux_df, aux_global_data_df)
if not aux_df.empty:
normal_df = self._join_aux_to_normal(aux_df, normal_df)
return normal_df
def _load_posix_res_auxiliary_table(self, aux_global_data_df, aux_df, normal_df):
if not aux_global_data_df.empty:
aux_df, aux_global_data_df = self._aux_to_wide(aux_df, aux_global_data_df)
aux_df = self._rename_aux_cols(aux_df, aux_global_data_df)
if not aux_df.empty:
normal_df = self._join_aux_to_normal(aux_df, normal_df)
return normal_df
def _join_aux_to_normal(self, aux_df, normal_df):
# TODO: clean up setting index (Data_Point). This is currently done in _post_process after
# the column names are changed to cellpy-column names ("data_point").
# It also keeps a copy of the "data_point"
# column. And is that really necessary.
normal_df.set_index(self.arbin_headers_normal.data_point_txt, inplace=True)
normal_df = normal_df.join(aux_df, how="left", )
normal_df.reset_index(inplace=True)
return normal_df
def _rename_aux_cols(self, aux_df, aux_global_data_df):
aux_dfs = []
if self.arbin_headers_aux.x_value_txt in aux_df.columns:
aux_df_x = aux_df[self.arbin_headers_aux.x_value_txt].copy()
aux_df_x.columns = [
self._make_name_from_frame(aux_global_data_df, z[1], z[0])
for z in aux_df_x.columns
]
aux_dfs.append(aux_df_x)
if self.arbin_headers_aux.x_dt_value in aux_df.columns:
aux_df_dx_dt = aux_df[self.arbin_headers_aux.x_dt_value].copy()
aux_df_dx_dt.columns = [
self._make_name_from_frame(aux_global_data_df, z[1], z[0], True)
for z in aux_df_dx_dt.columns
]
aux_dfs.append(aux_df_dx_dt)
aux_df = pd.concat(aux_dfs, axis=1)
return aux_df
def _aux_to_wide(self, aux_df, aux_global_data_df):
aux_df = aux_df.drop(self.arbin_headers_aux.test_id_txt, axis=1)
keys = [
self.arbin_headers_aux.data_point_txt,
self.arbin_headers_aux.aux_index_txt,
self.arbin_headers_aux.data_type_txt,
]
aux_df = aux_df.set_index(keys=keys)
aux_df = aux_df.unstack(2).unstack(1).dropna(axis=1)
aux_global_data_df = aux_global_data_df.fillna(0)
return aux_df, aux_global_data_df
def _get_aux_df(self, conn, test_id, table_name_aux):
columns_txt = "*"
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_aux
sql_3 = "where %s=%s " % (self.arbin_headers_aux.test_id_txt, test_id,)
sql_4 = ""
sql_aux = sql_1 + sql_2 + sql_3 + sql_4
aux_df = self._query_table(table_name_aux, conn, sql=sql_aux)
return aux_df
def _loader_posix(
self,
file_name,
temp_filename,
temp_dir,
*args,
bad_steps=None,
dataset_number=None,
data_points=None,
**kwargs,
):
# TODO: auxiliary channels (table)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
table_name_aux_global = TABLE_NAMES["aux_global"]
table_name_aux = TABLE_NAMES["aux"]
new_tests = []
if is_posix:
if is_macos:
self.logger.debug("\nMAC OSX USING MDBTOOLS")
else:
self.logger.debug("\nPOSIX USING MDBTOOLS")
else:
self.logger.debug("\nWINDOWS USING MDBTOOLS-WIN")
if DEBUG_MODE:
time_0 = time.time()
(
tmp_name_global,
tmp_name_raw,
tmp_name_stats,
tmp_name_aux_global,
tmp_name_aux,
) = self._create_tmp_files(
table_name_global,
table_name_normal,
table_name_stats,
table_name_aux_global,
table_name_aux,
temp_dir,
temp_filename,
)
# use pandas to load in the data
global_data_df = pd.read_csv(tmp_name_global)
tests = global_data_df[self.arbin_headers_normal.test_id_txt]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
if dataset_number is not None:
self.logger.info(f"Dataset number given: {dataset_number}")
self.logger.info(f"Available dataset numbers: {tests}")
test_nos = [dataset_number]
else:
test_nos = range(number_of_sets)
for counter, test_no in enumerate(test_nos):
if counter > 0:
self.logger.warning("** WARNING ** MULTI-TEST-FILE (not recommended)")
if not ALLOW_MULTI_TEST_FILE:
break
data = self._init_data(file_name, global_data_df, test_no)
self.logger.debug("reading raw-data")
(
length_of_test,
normal_df,
summary_df,
aux_global_data_df,
aux_df,
) = self._load_from_tmp_files(
data,
tmp_name_global,
tmp_name_raw,
tmp_name_stats,
tmp_name_aux_global,
tmp_name_aux,
temp_filename,
bad_steps,
data_points,
)
# --------- read auxiliary data (aux-data) ---------------------
normal_df = self._load_posix_res_auxiliary_table(
aux_global_data_df, aux_df, normal_df
)
if summary_df.empty and prms.Reader.use_cellpy_stat_file:
txt = "\nCould not find any summary (stats-file)!"
txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
logging.debug(txt)
# normal_df = normal_df.set_index("Data_Point")
data.summary = summary_df
if DEBUG_MODE:
mem_usage = normal_df.memory_usage()
logging.debug(
f"memory usage for "
f"loaded data: \n{mem_usage}"
f"\ntotal: {humanize_bytes(mem_usage.sum())}"
)
logging.debug(f"time used: {(time.time() - time_0):2.4f} s")
data.raw = normal_df
data.raw_data_files_length.append(length_of_test)
data = self._post_process(data)
data = self.identify_last_data_point(data)
new_tests.append(data)
return new_tests
def loader(
self,
file_name,
*args,
bad_steps=None,
dataset_number=None,
data_points=None,
**kwargs,
):
"""Loads data from arbin .res files.
Args:
file_name (str): path to .res file.
bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c)
to skip loading.
dataset_number (int): the data set number to select if you are dealing
with arbin files with more than one data-set.
data_points (tuple of ints): load only data from data_point[0] to
data_point[1] (use None for infinite).
Returns:
new_tests (list of data objects)
"""
# TODO: @jepe - insert kwargs - current chunk, only normal data, etc
if not os.path.isfile(file_name):
self.logger.info("Missing file_\n %s" % file_name)
return None
self.logger.debug("in loader")
self.logger.debug("filename: %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.debug(txt)
if (
filesize > prms.Instruments.Arbin.max_res_filesize
and not prms.Reader.load_only_summary
):
error_message = "\nERROR (loader):\n"
error_message += "%s > %s - File is too big!\n" % (
hfilesize,
humanize_bytes(prms.Instruments.Arbin.max_res_filesize),
)
error_message += "(edit prms.Instruments.Arbin ['max_res_filesize'])\n"
print(error_message)
return None
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
self.logger.debug("tmp file: %s" % temp_filename)
use_mdbtools = False
if use_subprocess:
use_mdbtools = True
if is_posix:
use_mdbtools = True
if use_mdbtools:
new_tests = self._loader_posix(
file_name,
temp_filename,
temp_dir,
*args,
bad_steps=bad_steps,
dataset_number=dataset_number,
data_points=data_points,
**kwargs,
)
else:
new_tests = self._loader_win(
file_name,
temp_filename,
*args,
bad_steps=bad_steps,
dataset_number=dataset_number,
data_points=data_points,
**kwargs,
)
new_tests = self._inspect(new_tests)
return new_tests
def _create_tmp_files(
self,
table_name_global,
table_name_normal,
table_name_stats,
table_name_aux_global,
table_name_aux,
temp_dir,
temp_filename,
):
import subprocess
# creating tmp-filenames
temp_csv_filename_global = os.path.join(temp_dir, "global_tmp.csv")
temp_csv_filename_normal = os.path.join(temp_dir, "normal_tmp.csv")
temp_csv_filename_stats = os.path.join(temp_dir, "stats_tmp.csv")
temp_csv_filename_aux_global = os.path.join(temp_dir, "aux_global_tmp.csv")
temp_csv_filename_aux = os.path.join(temp_dir, "aux_tmp.csv")
# making the cmds
mdb_prms = [
(table_name_global, temp_csv_filename_global),
(table_name_normal, temp_csv_filename_normal),
(table_name_stats, temp_csv_filename_stats),
(table_name_aux_global, temp_csv_filename_aux_global),
(table_name_aux, temp_csv_filename_aux),
]
# executing cmds
for table_name, tmp_file in mdb_prms:
with open(tmp_file, "w") as f:
subprocess.call([sub_process_path, temp_filename, table_name], stdout=f)
self.logger.debug(f"ran mdb-export {str(f)} {table_name}")
return (
temp_csv_filename_global,
temp_csv_filename_normal,
temp_csv_filename_stats,
temp_csv_filename_aux_global,
temp_csv_filename_aux,
)
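    # For reference: assuming `sub_process_path` points at an mdbtools `mdb-export`
    # binary, each subprocess call above is roughly equivalent to running, e.g.,
    #
    #     mdb-export /tmp/copied_file.res Channel_Normal_Table > /tmp/normal_tmp.csv
    #
    # (the file name and table name here are illustrative only).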
def _load_from_tmp_files(
self,
data,
temp_csv_filename_global,
temp_csv_filename_normal,
temp_csv_filename_stats,
temp_csv_filename_aux_global,
temp_csv_filename_aux,
temp_filename,
bad_steps,
data_points,
):
"""
if bad_steps is not None:
if not isinstance(bad_steps, (list, tuple)):
bad_steps = [bad_steps]
for bad_cycle, bad_step in bad_steps:
self.logger.debug(f"bad_step def: [c={bad_cycle}, s={bad_step}]")
sql_4 += "AND NOT (%s=%i " % (
self.headers_normal.cycle_index_txt,
bad_cycle,
)
sql_4 += "AND %s=%i) " % (self.headers_normal.step_index_txt, bad_step)
"""
        # TODO: include a more efficient way to load the csv (maybe a loop where
        # we load only chunks and only keep the parts that fulfill the
        # filters (e.g. bad_steps, data_points, ...))
normal_df = pd.read_csv(temp_csv_filename_normal)
# filter on test ID
normal_df = normal_df[
normal_df[self.arbin_headers_normal.test_id_txt] == data.test_ID
]
# sort on data point
if prms._sort_if_subprocess:
normal_df = normal_df.sort_values(self.arbin_headers_normal.data_point_txt)
if bad_steps is not None:
logging.debug("removing bad steps")
if not isinstance(bad_steps, (list, tuple)):
bad_steps = [bad_steps]
if not isinstance(bad_steps[0], (list, tuple)):
bad_steps = [bad_steps]
for bad_cycle, bad_step in bad_steps:
self.logger.debug(f"bad_step def: [c={bad_cycle}, s={bad_step}]")
selector = (
normal_df[self.arbin_headers_normal.cycle_index_txt] == bad_cycle
) & (normal_df[self.arbin_headers_normal.step_index_txt] == bad_step)
normal_df = normal_df.loc[~selector, :]
if prms.Reader["limit_loaded_cycles"]:
logging.debug("Not yet tested for aux data")
if len(prms.Reader["limit_loaded_cycles"]) > 1:
c1, c2 = prms.Reader["limit_loaded_cycles"]
selector = (
normal_df[self.arbin_headers_normal.cycle_index_txt] > c1
) & (normal_df[self.arbin_headers_normal.cycle_index_txt] < c2)
else:
c1 = prms.Reader["limit_loaded_cycles"][0]
selector = normal_df[self.arbin_headers_normal.cycle_index_txt] == c1
normal_df = normal_df.loc[selector, :]
if data_points is not None:
logging.debug("selecting data-point range")
logging.debug("Not yet tested for aux data")
d1, d2 = data_points
if d1 is not None:
selector = normal_df[self.arbin_headers_normal.data_point_txt] >= d1
normal_df = normal_df.loc[selector, :]
if d2 is not None:
selector = normal_df[self.arbin_headers_normal.data_point_txt] <= d2
normal_df = normal_df.loc[selector, :]
length_of_test = normal_df.shape[0]
        # read the remaining exported tables (summary/statistics and auxiliary data)
        summary_df = pd.read_csv(temp_csv_filename_stats)
        aux_global_data_df = pd.read_csv(temp_csv_filename_aux_global)
        aux_df = pd.read_csv(temp_csv_filename_aux)
        return length_of_test, normal_df, summary_df, aux_global_data_df, aux_df
import logging
import sys
import threading
import time
from collections import deque
import pandas as pd
from datetime import datetime
# ---
from win10toast import ToastNotifier
import pandas as pd
import numpy as np
import pickle
import os
import asyncio
import datetime
from datetime import datetime
from datetime import timedelta, timezone
from typing import Optional
from dotenv import load_dotenv
from operator import itemgetter
load_dotenv()
PWD = os.getenv("PWD")
db_name = PWD + "\\database" + "\\RVNUSDT.db"
import sys
sys.path.insert(1, PWD + "\\modules")
from alg_modules.alg_handler import AlgHandler
from plot_modules.candle_plot import CandlePlot
from collections import deque
from paper_trade import PaperTrader
import time
import logging
DEBUG = __debug__
LOG_FILE_NAME = "log_file_name.log"
format = "%(asctime)s [%(levelname)s]: %(message)s"
logging.basicConfig(
    filename=LOG_FILE_NAME if not DEBUG else None,
    format=format,
    encoding="utf-8",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
if not DEBUG:
    # logging.basicConfig() returns None, so attach the extra console handler
    # to the root logger explicitly.
    logging.getLogger().addHandler(logging.StreamHandler())
from stop_loss import StopLoss
from trade_strategy import TradeStrategy
from wss_thread import WssThread
from api_modules.open_binance_api import OpenBinanceApi
import pytz
tzdata = pytz.timezone('Europe/Moscow')
# ---
class Trader(object):
'''docstring for Trader'''
def __init__(self, ):
super().__init__()
        self.is_stopped = None
        self._stopped = False  # polled by the worker loop in thread_function
self._thread = threading.Thread(target=self.between_callback, args=())
# self._thread = threading.Thread(target=asyncio.run, args=())
self._lock = threading.Lock()
def between_callback(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self.thread_function(self))
self.loop.close()
async def thread_function(self, *args, **kwargs):
# ====
def compute_timedelta(dt: datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.now(timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime, result = None):
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
The timestamp in which to sleep until. If the datetime is naive then
it is assumed to be local time.
result: Any
If provided is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
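        # Usage sketch for the helper above (illustrative): sleep until the start
        # of the next minute before polling the exchange again.
        #
        #     next_minute = (datetime.now(timezone.utc) + timedelta(minutes=1)).replace(second=0, microsecond=0)
        #     await sleep_until(next_minute)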
# ====
server_time = datetime.fromtimestamp(OpenBinanceApi.server_time()/1000)
local_time = datetime.now()
delay = server_time - local_time
# ====
# notifications
toast = ToastNotifier()
static_notification_settings = dict(
title="Algo traid BOT",
duration = 20,
icon_path = "python.ico",
threaded = 1,
)
notify = lambda msg: toast.show_toast(
msg=msg,
**static_notification_settings,
)
msg="Watch out for notifications from here"
async def notification(msg):
if not notify(msg):
await asyncio.sleep(20)
notify(msg)
await notification(msg)
# ====
DATA_AWAIT_TIME = 1 # seconds
SERVER_DELAY = 10 # seconds
INTERVAL_SECONDS = 60 # seconds
# request that data from api
w = WssThread(
url='wss://stream.binance.com:9443/ws/rvnusdt@ticker',
maxlen=10,
)
w.start()
STOP_LOSS_ENABLED=True
STOP_LOSS_THRESHOLD=-1.3
DEQUE_MAX_LENGTH = 200
INTERVAL = '1m'
df = OpenBinanceApi.get_df(
pair = 'RVNUSDT',
interval = INTERVAL,
limit = 1000,
)
        # drop the last row; TODO: add an assert so the loop below does not duplicate the last row
df = df[:-1]
stop_loss_trade_flag = False
MA_list = (2, 7, 25, 100)
window = deque(maxlen=200)
for i, row in df.iterrows():
window.append(dict(row.squeeze()))
#initial currency resources
p_trdr = PaperTrader(
main_currency_label='RVN',
secondary_currency_label='USD',
main_currency_amount=100,
secondary_currency_amount=0,
fee=0.1,
)
trade_data = pd.DataFrame(
columns = p_trdr.get_df(timestamp=df.iloc[-1]['Date']).columns.values
)
stop_loss = StopLoss(
STOP_LOSS_THRESHOLD=STOP_LOSS_THRESHOLD,
)
# init alg
alg = AlgHandler(
df=pd.DataFrame([]),
MA_list=MA_list,
)
while not self._stopped:
logging.info('===get new data===')
new_df = OpenBinanceApi.get_df(
pair = 'RVNUSDT',
interval = INTERVAL,
limit = 2,
)
dt = datetime.fromtimestamp(int(new_df.Real_Date[-1:])/1000)
server_time = datetime.fromtimestamp(OpenBinanceApi.server_time()/1000)
logging.debug(f'server time: {server_time} {server_time.minute=}, {dt.minute=}')
# extract function?
if server_time.minute == dt.minute:
logging.debug('+++===success===+++')
window.append(dict(new_df[-2:-1].squeeze()))
                df_ = pd.DataFrame(window)
import os
from pathlib import Path
from typing import List, Tuple, Optional, Sequence, Any, Union, Generator
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import penguins as pg
from penguins import dataset as ds # for type annotations
class Experiment:
"""
Generic interface for experiments.
"""
default_margin = (0.5, 0.02) # applicable for 13C experiments
# This is overridden in subclasses.
# use (0.02, 0.02) for 1H experiments
# use (0.4, 0.05) for 15N experiments
def __init__(self,
peaks: List[Tuple[float, float]],
margin: Optional[Tuple[float, float]] = None,
):
self.peaks = peaks
self.margin = margin or self.default_margin
def integrate(self,
dataset: ds.Dataset2D,
) -> np.ndarray:
# Get absolute peak intensities for a given dataset.
return np.array([dataset.integrate(peak=peak,
margin=self.margin,
mode="max")
for peak in self.peaks])
def show_peaks(self, ax=None, **kwargs) -> None:
"""
Draw red crosses corresponding to each peak on an existing Axes
instance. Useful for checking whether the peaks actually line up with
the spectrum.
If 'ax' is not provided, defaults to currently active Axes.
Other kwargs are passed to ax.scatter().
"""
if ax is None:
ax = plt.gca()
scatter_kwargs = {"color": pg.color_palette("bright")[3],
"marker": "+", "zorder": 2}
scatter_kwargs.update(kwargs)
ax.scatter([p[1] for p in self.peaks], [p[0] for p in self.peaks],
**scatter_kwargs)
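    # Hedged usage sketch (the `hsqc` dataset object is assumed to have been read
    # elsewhere with penguins; peak positions and margins are illustrative):
    #
    #     expt = Experiment(peaks=[(120.5, 8.1), (118.2, 7.9)], margin=(0.4, 0.05))
    #     intensities = expt.integrate(hsqc)   # one absolute intensity per peak
    #     expt.show_peaks()                    # overlay crosses on the current Axes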
@property
def df(self) -> pd.DataFrame:
"""
Return a pandas DataFrame containing all the peaks. This DF has
columns "f1" and "f2".
"""
return pd.DataFrame.from_records(self.peaks, columns=("f1", "f2"))
def rel_ints_df(self,
dataset: ds.Dataset2D,
ref_dataset: ds.Dataset2D,
label: str = "",
) -> pd.DataFrame:
"""
Construct a dataframe of relative intensities vs a reference
dataset.
This DataFrame will have columns "f1", "f2", "expt", and "int".
"""
        df = pd.DataFrame()
        df["f1"] = [peak[0] for peak in self.peaks]
        df["f2"] = [peak[1] for peak in self.peaks]
        df["expt"] = label
        df["int"] = self.integrate(dataset) / self.integrate(ref_dataset)
        return df
def meanOrderFrequency(path_to_dataset):
"""
Displays the mean order frequency by utilizing the orders table.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
    print('On average, people order once every', orders['days_since_prior_order'].mean(), 'days')
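# Example call (the path is illustrative; the directory must contain orders.csv
# and the other Instacart csv files):
#
#     meanOrderFrequency('./instacart-market-basket-analysis')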
def numOrdersVsDays(path_to_dataset):
"""
Displays the number of orders and how this number varies with change in days since last order.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
order_by_date = orders.groupby(by='days_since_prior_order').count()
fig = plt.figure(figsize = [15, 7.5])
ax = fig.add_subplot()
order_by_date['order_id'].plot.bar(color = '0.75')
ax.set_xticklabels(ax.get_xticklabels(), fontsize= 15)
plt.yticks(fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_children()[7].set_color('0.1')
ax.get_children()[14].set_color('0.1')
ax.get_children()[21].set_color('0.1')
ax.get_children()[30].set_color('0.1')
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2]], visible=True)
plt.xticks(rotation = 'horizontal');
def numOrderDaysSizeBubble(path_to_dataset):
"""
Plots a bubble plot in which:
x: Days since Previous Order
y: Number of orders/1000
size: Average Size of order given it was placed on x
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
assert isinstance(path_to_dataset, str)
order_file_path = path_to_dataset + '/orders.csv'
order_product_prior_file_path = path_to_dataset + '/order_products__prior.csv'
orders = pd.read_csv(order_file_path)
order_products_prior = pd.read_csv(order_product_prior_file_path)
order_id_count_products = order_products_prior.groupby(by='order_id').count()
orders_with_count = order_id_count_products.merge(orders, on='order_id')
order_by_date = orders.groupby(by='days_since_prior_order').count()
# take above table and group by days_since_prior_order
df_mean_order_size = orders_with_count.groupby(by='days_since_prior_order').mean()['product_id']
df_mean_order_renamed = df_mean_order_size.rename('average_order_size')
bubble_plot_dataframe = pd.concat([order_by_date['order_id'], df_mean_order_renamed], axis=1)
bubble_plot_dataframe['average_order_size'].index.to_numpy()
fig = plt.figure(figsize=[15,7.5])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy(), bubble_plot_dataframe['order_id'].values, s=((bubble_plot_dataframe['average_order_size'].values/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 31, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
fig = plt.figure(figsize=[10,9])
ax = fig.add_subplot()
plt.scatter(bubble_plot_dataframe['average_order_size'].index.to_numpy()[:8], bubble_plot_dataframe['order_id'].values[:8], s=((bubble_plot_dataframe['average_order_size'].values[:8]/bubble_plot_dataframe['average_order_size'].values.mean())*10)**3.1, alpha=0.5, c = '0.5')
plt.xticks(np.arange(0, 8, 1.0));
ax.xaxis.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Days since previous order', fontsize=16)
ax.set_ylabel('Number of orders / 1000', fontsize=16)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000))))
my_yticks = ax.get_yticks()
plt.yticks([my_yticks[-2], my_yticks[0]], visible=True);
def orderTimeHeatMaps(path_to_dataset):
"""
Plots the distribution of order with respect to hour of day and day of the week.
:param path_to_dataset: this path should have all the .csv files for the dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
order_file_path = path_to_dataset + '/orders.csv'
orders = pd.read_csv(order_file_path)
grouped_data = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
grouped_data = grouped_data.pivot('order_dow', 'order_hour_of_day', 'order_number')
grouped_data.index = pd.CategoricalIndex(grouped_data.index, categories=[0,1,2,3,4,5,6])
grouped_data.sort_index(level=0, inplace=True)
plt.figure(figsize=(12,6))
hour_of_day = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14','15','16', '17', '18', '19','20', '21', '22', '23']
dow = [ 'SUN', 'MON', 'TUES', 'WED', 'THUR','FRI','SAT']
ax = sns.heatmap(grouped_data, xticklabels=hour_of_day,yticklabels=dow,cbar_kws={'label': 'Number Of Orders Made/1000'})
cbar = ax.collections[0].colorbar
cbar.set_ticks([0, 10000, 20000, 30000, 40000, 50000])
cbar.set_ticklabels(['0','10.0','20.0','30.0','40.0','50.0'])
ax.figure.axes[-1].yaxis.label.set_size(15)
ax.figure.axes[0].yaxis.label.set_size(15)
ax.figure.axes[0].xaxis.label.set_size(15)
ax.set(xlabel='Hour of Day', ylabel= "Day of the Week")
ax.set_title("Number of orders made by Day of the Week vs Hour of Day", fontsize=15)
plt.show()
grouped_data = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
grouped_data = grouped_data.pivot('order_dow', 'order_hour_of_day', 'order_number')
grouped_data.index = pd.CategoricalIndex(grouped_data.index, categories=[0,1,2,3,4,5,6])
grouped_data.sort_index(level=0, inplace=True)
plt.figure(figsize=(12,6))
hour_of_day = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14','15','16', '17', '18', '19','20', '21', '22', '23']
dow = [ 'SUN', 'MON', 'TUES', 'WED', 'THUR','FRI','SAT']
ax = sns.heatmap(np.log(grouped_data), xticklabels=hour_of_day,yticklabels=dow,cbar=False)
cbar = ax.collections[0].colorbar
ax.figure.axes[-1].yaxis.label.set_size(15)
ax.figure.axes[0].yaxis.label.set_size(15)
ax.figure.axes[0].xaxis.label.set_size(15)
ax.set(xlabel='Hour of Day', ylabel= "Day of the Week")
ax.set_title("Number of orders made by Day of the Week vs Hour of Day (Log Scale)", fontsize=15)
plt.show()
def generateWordCloud(path_to_dataset):
"""
Generates word cloud.
:param path_to_dataset: path to dataset
:type path_to_dataset: str
"""
assert isinstance(path_to_dataset, str)
from wordcloud import WordCloud
import pandas as pd
import matplotlib.pyplot as plt
product_path = path_to_dataset + "/products.csv"
aisles_path = path_to_dataset + "/aisles.csv"
departments_path = path_to_dataset + "/departments.csv"
order_product_prior_path = path_to_dataset + "/order_products__prior.csv"
df_products = pd.read_csv(product_path)
df_aisles = pd.read_csv(aisles_path)
df_departments = pd.read_csv(departments_path)
df_order_products_prior = pd.read_csv(order_product_prior_path)
# Merge Prior orders, Product, Aisle and Department
df_order_products_prior_merged = pd.merge(
pd.merge(pd.merge(df_order_products_prior, df_products, on="product_id", how="left"),
df_aisles,
on="aisle_id",
how="left"),
df_departments,
on="department_id",
how="left")
# Top N products by frequency
top_products = df_order_products_prior_merged["product_name"].value_counts()
d = top_products.to_dict()
wordcloud = WordCloud(background_color='white')
wordcloud.generate_from_frequencies(frequencies=d)
plt.figure(figsize = (8,8))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
def no_of_orders(path_to_data = './instacart-market-basket-analysis'):
"""
pass path to orders.csv
"""
bins = 10
path = path_to_data + '/orders.csv'
import numpy as np # linear algebra
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
from scipy.optimize import curve_fit
from IPython.display import display, HTML
orders = pd.read_csv(path)
sns.set_style('dark')
customer_no = orders.groupby("user_id", as_index = False)["order_number"].max()
n, bins, patches = plt.hist(customer_no["order_number"] , bins, color='blue', alpha=0.5)
plt.xlabel("No. of Orders")
plt.ylabel("Count")
plt.title("Number of Orders per Customer")
def freq_product(path1 = "./instacart-market-basket-analysis/order_products__train.csv",path2 = "./instacart-market-basket-analysis/order_products__prior.csv" , path3 = "./instacart-market-basket-analysis/products.csv"):
import numpy as np # linear algebra
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
from scipy.optimize import curve_fit
from IPython.display import display, HTML
    order_products_train = pd.read_csv(path1)
#--------------------------------------------------------------
# By <NAME>
# Painted Harmony Group, Inc
# June 22, 2017
# Please See LICENSE.txt
#--------------------------------------------------------------
import pandas as pd
import dateparser
import datetime
class TrumpTweetUtilities():
def count_rows_group_by_date(self, dataframe, date_col):
#df = pd.DataFrame(dataframe[date_col].astype("datetime64"))
counts = dataframe.groupby(dataframe[date_col].dt.date).count()
dateArray = []
countArray = []
for index, row in counts.iterrows():
dateArray.append(index)
countArray.append(row[-1])
        newdf = pd.DataFrame(index=dateArray, data=countArray)
        return newdf
from flask import Blueprint, jsonify, render_template
from requests import get
import pandas as pd
# Create a blueprint for the REST API
rest_bp = Blueprint(
'api',
__name__,
template_folder="templates",
static_folder="static",
)
"""
API endpoints for world data
"""
# All available data, for all countries
world_summary = get("https://api.covid19api.com/summary").json()
# Route for API docs page
@rest_bp.route('/api/docs')
def api_docs():
return render_template("apidocs.html", title="API Documentation")
@rest_bp.route('/api/world/summary/', methods=["GET"])
def world_api_summary():
df = pd.DataFrame(world_summary['Countries']).drop(["Premium"], axis=1)
# Show totals for all columns
total = df.sum(axis=0)
# Convert the DataFrame to a dictionary
df_dict = df.to_dict(orient='records')
return jsonify(df_dict)
# Get historic data for a country
@rest_bp.route('/api/world/<string:country>/')
def country_api_history(country):
# Define API endpoint, and fetch data
endpoint = get(f'https://api.covid19api.com/total/country/{country}')
data = endpoint.json()
df = pd.DataFrame(data).sort_values(by="Date", ascending=False)
df_dict = df.to_dict(orient="records")
return jsonify(df_dict)
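# Example request against the route above (host and country slug are illustrative;
# country names follow the covid19api.com conventions):
#
#     GET /api/world/germany/
#
# returns a JSON list of daily records (e.g. Confirmed, Deaths, Recovered, Date),
# sorted with the most recent date first.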
@rest_bp.route('/api/world/percentages/')
def world_api_percentages():
df = pd.DataFrame(world_summary["Countries"])
names = df["Country"]
cases_percentages = round(df['TotalConfirmed'].div(
world_summary['Global']['TotalConfirmed']), 2)
deaths_percentages = round(df['TotalDeaths'].div(
world_summary['Global']['TotalDeaths']), 2)
recoveries_percentages = round(df['TotalRecovered'].div(
world_summary['Global']['TotalRecovered']), 2)
new_cases = round(df['NewConfirmed'].div(
world_summary['Global']['NewConfirmed']), 2)
new_deaths = round(df['NewDeaths'].div(
world_summary['Global']['NewDeaths']), 2)
new_recoveries = round(df['NewRecovered'].div(
world_summary['Global']['NewRecovered']), 2)
df_list = [names, cases_percentages, deaths_percentages,
recoveries_percentages, new_cases, new_deaths, new_recoveries]
merged_df = pd.concat(df_list, axis=1)
merged_df_dict = merged_df.to_dict(orient='records')
return jsonify(merged_df_dict)
@rest_bp.route('/api/world/demographic/')
def world_api_demographic():
data = get("https://covid.ourworldindata.org/data/owid-covid-data.json").json()
    df = pd.DataFrame(data)
import numpy as np
import pandas as pd
import glob
import os
import re
import pickle
from tqdm import tqdm
from sklearn.metrics import r2_score
from numba_functions import *
# Data loaders
def load_trade_by_id(stock_id):
parquet_path = glob.glob(f'./dataset/trade_train.parquet/stock_id={stock_id}/*')[0]
df = pd.read_parquet(parquet_path)
return df
def load_book_by_id(stock_id):
parquet_path = glob.glob(f'./dataset/book_train.parquet/stock_id={stock_id}/*')[0]
df = pd.read_parquet(parquet_path)
return df
def load_train_by_id(stock_id):
df = pd.read_csv(f'./dataset/train_/stock_id_{stock_id}.csv')
return df
def get_path_by_id(type, stock_id):
if type in ['book', 'trade']:
return glob.glob(f'./dataset/{type}_train.parquet/stock_id={stock_id}/*')[0]
else:
print(f'Invalid type: {type}')
return None
def load_trade():
df_train = pd.read_csv('./dataset/train.csv')
stock_ids = df_train.stock_id.unique().tolist()
df_list = []
for stock_id in tqdm(stock_ids):
parquet_path = glob.glob(f'./dataset/trade_train.parquet/stock_id={stock_id}/*')[0]
df = pd.read_parquet(parquet_path)
df['stock_id'] = stock_id
df_list.append(df)
df_trade = pd.concat(df_list, ignore_index=True)
return df_trade
def load_book():
    df_book = pd.read_csv('./dataset/train.csv')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import math
from sklearn import metrics
from collections import Iterable
from sklearn.cluster import KMeans
from scipy import optimize as sco
import datetime as dt
import vnpy.analyze.data.data_prepare as dp
try:
from data_provider.nestlib.progress_bar import ProgressBar
is_online = True
def progress_map(func, iterator):
bar = ProgressBar(max_value=len(iterator))
bar.start()
wrapped_iterator = map(func, iterator)
results = []
for i, _ in enumerate(wrapped_iterator):
results.append(_)
bar.update(i + 1)
return results
except ImportError:
is_online = False
from tqdm import tqdm
def progress_map(func, iterator, desc=None):
with tqdm(iterator, desc=desc, ncols=100) as bar:
results = []
for i in bar:
bar.set_postfix_str(str(i))
results.append(func(i))
return results
class StraightLine():
def __init__(self, x1=None, y1=None, x2=None, y2=None, slope=None):
if slope is not None:
self.slope = slope
else:
if x1 == x2:
self.slope = np.nan
else:
self.slope = (y2 - y1) / (x2 - x1)
self.intercept = y1 - self.slope * x1
def point_distance(self, x0, y0):
return abs(self.slope * x0 - y0 + self.intercept) / math.sqrt(self.slope ** 2 + 1)
def is_point_above_line(self, x0, y0):
pred_y = x0 * self.slope + self.intercept
if pred_y == y0:
            print(f'Line y = {self.slope}x + {self.intercept} passes through the point ({x0}, {y0})')
import ipdb;
ipdb.set_trace()
return y0 > pred_y
def predict(self, x_list, limit=None):
if not isinstance(x_list, Iterable):
x_list = [x_list]
results = [self.slope * _ + self.intercept for _ in x_list]
if len(results) == 1:
return results[0]
if limit is not None:
results = [
_ if _ > min(limit) and _ < max(limit) else np.nan
for _ in results
]
return results
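# A small usage sketch of StraightLine (defined but never called here; all numbers
# are illustrative only).
def _straight_line_example():
    line = StraightLine(x1=0, y1=1.0, x2=10, y2=2.0)    # slope 0.1, intercept 1.0
    above = line.is_point_above_line(5, 3.0)            # True, since 3.0 > 1.5
    distance = line.point_distance(5, 3.0)              # perpendicular distance to the line
    preds = line.predict([0, 5, 10], limit=(0.5, 3.0))  # values outside the limit band become NaN
    return above, distance, preds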
def clustering_kmeans(num_list, thresh=0.03):
    # Cluster the resistance/support levels, trying cluster counts from 1 up to the number of levels
k_rng = range(1, len(num_list) + 1)
est_arr = [
KMeans(n_clusters=k).fit([[num] for num in num_list])
for k in k_rng
]
    # Sum of within-cluster distances (inertia) for each candidate clusterer
sum_squares = [e.inertia_ for e in est_arr]
    # Ratio of each distance sum relative to the single-cluster case
diff_squares = [squares / sum_squares[0] for squares in sum_squares]
diff_squares_pd = pd.Series(diff_squares)
    # Select the clusterer according to the threshold setting
thresh_pd = diff_squares_pd[diff_squares_pd < thresh]
if len(thresh_pd) > 0:
select_k = thresh_pd.index[0] + 1
else:
        # If none meets the threshold, fall back to the clusterer with the most clusters
select_k = k_rng[-1]
est = est_arr[select_k - 1]
results = est.predict([[num] for num in num_list])
return results
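# Hedged example of clustering_kmeans (defined but not executed; the price levels
# below are made up). Nearby levels end up with the same cluster label.
def _clustering_kmeans_example():
    levels = [10.0, 10.05, 10.1, 12.5, 12.55, 15.0]
    labels = clustering_kmeans(levels, thresh=0.03)
    # `labels` holds one cluster id per input level, e.g. something like [0, 0, 0, 1, 1, 2]
    return labels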
class SupportResistanceLine():
def __init__(self, data, kind='support'):
if not isinstance(data, pd.Series):
            raise TypeError('data must be a pd.Series')
self.y = data.copy()
self.x = np.arange(0, len(data))
df = pd.DataFrame(columns=['x', 'y'])
df.x = self.x
df.y = self.y
df.set_index(['x'], inplace=True)
        df.index.name = None  # drop the index name
self.df = df
self.kind = kind
self.dot_color = 'g' if kind == 'support' else 'r'
def find_best_poly(self, poly_min=1, poly_max=100, show=False):
"""
        Find the best polynomial fitting order.
        :param poly_min: minimum fitting order
        :param poly_max: maximum fitting order
        :param show: whether to plot the result
:return:
"""
df = self.df
rolling_window = int(len(self.y) / 30)
df['y_roll_mean'] = df['y'].rolling(rolling_window, min_periods=1).mean()
# 度量原始y值和均线y_roll_mean的距离distance_mean
distance_mean = np.sqrt(metrics.mean_squared_error(df.y, df.y_roll_mean))
poly = poly_min
while poly < poly_max:
            # Iteratively fit polynomial trend curves y_fit of increasing order
p = np.polynomial.Chebyshev.fit(self.x, self.y, poly)
y_fit = p(self.x)
distance_fit = np.sqrt(metrics.mean_squared_error(df.y, y_fit))
            # Measure the distance between the raw y values and the fitted trend curve y_fit
if distance_fit <= distance_mean * 0.6:
                # If distance_fit <= distance_mean * 0.6, the fitted curve represents the original series well enough, so stop iterating
df[f'poly_{poly}'] = y_fit
break
poly += 1
self.best_poly = poly
self.p = p
self.df['best_poly'] = y_fit
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df.plot(ax=ax, figsize=(16, 9), colormap='coolwarm')
plt.show()
def find_extreme_pos(self, show=False):
"""寻找极值点"""
p = self.p
        # Roots of the first derivative (candidate extreme positions)
extreme_pos = [int(round(_.real)) for _ in p.deriv().roots()]
extreme_pos = [_ for _ in extreme_pos if _ > 0 and _ < len(self.df)]
        # Use the second derivative to separate maxima from minima
second_deriv = p.deriv(2)
min_extreme_pos = []
max_extreme_pos = []
for pos in extreme_pos:
if second_deriv(pos) > 0:
min_extreme_pos.append(pos)
elif second_deriv(pos) < 0:
max_extreme_pos.append(pos)
self.min_extreme_pos = min_extreme_pos
self.max_extreme_pos = max_extreme_pos
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
self.df.plot(ax=ax)
ax.scatter(self.min_extreme_pos, [p(_) for _ in self.min_extreme_pos], s=50, c='g')
ax.scatter(self.max_extreme_pos, [p(_) for _ in self.max_extreme_pos], s=50, c='r')
plt.show()
    # Locate the real extremes in the raw data around the fitted extreme points
def find_real_extreme_points(self, show=False):
        # Find the nearest resistance points on both sides of a support point, or vice versa
def find_left_and_right_pos(pos, refer_pos):
refer_sr = pd.Series(refer_pos)
left_pos = refer_sr[refer_sr < pos].iloc[-1] if len(refer_sr[refer_sr < pos]) > 0 else 0
right_pos = refer_sr[refer_sr > pos].iloc[0] if len(refer_sr[refer_sr > pos]) > 0 else len(self.df)
return left_pos, right_pos
        # Find the real extreme in the raw data around a fitted extreme point
def extreme_around(left_pos, right_pos):
if self.kind == 'support':
extreme_around_pos = self.y.iloc[left_pos:right_pos].idxmin()
elif self.kind == 'resistance':
extreme_around_pos = self.y.iloc[left_pos:right_pos].idxmax()
            # If the nearby extreme sits on the interval edge, the region is largely monotonic; treat it as a false extreme and discard it
if extreme_around_pos in (left_pos, right_pos):
return 0
return extreme_around_pos
extreme_pos = self.min_extreme_pos
refer_pos = self.max_extreme_pos
if self.kind == 'resistance':
extreme_pos, refer_pos = refer_pos, extreme_pos
support_resistance_pos = []
for index, pos in enumerate(extreme_pos):
if pos in [0, len(self.df)]:
continue
left_pos, right_pos = find_left_and_right_pos(pos, refer_pos)
support_resistance_pos.append(
extreme_around(left_pos, right_pos)
)
if 0 in support_resistance_pos:
support_resistance_pos.remove(0)
        # Remove duplicates
support_resistance_pos = list(set(support_resistance_pos))
support_resistance_sr = pd.Series(
self.df.y.loc[support_resistance_pos],
index=support_resistance_pos
).sort_index()
support_resistance_sr.index.name = 'x'
support_resistance_df = support_resistance_sr.reset_index()
self.support_resistance_df = support_resistance_df
if show:
self.show_line(support_resistance_df)
return self.support_resistance_df
def cluster_nearest_support_resistance_pos(self, show=False, inplace=True):
def clustering_nearest(num_list, thresh=len(self.df) / 80):
sr = pd.Series(num_list).sort_values().reset_index(drop=True)
while sr.diff().min() < thresh:
index1 = sr.diff().idxmin()
index2 = index1 - 1
num1 = sr[index1]
num2 = sr[index2]
y1 = self.df['y'].iloc[num1]
y2 = self.df['y'].iloc[num2]
smaller_y_index = index1 if y1 < y2 else index2
bigger_y_index = index1 if y1 > y2 else index2
sr = sr.drop(bigger_y_index if self.kind == 'support' else smaller_y_index).reset_index(drop=True)
return sr.tolist()
clustered_pos = clustering_nearest(self.support_resistance_df['x'].tolist())
support_resistance_df = self.support_resistance_df[self.support_resistance_df['x'].isin(clustered_pos)].copy()
if show:
self.show_line(support_resistance_df)
if inplace:
self.support_resistance_df = support_resistance_df
return support_resistance_df
def cluster_kmeans_support_resistance_pos(self, show=False, inplace=True):
        # Cluster the extreme positions
support_resistance_df = self.support_resistance_df.copy()
support_resistance_df['cluster'] = clustering_kmeans(support_resistance_df.x, 0.001)
print(
f"共{len(support_resistance_df)}个极值点,聚类为{support_resistance_df['cluster'].max() + 1}个类"
)
def extreme_in_cluster(cluster_df):
if self.kind == 'support':
cluster_df['is_extreme'] = cluster_df['y'] == cluster_df['y'].min()
else:
cluster_df['is_extreme'] = cluster_df['y'] == cluster_df['y'].max()
return cluster_df
        # Keep only the extreme value within each cluster
support_resistance_df = support_resistance_df.groupby('cluster').apply(extreme_in_cluster)
support_resistance_df = support_resistance_df[support_resistance_df['is_extreme']].drop('is_extreme', axis=1)
if show:
self.show_line(support_resistance_df)
if inplace:
self.support_resistance_df = support_resistance_df
return support_resistance_df
def score_lines_from_a_point(self, last_support_resistance_pos):
        # Only consider points before the given point
support_resistance_df = self.support_resistance_df[
(self.support_resistance_df['x'] <= last_support_resistance_pos['x'])
# & (self.support_resistance_df['x'] >= len(self.df) * 0.25)
].copy()
if len(support_resistance_df) <= 2:
            return pd.DataFrame()
import os
from os.path import join
import time
import pandas as pd
import numpy as np
import pickle
import warnings
# from matplotlib import pyplot as plt
import argparse
import logging
import csv
import sys
import ujson as ujson
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import nltk.data
from gensim.models import Word2Vec, KeyedVectors, LdaMulticore
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
import scipy.spatial.distance as dist
warnings.simplefilter("ignore")
output_dir = 'output'
save_dir = 'TC_saveFolder'
input_file = ''
load = True
save = True
verbose = False
np.random.seed(0)
# Word2Vec Parameters
remove_stopwords = True
stemming = True
pretrained = False
num_features = 300 # Word vector dimensionality
min_word_count = 50 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 5 # Context window size
downsampling = 1e-4 # Downsample setting for frequent words
# LDA Parameters
num_topics = 100
workers = 3
# KMeans Parameters
n_clusters = 100
n_init = 10
# Relevancy Parameters
rel_threshold = 1
def parse_arguments():
parser = argparse.ArgumentParser(description="Parameters specify where/how files are saved. \n"
"This program takes as input a csv,tsv, or json file and outputs stuff.")
parser.add_argument('input_file', type=str, default=input_file)
parser.add_argument('output_dir', type=str, default=output_dir)
parser.add_argument('--save', '-s', action='store_true', default=True,
help='saves intermediary files to save_dir')
parser.add_argument('--load', '-l', action='store_true', default=True,
help='loads intermediary files automatically if available')
parser.add_argument('--fresh', '-f', action='store_true', default=False,
help='sets loading and saving to false, overwriting existing files')
parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='prints program progress')
parser.add_argument('--save_dir', type=str, default=join(output_dir, save_dir))
args = parser.parse_args()
if args.fresh:
args.save = False
args.load = False
return args
def mkdir(dirPath):
if not os.path.exists(dirPath):
os.makedirs(dirPath)
else:
print("WARNING: Directory {} already exists. Data may be overwritten if 'load' option is disabled.".format(
dirPath), flush=True)
if not load:
print("You have 3 seconds to terminate program...", flush=True)
time.sleep(3)
def text_to_wordlist(text, remove_stopwords=remove_stopwords, stemming=stemming):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove HTML
    review_text = BeautifulSoup(text, "html.parser").get_text()
#
# 2. Remove non-letters
review_text = re.sub("[^a-zA-Z]", " ", review_text)
#
# 3. Convert words to lower case and split them
words = review_text.lower().split()
#
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
#
# 5. Optionally stem topically similar words
if stemming:
p_stemmer = PorterStemmer()
for i in range(len(words)):
try: words[i] = p_stemmer.stem(words[i])
except: pass
return [words]
def text_to_sentences(text, tokenizer, remove_stopwords=remove_stopwords, stemming=stemming):
# Function to split a review into parsed sentences. Returns a
# list of sentences, where each sentence is a list of words
#
# 1. Use the NLTK tokenizer to split the paragraph into sentences
# raw_sentences = tokenizer.tokenize(review.decode('utf-8').strip())
raw_sentences = tokenizer.tokenize(str(text).strip())
#
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call review_to_wordlist to get a list of words
sentences += text_to_wordlist(raw_sentence, remove_stopwords=remove_stopwords, stemming=stemming)
#
# Return the list of sentences (each sentence is a list of words,
# so this returns a list of lists
return sentences
def wordlists_to_words(wordlists, saveAs='', stemming=stemming):
if load:
if os.path.exists(saveAs):
if verbose: newprint("Loaded corpus.")
# return pickle.load(open(saveAs, 'rb'))
words = []
for wordlist in wordlists:
for word in wordlist:
words.append(word)
    # use a list so the elements can be stemmed in place (sets are not indexable)
    words = list(set(words))
    if stemming:
        p_stemmer = PorterStemmer()
        for i in range(len(words)):
            try:
                words[i] = p_stemmer.stem(words[i])
            except Exception:
                pass
words = set(words)
if saveAs != '':
pickle.dump(words, open(saveAs, 'wb'))
return words
def newprint(string):
print(string, flush=True)
sys.stdout.flush()
def train_Word2Vec(sentences, saveAs=''):
model = Word2Vec(sentences, workers=num_workers, \
size=num_features, min_count=min_word_count, \
window=context, sample=downsampling, seed=1, iter=10)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
if save:
model.save(join(output_dir, save_dir, 'w2v_Model.w2v'))
dictionary = KeyedVector_to_Dict(model.wv, saveAs=saveAs)
return dictionary
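# Sketch of how train_Word2Vec is meant to be called (the corpus argument is a
# placeholder: a large list of token lists produced by text_to_sentences, big
# enough for words to pass min_word_count). Defined here but never called.
def _train_word2vec_example(corpus_sentences):
    word_vectors = train_Word2Vec(corpus_sentences, saveAs="")  # dict: word -> np.ndarray
    return word_vectors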
def KeyedVector_to_Dict(kv, saveAs=''):
words = set(kv.index2word)
dict = {}
for word in words:
dict[word] = kv[word]
if saveAs != '' and save:
pickle.dump(dict, open(saveAs, 'wb'))
return dict
def load_GoogleWords(words, saveAs=''):
firstStart = time.time()
if verbose: newprint("Loading Google Word2Vec...")
start = time.time()
model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
if verbose: newprint("Finished loading in {} seconds. Generating Dictionary...".format(time.time() - start))
start = time.time()
dictionary = {}
missing_words = 0
for word in words:
try:
dictionary[word] = model[word]
except KeyError:
missing_words += 1
# print("Word {} not in Google's Word2Vec dictionary.".format(word))
if verbose: newprint(
"Finished generating dict in {} seconds.\n {} words were not found in Word2Vec dictionary. This usually includes people names and typos.".format(
time.time() - start, missing_words))
if saveAs != '' and save:
if verbose: newprint("Saving Model...")
pickle.dump(dictionary, open(saveAs, 'wb'))
if verbose: newprint("Finished loading words from Google Word2Vec in {} seconds.".format(time.time() - firstStart))
return dictionary
def Dict_to_Matrix(dict, saveAs=''):
items = sorted(dict.items())
N = len(items)
M = len(items[0][1])
matrix = np.zeros((N, M))
for i in range(N):
matrix[i, :] = items[i][1]
if saveAs != '':
pickle.dump(matrix, open(saveAs, 'wb'))
return matrix
def sort_clusters(clusters, wv, centroids):
sorted_clusters = []
for i in range(len(clusters)):
distances = []
for word in clusters[i]:
distances += [dist.euclidean(wv[word], centroids[i])]
sorted_clusters.append([words for (dists, words) in sorted(zip(distances, clusters[i]))])
return sorted_clusters
def save_Cluster(cluster_list, saveAs):
with open(saveAs, 'w') as f:
writer = csv.writer(f)
writer.writerows(cluster_list)
def get_bagOfCentroids(reviews, word_centroid_map):
num_reviews = len(reviews)
num_centroids = max(word_centroid_map.values()) + 1
bag_matrix = np.zeros((num_reviews, num_centroids), dtype='float32')
for i in range(num_reviews):
for word in reviews[i]:
if word in word_centroid_map:
index = word_centroid_map[word]
bag_matrix[i, index] += 1
return bag_matrix
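# Illustrative sketch of the bag-of-centroids step (arguments are placeholders:
# `reviews` is a list of token lists, `word_centroid_map` maps word -> KMeans cluster id).
def _bag_of_centroids_example(reviews, word_centroid_map):
    features = get_bagOfCentroids(reviews, word_centroid_map)
    # features has shape (len(reviews), number_of_clusters); each row counts how
    # many words of that review fall into each cluster.
    return features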
def load_file(filePath):
# file = pd.read_csv()
fileType = filePath.split(".")[-1]
# TWITTER
if fileType == 'csv': data = pd.read_csv(filePath, header=0, delimiter=",", quoting=3)
dictionary = dict(zip(data['id'], data['review']))
return dictionary
def load_file2(filePath):
# file = pd.read_csv()
fname, ext = os.path.splitext(filePath)
dictionary = {}
if ext == '.json':
data = ujson.loads(open(filePath).read())
for d1 in data:
sid = d1.get('SubmissionID')
dictionary[sid] = d1.get('SubmissionTitle')
com = d1.get("Comments")
for d2 in com:
cid = d2.get("CommentID")
dictionary[cid] = d2.get('CommentText')
elif ext == '.csv' or ext == '.tsv':
data = pd.read_csv(filePath, header=0, delimiter=",", quoting=3, encoding='latin1')
for row in data.itertuples():
            if not (pd.isnull(row.id) or pd.isnull(row.text)):
                dictionary[row.id] = row.text
    return dictionary
# Based on https://www.kaggle.com/tunguz/logistic-regression-with-words-and-char-n-grams/
import os
import sys
import pprint
import logging
from collections import defaultdict
from datetime import datetime
from unidecode import unidecode
import numpy as np
from numpy.random import RandomState
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
import joblib
import common
import base
logger = logging.getLogger(__name__)
class NGram(base.BaseModel):
def main(self):
t_start = datetime.now()
logger.info(' {} / {} '.format(self.name, self.random_seed).center(62, '='))
logger.info('Hyperparameters:\n{}'.format(pprint.pformat(self.params)))
if os.path.isfile(os.path.join(self.output_dir, 'test.csv')):
logger.info('Output already exists - skipping')
return
# Initialize the random number generator
self.random_state = RandomState(self.random_seed)
np.random.seed(int.from_bytes(self.random_state.bytes(4), byteorder=sys.byteorder))
train_df = common.load_data('train')
train_df['comment_text'] = train_df['comment_text'].apply(unidecode)
test_df = common.load_data('test')
test_df['comment_text'] = test_df['comment_text'].apply(unidecode)
vectorizer = self.build_vectorizer(train_df, test_df)
folds = common.stratified_kfold(train_df, random_seed=self.random_seed)
for fold_num, train_ids, val_ids in folds:
logger.info(f'Fold #{fold_num}')
fold_train_df = train_df[train_df['id'].isin(train_ids)]
fold_val_df = train_df[train_df['id'].isin(val_ids)]
models = self.train(fold_num, vectorizer, fold_train_df, fold_val_df)
logger.info('Generating the out-of-fold predictions')
path = os.path.join(self.output_dir, f'fold{fold_num}_validation.csv')
self.predict(models, vectorizer, fold_val_df, path)
logger.info('Generating the test predictions')
path = os.path.join(self.output_dir, f'fold{fold_num}_test.csv')
self.predict(models, vectorizer, test_df, path)
logger.info('Combining the out-of-fold predictions')
df_parts = []
for fold_num in range(1, 11):
path = os.path.join(self.output_dir, f'fold{fold_num}_validation.csv')
df_part = pd.read_csv(path, usecols=['id'] + common.LABELS)
df_parts.append(df_part)
train_pred = pd.concat(df_parts)
path = os.path.join(self.output_dir, 'train.csv')
train_pred.to_csv(path, index=False)
logger.info('Averaging the test predictions')
df_parts = []
for fold_num in range(1, 11):
path = os.path.join(self.output_dir, f'fold{fold_num}_test.csv')
            df_part = pd.read_csv(path, usecols=['id'] + common.LABELS)
            df_parts.append(df_part)
import pytest
import pytz
import dateutil
import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
Int64Index, Period)
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
        tm.assert_series_equal(result, expected)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#For User 1
User_1 = pd.read_csv('acceleration_labelled_data.csv')
User_1 = pd.DataFrame(User_1.iloc[:, 1:6].values)
User_1.columns = ["Activity", "Timeframe", "X axis", "Y axis", "Z axis"]
User_1["Timeframe"] = User_1["Timeframe"] - 0.017856
"""Export_csv = User_1.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/User_1.csv')
"""
#For User 2
User_2 = pd.read_csv('acceleration.csv')
User_2 = pd.DataFrame(User_2.iloc[:, 0:4].values)
User_2.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_2.insert(0, "Activity", "", True)
User_2_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_2.insert(5, "Timedifference", "", True)
User_2 = User_2.to_numpy()
for i in range(1, 33442):
User_2[i][5] = User_2[i][1] - User_2[i-1][1]
User_2 = pd.DataFrame(User_2)
User_2.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_2 = User_2.to_numpy()
User_2_annotations = User_2_annotations.to_numpy()
for i in range(0, 337):
for j in range(0, 33442):
if (User_2[j][1] > User_2_annotations[i][0]) and (User_2[j][1] < User_2_annotations[i][1]):
User_2[j][0] = User_2_annotations[i][2]
User_2 = pd.DataFrame(User_2)
User_2.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
#dropping empty dataframes at start and end
User_2 = User_2.iloc[446:32897,]
#exporting file
Export_User2_csv = User_2.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/User_2.csv')
#For User 3
User_3 = pd.read_csv('acceleration.csv')
User_3 = pd.DataFrame(User_3.iloc[:, 0:4].values)
User_3.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_3.insert(0, "Activity", "", True)
User_3_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_3.insert(5, "Timedifference", "", True)
User_3 = User_3.to_numpy()
for i in range(1, len(User_3)):
User_3[i][5] = User_3[i][1] - User_3[i-1][1]
User_3 = pd.DataFrame(User_3)
User_3.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_3 = User_3.to_numpy()
User_3_annotations = User_3_annotations.to_numpy()
for i in range(0, len(User_3_annotations)):
for j in range(0, len(User_3)):
if (User_3[j][1] > User_3_annotations[i][0]) and (User_3[j][1] < User_3_annotations[i][1]):
User_3[j][0] = User_3_annotations[i][2]
User_3 = pd.DataFrame(User_3)
User_3.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_3_annotations = pd.DataFrame(User_3_annotations)
User_3_annotations.columns = ["Start", "End", "Activity", "Type"]
#dropping empty dataframes at start and end
User_3 = User_3.iloc[1604:31228,]
#exporting file
Export_User3_csv = User_3.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_3.csv')
#For User 4
User_4 = pd.read_csv('acceleration.csv')
User_4 = pd.DataFrame(User_4.iloc[:, 0:4].values)
User_4.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_4.insert(0, "Activity", "", True)
User_4_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_4.insert(5, "Timedifference", "", True)
User_4 = User_4.to_numpy()
for i in range(1, len(User_4)):
User_4[i][5] = User_4[i][1] - User_4[i-1][1]
User_4 = pd.DataFrame(User_4)
User_4.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_4 = User_4.to_numpy()
User_4_annotations = User_4_annotations.to_numpy()
for i in range(0, len(User_4_annotations)):
for j in range(0, len(User_4)):
if (User_4[j][1] > User_4_annotations[i][0]) and (User_4[j][1] < User_4_annotations[i][1]):
User_4[j][0] = User_4_annotations[i][2]
User_4 = pd.DataFrame(User_4)
User_4.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_4_annotations = pd.DataFrame(User_4_annotations)
User_4_annotations.columns = ["Start", "End", "Activity", "index"]
#dropping empty dataframes at start and end
User_4 = User_4.iloc[562:30679,]
#exporting file
Export_User4_csv = User_4.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_4.csv')
#For User 5
User_5 = pd.read_csv('acceleration.csv')
User_5 = pd.DataFrame(User_5.iloc[:, 0:4].values)
User_5.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_5.insert(0, "Activity", "", True)
User_5_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_5.insert(5, "Timedifference", "", True)
User_5 = User_5.to_numpy()
for i in range(1, len(User_5)):
User_5[i][5] = User_5[i][1] - User_5[i-1][1]
User_5 = pd.DataFrame(User_5)
User_5.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_5 = User_5.to_numpy()
User_5_annotations = User_5_annotations.to_numpy()
for i in range(0, len(User_5_annotations)):
for j in range(0, len(User_5)):
if (User_5[j][1] > User_5_annotations[i][0]) and (User_5[j][1] < User_5_annotations[i][1]):
User_5[j][0] = User_5_annotations[i][2]
User_5 = pd.DataFrame(User_5)
User_5.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_5_annotations = pd.DataFrame(User_5_annotations)
User_5_annotations.columns = ["Start", "End", "Activity", "index"]
#dropping unannotated rows at the start and end
User_5 = User_5.iloc[950:30633,]
#exporting file
Export_User5_csv = User_5.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_5.csv')
#For User 6
User_6 = pd.read_csv('acceleration.csv')
User_6 = pd.DataFrame(User_6.iloc[:, 0:4].values)
User_6.columns = ["Timeframe", "X axis", "Y axis", "Z axis"]
User_6.insert(0, "Activity", "", True)
User_6_annotations = pd.read_csv('annotations_0.csv')
#adding timedifference column
User_6.insert(5, "Timedifference", "", True)
User_6 = User_6.to_numpy()
for i in range(1, len(User_6)):
User_6[i][5] = User_6[i][1] - User_6[i-1][1]
User_6 = pd.DataFrame(User_6)
User_6.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_6 = User_6.to_numpy()
User_6_annotations = User_6_annotations.to_numpy()
for i in range(0, len(User_6_annotations)):
for j in range(0, len(User_6)):
if (User_6[j][1] > User_6_annotations[i][0]) and (User_6[j][1] < User_6_annotations[i][1]):
User_6[j][0] = User_6_annotations[i][2]
User_6 = pd.DataFrame(User_6)
User_6.columns = ["Activity","Timeframe", "X axis", "Y axis", "Z axis", "Timedifference"]
User_6_annotations = pd.DataFrame(User_6_annotations)
User_6_annotations.columns = ["Start", "End", "Activity", "index"]
#dropping unannotated rows at the start and end
User_6 = User_6.iloc[717:17329,]
#exporting file
Export_User6_csv = User_6.to_csv(r'/Users/talhajamal/Documents/Year 3/Individual Project/Files for project/Final Training Dataset/User_6.csv')
#For User 7
User_7 = pd.read_csv('acceleration.csv')
# %%
import os
import pandas as pd
import numpy as np
import datetime
# %% CARGA DATOS 217065
MB1 = pd.read_excel(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\BBDD AUTOMÓVILES MERCEDES BENZ Y B.M.W. 1.xlsx', engine='openpyxl') # 99955
MB2 = pd.read_excel(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\BBDD AUTOMÓVILES MERCEDES BENZ Y B.M.W. 2.xlsx', engine='openpyxl') # 13024
MB3 = pd.read_excel(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\BBDD AUTOMÓVILES MERCEDES BENZ Y B.M.W. 3.xlsx', engine='openpyxl') # 13248
MB4 = pd.read_excel(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\BBDD AUTOMÓVILES MERCEDES BENZ Y B.M.W. 4.xlsx', engine='openpyxl') # 13248
MB5 = pd.read_excel(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\BBDD AUTOMÓVILES MERCEDES BENZ Y B.M.W. 5.xlsx', engine='openpyxl')
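# A minimal follow-up sketch: the five partial workbooks are presumably stacked into a single
# frame afterwards; the variable name autos is an assumption made for illustration.
autos = pd.concat([MB1, MB2, MB3, MB4, MB5], ignore_index=True)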
################################################################################
# The contents of this file are Teradata Public Content and have been released
# to the Public Domain.
# <NAME> & <NAME> - April 2020 - v.1.1
# Copyright (c) 2020 by Teradata
# Licensed under BSD; see "license.txt" file in the bundle root folder.
#
################################################################################
# R and Python TechBytes Demo - Part 5: Python in-nodes with SCRIPT
# ------------------------------------------------------------------------------
# File: stoRFScoreMM.py
# ------------------------------------------------------------------------------
# The R and Python TechBytes Demo comprises of 5 parts:
# Part 1 consists of only a Powerpoint overview of R and Python in Vantage
# Part 2 demonstrates the Teradata R package tdplyr for clients
# Part 3 demonstrates the Teradata Python package teradataml for clients
# Part 4 demonstrates using R in-nodes with the SCRIPT and ExecR Table Operators
# Part 5 demonstrates using Python in-nodes with the SCRIPT Table Operator
################################################################################
#
# This TechBytes demo utilizes a use case to predict the propensity of a
# financial services customer base to open a credit card account.
#
# The present file is the Python scoring script to be used with the SCRIPT
# table operator, as described in the following use case 2 of the present demo
# Part 5:
#
# 2) Fitting and scoring multiple models
#
# We utilize the statecode variable as a partition to built a Random
# Forest model for every state. This is done by using SCRIPT Table Operator
# to run a model fitting script with a PARTITION BY statecode in the query.
# This creates a model for each of the CA, NY, TX, IL, AZ, OH and Other
# state codes, and perists the model in the database via CREATE TABLE AS
# statement.
# Then we run a scoring script via the SCRIPT Table Operator against
# these persisted Random Forest models to score the entire data set.
#
# For this use case, we build an analytic data set nearly identical to the
# one in the teradataml demo (Part 3), with one change as indicated by item
# (d) below. This is so we can demonstrate the in-database capability of
# simultaneously building many models.
# 60% of the analytic data set rows are sampled to create a training
# subset. The remaining 40% is used to create a testing/scoring dataset.
# The train and test/score datasets are used in the SCRIPT operations.
################################################################################
# File Changelog
# v.1.0 2019-10-29 First release
# v.1.1 2020-04-02 Added change log; no code changes in present file
################################################################################
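# For orientation only: a scoring script of this kind is normally launched from SQL via the
# SCRIPT Table Operator, roughly as sketched below. The table names, command path and RETURNS
# column list here are placeholders, not values taken from this demo.
#
# SELECT * FROM SCRIPT(
#     ON (SELECT t.*, m.model
#         FROM ADS_scoring AS t JOIN RF_models AS m ON t.statecode = m.statecode)
#     PARTITION BY statecode
#     SCRIPT_COMMAND('tdpython3 ./mydb/stoRFScoreMM.py')
#     RETURNS ('cust_id BIGINT, prob_0 FLOAT, prob_1 FLOAT, statecode VARCHAR(3)')
# );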
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
import base64
###
### Read input
###
delimiter = '\t'
inputData = []
try:
line = input()
if line == '': # Exit if user provides blank line
pass
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
modelSerB64 = allArgs[-1]
except (EOFError): # Exit if reached EOF or CTRL-D
pass
while 1:
try:
line = input()
if line == '': # Exit if user provides blank line
break
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
except (EOFError): # Exit if reached EOF or CTRL-D
break
#for line in sys.stdin.read().splitlines():
# line = line.split(delimiter)
# inputData.append(line)
###
### If no data received, gracefully exit rather than producing an error later.
###
if not inputData:
sys.exit()
## In the input information, all rows have the same number of column elements
## except for the first row. The latter also contains the model info in its
## last column. Isolate the serialized model from the end of first row.
#modelSerB64 = inputData[0][-1]
###
### Set up input DataFrame according to input schema
###
# Know your data: You must know in advance the number and data types of the
# incoming columns from the database!
# For numeric columns, the database sends in floats in scientific format with a
# blank space when the exponential is positive; e.g., 1.0 is sent as 1.000E 000.
# The following input data read deals with any such blank spaces in numbers.
columns = ['cust_id', 'tot_income', 'tot_age', 'tot_cust_years', 'tot_children',
'female_ind', 'single_ind', 'married_ind', 'separated_ind',
'statecode', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind',
'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt',
'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt',
'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt', 'SAMPLE_ID']
df = pd.DataFrame(inputData, columns=columns)
#df = pd.DataFrame.from_records(inputData, exclude=['nRow', 'model'], columns=columns)
del inputData
df['cust_id'] = pd.to_numeric(df['cust_id'])
df['tot_income'] = df['tot_income'].apply(lambda x: "".join(x.split()))
df['tot_income'] = pd.to_numeric(df['tot_income'])
df['tot_age'] = pd.to_numeric(df['tot_age'])
df['tot_cust_years'] = pd.to_numeric(df['tot_cust_years'])
df['tot_children'] = pd.to_numeric(df['tot_children'])
df['female_ind'] = pd.to_numeric(df['female_ind'])
df['single_ind'] = pd.to_numeric(df['single_ind'])
df['married_ind'] = pd.to_numeric(df['married_ind'])
df['separated_ind'] = pd.to_numeric(df['separated_ind'])
df['statecode'] = df['statecode'].apply(lambda x: x.replace('"', ''))
df['ck_acct_ind'] = pd.to_numeric(df['ck_acct_ind'])
df['sv_acct_ind'] = pd.to_numeric(df['sv_acct_ind'])
df['cc_acct_ind'] = pd.to_numeric(df['cc_acct_ind'])
df['ck_avg_bal'] = df['ck_avg_bal'].apply(lambda x: "".join(x.split()))
df['ck_avg_bal'] = pd.to_numeric(df['ck_avg_bal'])
df['sv_avg_bal'] = df['sv_avg_bal'].apply(lambda x: "".join(x.split()))
df['sv_avg_bal'] = pd.to_numeric(df['sv_avg_bal'])
df['cc_avg_bal'] = df['cc_avg_bal'].apply(lambda x: "".join(x.split()))
df['cc_avg_bal'] = pd.to_numeric(df['cc_avg_bal'])
df['ck_avg_tran_amt'] = df['ck_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['ck_avg_tran_amt'] = pd.to_numeric(df['ck_avg_tran_amt'])
df['sv_avg_tran_amt'] = df['sv_avg_tran_amt'].apply(lambda x: "".join(x.split()))
df['sv_avg_tran_amt'] = pd.to_numeric(df['sv_avg_tran_amt'])
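# Hedged sketch, assuming the remaining numeric columns named in `columns` are cleaned the same
# way as the ones above before scoring:
for _col in ['cc_avg_tran_amt', 'q1_trans_cnt', 'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt']:
    df[_col] = df[_col].apply(lambda x: "".join(x.split()))
    df[_col] = pd.to_numeric(df[_col])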
# EIA_MECS.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
import pandas as pd
import numpy as np
import io
from flowsa.common import *
from flowsa.flowbyfunctions import assign_fips_location_system
import yaml
"""
MANUFACTURING ENERGY CONSUMPTION SURVEY (MECS)
https://www.eia.gov/consumption/manufacturing/data/2014/
Last updated: 8 Sept. 2020
"""
def eia_mecs_URL_helper(build_url, config, args):
"""
Takes the build url and performs substitutions based on the EIA MECS year
and data tables of interest. Returns the finished url.
"""
# initiate url list
urls = []
# for all tables listed in the source config file...
for table in config['tables']:
# start with build url
url = build_url
# replace '__year__' in build url
url = url.replace('__year__', args['year'])
# 2014 files are in .xlsx format; 2010 files are in .xls format
if(args['year'] == '2010'):
url = url[:-1]
# replace '__table__' in build url
url = url.replace('__table__', table)
# add to list of urls
urls.append(url)
return urls
def eia_mecs_land_call(url, cbesc_response, args):
# Convert response to dataframe
df_raw_data = pd.io.excel.read_excel(io.BytesIO(cbesc_response.content), sheet_name='Table 9.1')
df_raw_rse = pd.io.excel.read_excel(io.BytesIO(cbesc_response.content), sheet_name='RSE 9.1')
if (args["year"] == "2014"):
df_rse = pd.DataFrame(df_raw_rse.loc[12:93]).reindex()
df_data = pd.DataFrame(df_raw_data.loc[16:97]).reindex()
df_description = pd.DataFrame(df_raw_data.loc[16:97]).reindex()
# skip rows and remove extra rows at end of dataframe
df_description.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"]
df_data.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"]
df_rse.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"]
#Drop unused columns
df_description = df_description.drop(columns=["Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)",
"n8", "n9", "n10", "n11", "n12"])
df_data = df_data.drop(columns=["Subsector and Industry", "n8", "n9", "n10", "n11", "n12"])
df_rse = df_rse.drop(columns=["Subsector and Industry", "n8", "n9", "n10", "n11", "n12"])
else:
df_rse = pd.DataFrame(df_raw_rse.loc[14:97]).reindex()
df_data = pd.DataFrame(df_raw_data.loc[16:99]).reindex()
df_description = pd.DataFrame(df_raw_data.loc[16:99]).reindex()
df_description.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"]
df_data.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"]
df_rse.columns = ["NAICS Code(a)", "Subsector and Industry",
"Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"]
# Drop unused columns
df_description = df_description.drop(
columns=["Approximate Enclosed Floorspace of All Buildings Onsite (million sq ft)",
"Establishments(b) (counts)", "Average Enclosed Floorspace per Establishment (sq ft)",
"Approximate Number of All Buildings Onsite (counts)",
"Average Number of Buildings Onsite per Establishment (counts)"])
df_data = df_data.drop(columns=["Subsector and Industry"])
df_rse = df_rse.drop(columns=["Subsector and Industry"])
df_data = df_data.melt(id_vars=["NAICS Code(a)"],
var_name="FlowName",
value_name="FlowAmount")
df_rse = df_rse.melt(id_vars=["NAICS Code(a)"],
var_name="FlowName",
value_name="Spread")
df = pd.merge(df_data, df_rse)
    df = pd.merge(df, df_description)
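    # Hedged assumption: hand the merged frame back to the flowsa caller.
    return df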
import pandas as pd
import glob
import eyed3
import ctypes
import numpy as np
from datetime import datetime
from utils.get_bpm import beats_per_minute
from utils.top_level_locator import top_level_path
from pathlib import Path
from collections import Counter
from pydub import AudioSegment
class MusicDatabase:
objects = []
def __init__(self):
self.database_path = Path(top_level_path() / 'data' / 'music_database.json')
self.rating = 75
self.sophisticated = 50
MusicDatabase.objects.append(self)
self.db = self.load_database(self.database_path)
def load_database(self, database_path):
return pd.read_json(database_path)
def save_database(self, mdb):
mdb.to_json(self.database_path)
def save_playlist_database(self, mdb, playlist_name):
filepath = self.database_path.parent / f'playlists/{playlist_name + ".json"}'
if filepath.is_file():
return False
else:
mdb.to_json(filepath)
return True
def update_value(self, col, old_val, new_val): # self.Mbox('Messagebox', 'Song already in database.', 0)
print(10)
if isinstance(old_val, int) and isinstance(new_val, int):
return new_val
if not isinstance(new_val, int) and isinstance(old_val, int):
self.Mbox('Messagebox', f'In column: {col}, trying to replace old int value: {old_val}, with new string value: {new_val}', 0)
if not isinstance(old_val, int) and isinstance(new_val, int):
self.Mbox('Messagebox', f'In column: {col}, trying to replace old str value: {old_val}, with new int value: {new_val}', 0)
print(11)
if isinstance(old_val, str) and isinstance(new_val, str):
return new_val
print(12)
if isinstance(old_val, str) and isinstance(new_val, list):
            if old_val not in new_val:
                new_val.append(old_val)
                return new_val
print(13)
if isinstance(old_val, list) and isinstance(new_val, str):
            if new_val not in old_val:
                old_val.append(new_val)
                return old_val
print(15)
if isinstance(old_val, list) and isinstance(new_val, list):
if col == 'artist':
print(16)
return new_val
return list(set().union(old_val, new_val))
print(17)
def raw_to_formatted_metadata(self, meta):
filename = meta['title']
meta['title'] = meta['title'].lower()
# process [] () in title
edges = [('[',']'), ('(',')')]
annotations = ["Single", "EP", "Cover", "Remix", "Mashup"]
feats = [' feat. ', ' feat ', ' ft. ', ' ft ']
# process feat - END in title
feat_ends = [' - ', ' | ', '!']
feat_seps = [' & ', ' + ', ' en ']
artists = []
loops = 0
song_type = 'Single'
featuring = None
# PROCESS ALL EDGES TO MAKE TITLE EASIER TO PARSE
for edge in edges:
if Counter(meta['title'])[edge[0]] == Counter(meta['title'])[edge[1]]:
loops += Counter(meta['title'])[edge[0]]
for _ in range(max(1, loops)):
if edge[0] in meta['title'] and edge[1] in meta['title']:
i1 = meta['title'].find(edge[0])
i2 = meta['title'].find(edge[1]) + 1
before = meta['title'][:i1].strip()
between = meta['title'][i1:i2]
after = meta['title'][i2:]
for anno in annotations:
if anno in between:
song_annotation = between
song_type = anno
print(song_type)
for feat in feats:
if feat in between:
artist_annotation = between # TODO handle individual artists when feat found inside of edges
meta['title'] = before + after
# CHECK IF ANY FEATURINGS IN EDGES AND PROCESS ARTISTS HERE
if any(feat in f' {between[1:len(between)-1]} ' for feat in feats):
featuring = f' {between[1:len(between)-1]} '
for end in feat_ends:
if meta['title'][:len(end)] == end:
meta['title'] = meta['title'][len(end):]
            if meta['title'][-len(end):] == end:
                meta['title'] = meta['title'][:-len(end)]
for feat in feats:
if feat in meta['title']:
i1 = meta['title'].find(feat)
i1_feat = i1 + len(feat)
for end in feat_ends:
if end in meta['title'][i1_feat:]:
i2 = meta['title'][i1_feat:].find(end)
i2_end = i2 + len(end)
if end == ' - ':
i2_end = i2
break
else:
i2 = len(meta['title'])
i2_end = len(meta['title'])
between = meta['title'][i1_feat:i1_feat + i2]
before = meta['title'][:i1].strip()
after = meta['title'][i1_feat:][i2_end:len(meta['title'])]
for sep in feat_seps:
if sep in between:
for artist in between.split(sep):
artists.append(artist.strip())
if not any([sep in between for sep in feat_seps]):
artists.append(between)
meta['title'] = before + after
if featuring != None and feat in featuring:
i1 = featuring.find(feat)
i1_feat = i1 + len(feat)
i2_end = len(featuring)
between = featuring[i1_feat:i1_feat + i2]
# NOTE here go for sep in seps loop if sep found with feat inside () [] - i.e. multiple artists inside edge with seps
artists.append(between.strip().title())
seps = [' - ', ' – ', ': ', ' & ', ' x ', ' by ']
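        # Split what is left of the title into main artist(s) and song name on the first separator
        # that matches; a second separator inside the artist part is treated as another artist.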
for sep in seps:
if sep in meta['title']:
artist = meta['title'].split(sep)[0]
for sep2 in seps:
if sep2 in artist:
artists.append(artist.split(sep2)[1])
artist = artist.split(sep2)[0]
try:
artist += ' ' + artist_annotation
except NameError:
pass
artists.insert(0, artist) # insert main artist to artists on first pos
song = meta['title'].split(sep)[1]
break
else:
if not any(sep in meta['title'] for sep in seps):
artist = meta['title']
try:
artist += ' ' + artist_annotation
except NameError:
pass
artists.insert(0, artist)
song = meta['title']
break
artists_titles = []
for artist in artists:
artists_titles.append(artist.title())
try:
if song.startswith('"') and song.endswith('"'):
song = song[1:len(song)-1]
song += ' ' + song_annotation
song_annotation = ''
except NameError:
pass
print('filename:', filename)
print('original:', filename.title())
print('Parsed title:', meta['title'].title())
print('song:', song.title())
print('artist(s):', *artists_titles, sep = ", ")
print('\n')
pre_dl_meta = {
'type': song_type,
'rating': 75,
'sophisticated': 50,
'vocal': None,
'language': None,
'instrument': None,
'genre': None,
'emotion': None,
'bpm': None,
'rationale': None}
new_meta = {
'title': meta['title'],
'song': song.title(),
'artist': artists_titles,
'filepath': meta['filepath'],
'duration': meta['duration'],
'album': meta['album'],
'year_added': int(f'{datetime.today().year}0{datetime.today().month}{datetime.today().day}') if len(str(datetime.today().month)) == 1 else int(f'{datetime.today().year}{datetime.today().month}{datetime.today().day}'),
'release_year': int(meta['upload_date'][0:4]),
'youtube_url': meta['webpage_url'],
'tree_iid': meta['tree_iid'],
**pre_dl_meta}
return new_meta
def metadata_to_database(self, meta, json):
# url is better unique ID then filename or filepath (music/youtubeID.mp3, because what if youtube makes a change)
url = meta['youtube_url']
dropped = False
db = json
# check if song in database
if url in list(db['youtube_url']):
# row_index = list(db['youtube_url']).index(url) # DEPRECATE
row_index = db.youtube_url[db.youtube_url == url].index[0]
for col in db.columns:
old_value = db.at[row_index, col]
# DEVNOTE: SET COLNAME FOR TESTING PURPOSES
colname = 'rating'
if col == colname: # TODO remove only this line after testing
print('col:', col, '- row:', row_index, '- old_value:', old_value, '- new_value:', meta[col])
try:
                    if not pd.isna(old_value):
# -*- coding: utf-8 -*-
"""Export Biomappings as SSSOM."""
import pathlib
import bioregistry
import click
import yaml
from biomappings import load_mappings, load_predictions
from biomappings.utils import DATA, MiriamValidator
DIRECTORY = pathlib.Path(DATA).joinpath("sssom")
DIRECTORY.mkdir(exist_ok=True, parents=True)
PATH = DIRECTORY.joinpath("biomappings.sssom.tsv")
META_PATH = DIRECTORY.joinpath("biomappings.sssom.yml")
META = {
"license": "https://creativecommons.org/publicdomain/zero/1.0/",
"mapping_provider": "https://github.com/biomappings/biomappings",
"mapping_set_group": "biomappings",
"mapping_set_id": "biomappings",
"mapping_set_title": "Biomappings",
}
validator = MiriamValidator()
def get_sssom_df():
"""Get an SSSOM dataframe."""
import pandas as pd
rows = []
prefixes = set()
columns = [
"subject_id",
"predicate_id",
"object_id",
"subject_label",
"object_label",
"match_type",
"creator_id",
"confidence",
"mapping_tool",
]
for mapping in load_mappings():
prefixes.add(mapping["source prefix"])
prefixes.add(mapping["target prefix"])
rows.append(
(
validator.get_curie(mapping["source prefix"], mapping["source identifier"]),
f'{mapping["relation"]}',
validator.get_curie(mapping["target prefix"], mapping["target identifier"]),
mapping["source name"],
mapping["target name"],
"HumanCurated", # match type
mapping["source"], # curator CURIE
None, # no confidence necessary
None, # mapping tool: none necessary for manually curated
)
)
for mapping in load_predictions():
prefixes.add(mapping["source prefix"])
prefixes.add(mapping["target prefix"])
rows.append(
(
validator.get_curie(mapping["source prefix"], mapping["source identifier"]),
f'{mapping["relation"]}',
validator.get_curie(mapping["target prefix"], mapping["target identifier"]),
mapping["source name"],
mapping["target name"],
"LexicalEquivalenceMatch", # match type
None, # no curator CURIE
mapping["confidence"],
mapping["source"], # mapping tool: source script
)
)
    df = pd.DataFrame(rows, columns=columns)
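    # Hedged assumption: return the assembled rows as the SSSOM dataframe; the collected prefixes
    # presumably feed the curie map written alongside it.
    return df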
import json
import logging
import os
import sys
import time
import datetime
from multiprocessing import Process, Queue
from workers.worker_git_integration import WorkerGitInterfaceable
import joblib
import numpy as np
import pandas as pd
import requests
import sqlalchemy as s
from sklearn.metrics import (confusion_matrix, f1_score, precision_score,
recall_score)
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from xgboost import XGBClassifier
from workers.message_insights_worker.message_sentiment import get_senti_score
from workers.worker_base import Worker
from augur import ROOT_AUGUR_DIRECTORY
from augur.config import AugurConfig
class PullRequestAnalysisWorker(WorkerGitInterfaceable):
def __init__(self, config={}):
# Define the worker's type, which will be used for self identification.
worker_type = "pull_request_analysis_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
# The name the housekeeper/broker use to distinguish the data model this worker can fill
models = ['pull_request_analysis']
# Define the tables needed to insert, update, or delete on
data_tables = ['message', 'repo', 'pull_request_analysis']
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Do any additional configuration after the general initialization has been run
self.config.update(config)
# Define data collection info
self.tool_source = 'Pull Request Analysis Worker'
self.tool_version = '0.0.0'
self.data_source = 'Non-existent API'
self.insight_days = 200 # self.config['insight_days']
augur_config = AugurConfig(ROOT_AUGUR_DIRECTORY)
self.senti_models_dir = os.path.join(ROOT_AUGUR_DIRECTORY,"workers", "message_insights_worker",augur_config.get_section("Workers")["message_insights_worker"]["models_dir"])
self.logger.info(f'Sentiment model dir located - {self.senti_models_dir}')
def pull_request_analysis_model(self, task, repo_id):
# Any initial database instructions, like finding the last tuple inserted or generate the next ID value
# Collection and insertion of data happens here
begin_date = datetime.datetime.now() - datetime.timedelta(days=self.insight_days)
self.logger.info(f'Fetching open PRs of repo: {repo_id}')
# Fetch open PRs of repo and associated commits
pr_SQL = s.sql.text("""
select pull_requests.pull_request_id,
pr_created_at, pr_src_state,
pr_closed_at, pr_merged_at,
pull_request_commits.pr_cmt_id,
pr_augur_contributor_id,
pr_src_author_association
from augur_data.pull_requests
INNER JOIN augur_data.pull_request_commits on pull_requests.pull_request_id = pull_request_commits.pull_request_id
where pr_created_at > :begin_date
and repo_id = :repo_id
and pr_src_state like 'open'
""")
df_pr = pd.read_sql_query(pr_SQL, self.db, params={'begin_date': begin_date, 'repo_id': repo_id})
self.logger.info(f'PR Dataframe dim: {df_pr.shape}\n')
# DEBUG:
# df_pr.to_csv(f'PRA.csv',index=False)
if df_pr.empty:
self.logger.warning('No new open PRs in tables to analyze!\n')
self.register_task_completion(task, repo_id, 'pull_request_analysis')
return
self.logger.info(f'Getting count of commits associated with every PR')
# Get count of commits associated with every PR
df_pr['commit_counts'] = df_pr.groupby(['pull_request_id'])['pr_cmt_id'].transform('count')
df_pr = df_pr.drop(['pr_cmt_id'], axis=1)
# Find length of PR in days upto now
df_pr['pr_length'] = (datetime.datetime.now() - df_pr['pr_created_at']).dt.days
self.logger.info(f'Fetching messages relating to PR')
# Get sentiment score of all messages relating to the PR
messages_SQL = s.sql.text("""
select message.msg_id, msg_timestamp, msg_text, message.cntrb_id from augur_data.message
left outer join augur_data.pull_request_message_ref on message.msg_id = pull_request_message_ref.msg_id
left outer join augur_data.pull_requests on pull_request_message_ref.pull_request_id = pull_requests.pull_request_id where repo_id = :repo_id
UNION
select message.msg_id, msg_timestamp, msg_text, message.cntrb_id from augur_data.message
left outer join augur_data.issue_message_ref on message.msg_id = issue_message_ref.msg_id
left outer join augur_data.issues on issue_message_ref.issue_id = issues.issue_id where repo_id = :repo_id""")
df_message = pd.read_sql_query(messages_SQL, self.db, params={'repo_id': repo_id})
self.logger.info(f'Mapping messages to PR, find comment & participants counts')
# Map PR to its corresponding messages
pr_ref_sql = s.sql.text("select * from augur_data.pull_request_message_ref")
df_pr_ref = pd.read_sql_query(pr_ref_sql, self.db)
df_merge = pd.merge(df_pr, df_pr_ref, on='pull_request_id',how='left')
df_merge = pd.merge(df_merge, df_message, on='msg_id', how='left')
df_merge = df_merge.dropna(subset=['msg_id'], axis = 0)
if df_merge.empty:
self.logger.warning('Not enough data to analyze!\n')
self.register_task_completion(task, repo_id, 'pull_request_analysis')
return
self.logger.info(f'cols: {df_merge.columns}')
df_merge['senti_score'] = get_senti_score(df_merge,'msg_text', self.senti_models_dir,label=False, logger=self.logger)
self.logger.info(f'Calculated sentiment scores!')
# Get count of associated comments
df_merge['comment_counts'] = df_merge.groupby(['pull_request_id'])['msg_id'].transform('count')
# Get participants count
participants = pd.DataFrame(df_merge.groupby(['pull_request_id'])['cntrb_id'].nunique())
participants = participants.reset_index()
participants = participants.rename(columns={"cntrb_id": "usr_counts"})
df_merge = pd.merge(df_merge, participants, on='pull_request_id',how='left')
df_fin = df_merge[['pull_request_id','pr_created_at','pr_closed_at','pr_merged_at','commit_counts','comment_counts','pr_length','senti_score', 'pr_augur_contributor_id', 'pr_src_author_association', 'usr_counts']]
# Find the mean of sentiment scores
df_fin['comment_senti_score'] = df_fin.groupby(['pull_request_id'])['senti_score'].transform('mean')
df_fin = df_fin.drop(['senti_score'], axis=1)
df_fin = df_fin.drop_duplicates()
'''
# Get cntrb info from API
cntrb_sql = 'SELECT cntrb_id, gh_login FROM augur_data.contributors'
df_ctrb = pd.read_sql_query(cntrb_SQL, self.db)
df_fin1 = pd.merge(df_fin,df_ctrb,left_on='pr_augur_contributor_id', right_on='cntrb_id', how='left')
df_fin1 = df_fin1.drop(['cntrb_id'],axis=1)
# Dict for persisting user data & fast lookups
user_info = {}
df_fin1['usr_past_pr_accept'] = df_fin1['gh_login'].apply(self.fetch_user_info)
df_fin = df_fin1
'''
self.logger.info(f'Fetching repo statistics')
# Get repo info
repo_sql = s.sql.text("""
SELECT repo_id, pull_requests_merged, pull_request_count,watchers_count, last_updated FROM
augur_data.repo_info where repo_id = :repo_id
""")
df_repo = pd.read_sql_query(repo_sql, self.db, params = {'repo_id': repo_id})
df_repo = df_repo.loc[df_repo.groupby('repo_id').last_updated.idxmax(),:]
df_repo = df_repo.drop(['last_updated'],axis=1)
# Calculate acceptance ration of repo
df_repo['pr_accept_ratio'] = df_repo['pull_requests_merged']/df_repo['pull_request_count']
df_repo = df_repo.drop(['pull_requests_merged','pull_request_count'],axis=1)
        df = pd.concat([df_fin,df_repo], axis=1)
import os
import time
import logging
import argparse
import sys
sys.path.append("libs")
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from data import ContentVaeDataGenerator
from data import CollaborativeVAEDataGenerator
from pretrain_vae import get_content_vae
from train_vae import get_collabo_vae, infer
from evaluate import EvaluateModel
from evaluate import EvaluateCold
from evaluate import Recall_at_k, NDCG_at_k
def predict_and_evaluate():
### Parse the console arguments.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str,
help="specify the dataset for experiment")
parser.add_argument("--split", type=int,
help="specify the split of the dataset")
parser.add_argument("--batch_size", type=int, default=128,
help="specify the batch size prediction")
parser.add_argument("--device" , type=str, default="0",
help="specify the visible GPU device")
parser.add_argument("--lambda_V", default=None, type=int,
help="specify the value of lambda_V for regularization")
parser.add_argument("--num_cold", default=None, type=int,
help="specify the number of cold start items")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
### Set up the tensorflow session.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
### Fix the random seeds.
np.random.seed(98765)
tf.set_random_seed(98765)
### Get the train, val data generator for content vae
if args.lambda_V is not None:
model_root = os.path.join("models", args.dataset, str(args.split), str(args.lambda_V))
else:
model_root = os.path.join("models", args.dataset, str(args.split))
if args.num_cold is not None:
data_root = os.path.join("data", args.dataset, str(args.split), str(args.num_cold))
model_root = os.path.join("models", args.dataset, str(args.split), "num_cold", str(args.num_cold))
else:
data_root = os.path.join("data", args.dataset, str(args.split))
model_root = os.path.join("models", args.dataset, str(args.split))
dataset = "movielen-10" if "movielen-10" in args.dataset else args.dataset
tstep_cold_gen = ContentVaeDataGenerator(
data_root = data_root, joint=True,
batch_size = args.batch_size, use_cold=True,
)
bstep_test_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase = "test",
batch_size = args.batch_size, shuffle=False
)
bstep_cold_gen = CollaborativeVAEDataGenerator(
data_root = data_root, phase="test",
batch_size = args.batch_size*8, use_cold=True,
)
### Build test model and load trained weights
collabo_vae = get_collabo_vae(dataset, bstep_test_gen.num_items)
collabo_vae.load_weights(os.path.join(model_root, "best_bstep.model"))
content_vae = get_content_vae(dataset, tstep_cold_gen.feature_dim)
content_vae.load_weights(os.path.join(model_root, "best_tstep.model"))
vae_infer_tstep = content_vae.build_vae_infer_tstep()
vae_eval = collabo_vae.build_vae_eval()
vae_eval_cold = collabo_vae.update_vae_coldstart(infer(vae_infer_tstep, tstep_cold_gen.features.A))
### Evaluate and save the results
k4recalls = [10, 20, 25, 30, 35, 40, 45, 50]
k4ndcgs = [25, 50, 100]
recalls, NDCGs = [], []
recalls_cold, NDCGs_cold = [], []
for k in k4recalls:
recalls.append("{:.4f}".format(EvaluateModel(vae_eval, bstep_test_gen, Recall_at_k, k=k)))
recalls_cold.append("{:.4f}".format(EvaluateCold(vae_eval_cold, bstep_cold_gen, Recall_at_k, k=k)))
for k in k4ndcgs:
NDCGs.append("{:.4f}".format(EvaluateModel(vae_eval, bstep_test_gen, NDCG_at_k, k=k)))
NDCGs_cold.append("{:.4f}".format(EvaluateCold(vae_eval_cold, bstep_cold_gen, NDCG_at_k, k=k)))
    recall_table = pd.DataFrame({"k":k4recalls, "recalls":recalls}, columns=["k", "recalls"])
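    # Hedged sketch, assuming the remaining metric lists are tabulated the same way and everything
    # is written next to the model; the file names below are illustrative.
    recall_cold_table = pd.DataFrame({"k": k4recalls, "recalls": recalls_cold}, columns=["k", "recalls"])
    ndcg_table = pd.DataFrame({"k": k4ndcgs, "NDCGs": NDCGs}, columns=["k", "NDCGs"])
    ndcg_cold_table = pd.DataFrame({"k": k4ndcgs, "NDCGs": NDCGs_cold}, columns=["k", "NDCGs"])
    for fname, table in [("recalls.csv", recall_table), ("recalls_cold.csv", recall_cold_table),
                         ("ndcgs.csv", ndcg_table), ("ndcgs_cold.csv", ndcg_cold_table)]:
        table.to_csv(os.path.join(model_root, fname), index=False)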
import numpy as np
import pandas as pd
from .pandas_vb_common import setup # noqa
class FillNa(object):
goal_time = 0.2
params = [True, False]
param_names = ['inplace']
def setup(self, inplace):
N = 10**6
rng = pd.date_range('1/1/2000', periods=N, freq='min')
data = np.random.randn(N)
data[::2] = np.nan
        self.ts = pd.Series(data, index=rng)
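    # A minimal sketch of the timed method, assuming the conventional asv name and a 0.0 fill value.
    def time_fillna(self, inplace):
        self.ts.fillna(0.0, inplace=inplace)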
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = integer_array([1, 2], dtype="int8")
assert result.dtype == Int8Dtype()
# if values has dtype -> override it
result = integer_array(np.array([1, 2], dtype="int8"), dtype="int32")
assert result.dtype == Int32Dtype()
def test_to_integer_array_float():
result = integer_array([1.0, 2.0])
expected = integer_array([1, 2])
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
integer_array([1.5, 2.0])
# for float dtypes, the itemsize is not preserved
result = integer_array(np.array([1.0, 2.0], dtype="float32"))
assert result.dtype == Int64Dtype()
@pytest.mark.parametrize(
"bool_values, int_values, target_dtype, expected_dtype",
[
([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
([False, True], [0, 1], "Int64", Int64Dtype()),
([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
],
)
def test_to_integer_array_bool(bool_values, int_values, target_dtype, expected_dtype):
result = integer_array(bool_values, dtype=target_dtype)
assert result.dtype == expected_dtype
expected = integer_array(int_values, dtype=target_dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values, to_dtype, result_dtype",
[
(np.array([1], dtype="int64"), None, Int64Dtype),
(np.array([1, np.nan]), None, Int64Dtype),
(np.array([1, np.nan]), "int8", Int8Dtype),
],
)
def test_to_integer_array(values, to_dtype, result_dtype):
# convert existing arrays to IntegerArrays
result = integer_array(values, dtype=to_dtype)
assert result.dtype == result_dtype()
expected = integer_array(values, dtype=result_dtype())
tm.assert_extension_array_equal(result, expected)
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["mean"])
def test_reduce_to_float(op):
# some reduce ops always return float, even if the result
# is a rounded number
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, float)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a)
expected = integer_array(ufunc(a.astype(float)))
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(integer_array(ufunc(a.astype(float))))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = integer_array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = ufunc(a.astype(float))
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = ufunc(s.astype(float))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = integer_array(ufunc(a.astype(float), a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = integer_array(ufunc(a.astype(float), arr))
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = integer_array(ufunc(arr, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with scalar
result = ufunc(a, 1)
expected = integer_array(ufunc(a.astype(float), 1))
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = integer_array(ufunc(1, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
a = integer_array(values)
msg = r"The 'reduce' method is not supported."
with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(data):
# protocol added in 0.15.0
import pyarrow as pa
arr = pa.array(data)
expected = np.array(data, dtype=object)
expected[data.isna()] = None
expected = pa.array(expected, type=data.dtype.name.lower(), from_pandas=True)
assert arr.equals(expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_roundtrip(data):
# roundtrip possible from arrow 0.16.0
import pyarrow as pa
df = | pd.DataFrame({"a": data}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 6 22:15:42 2018
@author: katezeng
This module is for Predictive Analysis - Hypothesis Testing
- This component contains both the traditional statistical hypothesis testing, and the beginning of machine learning predictive analytics.
Here you will write three (3) hypotheses and see whether or not they are supported by your data. You must use all of the methods listed below
(at least once) on your data.
- You do not need to try all the methods for each hypothesis. For example, you might use ANOVA for one of your hypotheses, and you might use a
t-test and linear regression for another, etc. It will be the case that some of the hypotheses will not be well supported.
- When trying methods like a decision tree, you should use cross-validation and show your ROC curve and a confusion matrix. For each method,
explain the method in one paragraph.
- Explain how and why you will apply your selected method(s) to each hypothesis, and discuss the results.
- Therefore, you will have at least three (3) hypothesis tests and will apply all seven (7) of the following methods to one or more of your
hypotheses.
- Required methods:
- t-test or Anova (choose one)
- Linear Regression or Logistical Regression (multivariate or multinomial) (choose one)
- Decision tree
- A Lazy Learner Method (such as kNN)
- Naïve Bayes
- SVM
- Random Forest
"""
#####################################################
# #
# Import Libraries #
# #
#####################################################
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_curve, auc, confusion_matrix, classification_report
from sklearn import svm
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import label_binarize
from imblearn.over_sampling import SMOTE
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
########################################################
# #
# List of Functions #
# #
########################################################
# function for arranging columns
def arrangeCol(data):
cols = list(data)
cols.insert(len(cols), cols.pop(cols.index('price')))
data = data.loc[:, cols]
return data
# function for linear regression with absolute error plot
def linearRegression1(data):
X = data[['hotel_meanprice']]
y = data[['price']]
X_train, X_test , y_train , y_test = train_test_split(X,y,test_size=0.25,random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
plt.figure(figsize=(15,8))
ax = sns.distplot(y_test-predictions)
ax.set(ylabel='Density', xlabel='Error',
           title='Error distribution of test sets by Linear Regression model')
plt.savefig("./plots/LRresults.png")
# function for linear regression with absolute error vs actual value
def linearRegression2(data):
X = data[['hotel_meanprice']]
y = data[['price']]
X_train, X_test , y_train , y_test = train_test_split(X,y,test_size=0.25,random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
plt.figure(figsize=(15,8))
ax = sns.distplot(abs(y_test-predictions)/y_test)
    ax.set(ylabel='Density', xlabel='Relative absolute error',
           title='Relative error distribution of test sets by Linear Regression model')
plt.savefig("./plots/LR_absolute_diff.png")
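# hedged sketch, not part of the original module: the docstring above also asks for a
# decision tree evaluated with cross-validation, an ROC curve and a confusion matrix,
# which does not appear among the helper functions here. This helper assumes a generic
# feature matrix X and a *binary* target y (both placeholders) and uses only libraries
# already imported at the top of this file.
def decisionTreeROC(X, y):
    # cross-validated probability estimates for the positive class
    clf = DecisionTreeClassifier(max_depth=4, random_state=0)
    y_scores = cross_val_predict(clf, X, y, cv=5, method='predict_proba')[:, 1]
    y_pred = (y_scores >= 0.5).astype(int)
    # confusion matrix and per-class metrics
    print(confusion_matrix(y, y_pred))
    print(classification_report(y, y_pred))
    # ROC curve and area under it
    fpr, tpr, _ = roc_curve(y, y_scores)
    roc_auc = auc(fpr, tpr)
    plt.figure(figsize=(10, 6))
    plt.plot(fpr, tpr, label='Decision tree (AUC = %0.3f)' % roc_auc)
    plt.plot([0, 1], [0, 1], linestyle='--', color='grey')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Cross-validated ROC curve for the decision tree')
    plt.legend(loc='lower right')
    plt.savefig("./plots/DT_roc_curve.png")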
# find relationship between hotel average price and airbnb average price
def hotel_airbnb(data):
output1 = data.groupby(['zipcode'])['price'].mean().reset_index()
output1.columns = ['zipcode', 'averagePrice']
output2 = data.groupby(['zipcode'])['hotel_meanprice'].mean().reset_index()
output = | pd.merge(output1, output2, on='zipcode') | pandas.merge |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
import statsmodels.api as sm
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve
from sklearn import tree
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
data1 = pd.read_csv('[Track1_데이터3] samp_cst_feat.csv',encoding = 'euc-kr')
data2 = pd.read_csv('[Track1_데이터2] samp_train.csv',encoding = 'euc-kr')
data1["MRC_ID_DI"] = data2["MRC_ID_DI"]
categories = ['VAR007','VAR015','VAR018','VAR026','VAR059',
'VAR066','VAR067','VAR070','VAR077','VAR078',
'VAR094','VAR096','VAR097','VAR098','VAR107',
'VAR111','VAR124','VAR127','VAR143','VAR144',
'VAR145','VAR148','VAR165','VAR177','VAR179',
'VAR199','VAR208',"MRC_ID_DI"]
data1[categories] = data1[categories].astype("int64")
data1.groupby(["MRC_ID_DI"]).size()
# #### Classify customers into online-market users and non-users
data1["MRC_ID_DI"] = data1["MRC_ID_DI"].replace(range(1,11),1)
data1 = data1.drop(['cst_id_di'],axis = 1)
samsung = sm.add_constant(data1, has_constant = 'add')
samsung.head()
feature_columns = list(samsung.columns.difference(["MRC_ID_DI"]))
X = samsung[feature_columns]
y = samsung["MRC_ID_DI"]
print(y)
x_train, x_test, y_train, y_test = train_test_split(X, y,
train_size = 0.7, test_size = 0.3,
random_state = 100) #set_seed
print("x_train.shape = {}, x_test.shape = {}, y_train.shape = {}, y_test.shape = {}".format(x_train.shape, x_test.shape,
y_train.shape, y_test.shape))
model = sm.Logit(y_train, x_train)
results = model.fit(method = "newton")
results.summary()
results.params
np.exp(results.params)
results.aic
y_pred = results.predict(x_test)
y_pred
# +
def PRED(y, threshold):
Y = y.copy()
Y[Y > threshold] = 1
Y[Y <= threshold] = 0
return(Y.astype(int))
Y_pred = PRED(y_pred,0.5)
Y_pred
# -
# ### Confusion matrix
cfmat = confusion_matrix(y_test, Y_pred)
def acc(cfmat) :
acc = round((cfmat[0,0]+cfmat[1,1])/np.sum(cfmat),3)
return(acc)
acc(cfmat) # accuracy == 0.863
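# hedged add-on, not in the original notebook: ROC curve and AUC for the same Logit fit,
# using the predicted probabilities in y_pred and the imports already present above.
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
print('Logit AUC: {:.3f}'.format(roc_auc_score(y_test, y_pred)))
plt.plot(fpr, tpr, label='Logit')
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()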
pca = PCA(n_components = 10)
pca.fit(X)
PCscore = pca.transform(X)
PCscore[:,0:5]
eigens_vector = pca.components_.transpose()
eigens_vector
# +
mX = np.matrix(X)
(mX * eigens_vector)[:, 0:5]
# -
print(PCscore)
plt.scatter(PCscore[:, 0], PCscore[:, 1], c = y)
print(PCscore[:,0])
plt.show()
# +
distortions = []
for i in range(1, 11) :
km = KMeans(n_clusters = i, random_state = 102)
km.fit(X)
distortions.append(km.inertia_)
plt.plot(range(1, 11), distortions, marker = 'o')
plt.xlabel("# of clusters")
plt.ylabel("Distortion")
plt.show()
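# hedged follow-up, not in the original notebook: after picking an elbow from the plot
# above, fit KMeans with that k and inspect the cluster sizes (k=3 is only a placeholder).
km = KMeans(n_clusters=3, random_state=102)
cluster_labels = km.fit_predict(X)
print(np.bincount(cluster_labels))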
# +
lr_clf = LogisticRegression(max_iter = 10000)
lr_clf.fit(x_train, y_train)
pred_lr = lr_clf.predict(x_test)
print(accuracy_score(y_test, pred_lr))
print(mean_squared_error(y_test, pred_lr))
# -
bag_clf = BaggingClassifier(base_estimator = lr_clf,
n_estimators = 5,
verbose = 1)
lr_clf_bag = bag_clf.fit(x_train, y_train)
pred_lr_bag = lr_clf_bag.predict(x_test)
pred_lr_bag
print(accuracy_score(y_test, pred_lr_bag))
print(mean_squared_error(y_test, pred_lr_bag))
# +
from sklearn.tree import DecisionTreeClassifier
dt_clf = DecisionTreeClassifier()
dt_clf.fit(x_train, y_train)
pred_dt = dt_clf.predict(x_test)
print(accuracy_score(y_test, pred_dt))
print(mean_squared_error(y_test, pred_dt))
# -
rf_clf = RandomForestClassifier(n_estimators = 5,
max_depth = 3,
random_state = 103,
verbose = 1)
rf_clf.fit(x_train, y_train)
pred = rf_clf.predict(x_test)
print(accuracy_score(y_test, pred))
rf_clf = RandomForestClassifier(n_estimators = 500,
max_depth = 3,
random_state = 103,
verbose = 1)
rf_clf.fit(x_train, y_train)
pred = rf_clf.predict(x_test)
print(accuracy_score(y_test, pred))
rf_clf = RandomForestClassifier(n_estimators = 500,
max_depth = 10,
random_state = 103,
verbose = 1)
rf_clf.fit(x_train, y_train)
pred = rf_clf.predict(x_test)
print(accuracy_score(y_test, pred))
rf_clf4 = RandomForestClassifier()
# +
params = { 'n_estimators' : [10, 100, 500, 1000],
'max_depth' : [3, 5, 10, 15]}
rf_clf4 = RandomForestClassifier(random_state = 103,
n_jobs = -1,
verbose = 1)
grid_cv = GridSearchCV(rf_clf4,
param_grid = params,
n_jobs = -1,
verbose = 1)
grid_cv.fit(x_train, y_train)
print('Best hyperparameters: ', grid_cv.best_params_)
print('Best prediction accuracy: {:.4f}'.format(grid_cv.best_score_))
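# hedged add-on, not in the original notebook: evaluate the refitted best estimator
# (GridSearchCV refits it on the full training split by default) on the test split.
pred_best = grid_cv.best_estimator_.predict(x_test)
print('Tuned random forest test accuracy: {:.4f}'.format(accuracy_score(y_test, pred_best)))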
# +
test_acc = []
for n in range(1, 11):
clf = KNeighborsClassifier(n_neighbors = n)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
test_acc.append(accuracy_score(y_test, y_pred))
    print("k : {}, accuracy : {}".format(n, accuracy_score(y_test, y_pred)))
# -
test_acc
plt.figure()
plt.plot(range(1, 11), test_acc, label = 'test')
plt.xlabel("n_neighbors")
plt.ylabel("accuracy")
plt.xticks(np.arange(0, 11, step = 1))
plt.legend()
plt.show()
# +
clf_lin = svm.LinearSVC()
clf_lin.fit(x_train, y_train)
y_pred_lin = clf_lin.predict(x_test)
print(confusion_matrix(y_test, y_pred_lin))
print(accuracy_score(y_test, y_pred_lin))
# -
# #### Group shaping for 0 (non-users) and groups 1, 6, 8
group0, group1 = data1[data1["MRC_ID_DI"]==0], data1[data1["MRC_ID_DI"]==1]
group6, group8 = data1[data1["MRC_ID_DI"]==6], data1[data1["MRC_ID_DI"]==8]
print("group0.shape = {}, group1.shape = {}, group6.shape = {}, group8.shape = {}".format(group0.shape, group1.shape,
group6.shape, group8.shape))
group0, group1, group6, group8 = | pd.get_dummies(group0) | pandas.get_dummies |
###############################################################################################################
#################### Earthquake prediction model with Machine Learning and a neural network ###################
###############################################################################################################
# Python 3.8.7
# Required libraries: Numpy 1.19.5, Pandas 1.2.0, Sklearn 0.0, Keras 2.4.3, Tensorflow 2.4.0, Datetime 4.3
# Power BI dashboard with earthquake information
# https://app.powerbi.com/view?r=eyJrIjoiMDQzMTI5MWItMzAyZi00MzRkLTkxMDEtYjUwMzRjZmEyODY3IiwidCI6IjNhY2M3NWRiLTNhOTQtNDFmOS04N2M3LWIwNjE3MGRlZjEwYiJ9&pageName=ReportSectione26908533edc15cb8d45
# References
# Earthquakes magnitude predication using artificial neural network in northern Red Sea area
# https://www.sciencedirect.com/science/article/pii/S1018364711000280
# Earthquake data sources:
# From 1900 to 1969 | Tableau Resources - https://public.tableau.com/en-us/s/resources
# From 1970 to 2019 | Incorporated Research Institutions for Seismology (IRIS) - https://www.iris.edu/hq/
# Last updated: 21/01/2021
# Github: https://github.com/digiteos/earthquakes
##############################################################################################################
# 1) Importing Python libraries (Numpy and Pandas)
import numpy as np
import pandas as pd
# 2) Loading and reading the historical dataset (the csv file uses ';' as separator)
data = pd.read_csv("EarthQuakes-Data-1970-2019.csv", sep=';')
data.columns
# 3) Displaying the table head, number of records and data types
print(data.head())
print(data.shape)
print(data.dtypes)
# 4) Converting the date/time to Unix format (only supports data from 1970 onwards) so it can be processed by the neural network, and displaying the new dataset
import datetime
import time
timestamp = []
for d, t in zip(data['Date'], data['Time']):
try:
ts = datetime.datetime.strptime(d+' '+t, '%d/%m/%Y %H:%M:%S')
timestamp.append(time.mktime(ts.timetuple()))
except ValueError:
# print('ValueError')
timestamp.append('ValueError')
timeStamp = | pd.Series(timestamp) | pandas.Series |
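# hedged sketch of the usual next step (not taken from the original script): attach the
# Unix timestamps to the dataframe and drop the rows that could not be parsed, i.e. the
# 'ValueError' placeholders appended above, before feeding the data to the network.
final_data = data.copy()
final_data['Timestamp'] = timeStamp.values
final_data = final_data[final_data['Timestamp'] != 'ValueError']
print(final_data.head())
print(final_data.shape)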
"""
Interface for the self-driving car sensors dataset.
Description of the data:
"times" : seconds from start
"fiber_accel" : m/s**2
"fiber_compass" : this is a concatenation of x_y_z below
"fiber_compass_x" : magnetic north (we are not sure actually ;))
"fiber_compass_y" : orthogonal to magnetic north in car plane
"fiber_compass_z" : orthogonal to x and y
"fiber_gyro" : deg/s - roll, pitch, yaw in car-centric frame
"gps_1_pos" : ECEF coordinates
"gps_1_vel" : ECEF velocity
"gps_2_pos" : ECEF coordinates (another GPS sensor)
"gps_2_vel" : ECEF velocity (another GPS sensor)
"imu_compass" : same as compass above
"imu_gyro" : deg/s - roll, pitch, yaw in car-centric frame
"speed" : m/s
"speed_abs" : m/s
"steering_angle" : deg/m
"velodyne_gps" : ECEF
"velodyne_imu" : deg/s
"left_lanes" : 4 + 4 coefficients of the cubic polynomials (x and y)
"right_lanes" : 4 + 4 coefficients of the cubic polynomials (x and y)
"radar_leads" : x-y (front-left) coordinates of a detected vehicle
About the self-driving car:
http://www.bloomberg.com/features/2015-george-hotz-self-driving-car/
"""
import os
import sys
import h5py
import numpy as np
import pandas as pd
def load_data(start=0., stop=100., t_step=1, set_name='full',
ins=['gps_1_vel', 'fiber_gyro'], outs=['speed'],
gps_to_enu=False,
pts_per_lane=7,
verbose=1):
"""Load the car sensors data.
Arguments:
----------
t_step : uint
Take data points t_step apart from each other in time.
start : float in [0., 100.)
stop : float in (0., 100.]
ins : list of str
Names of the fields to use as inputs.
outs : list of str
Names of the fields to use as outputs.
verbose : uint (default: 1)
"""
if 'DATA_PATH' not in os.environ:
raise Exception("Cannot find DATA_PATH variable in the environment. "
"DATA_PATH should be the folder that contains "
"`self-driving/` directory with car sensors data. "
"Please export DATA_PATH before loading the data.")
datadir = os.path.join(os.environ['DATA_PATH'], 'self-driving')
dataset_name = 'car_sensors{}.h5'.format(
'_{}'.format(set_name) if set_name != 'full' else '')
dataset_path = os.path.join(datadir, dataset_name)
if not os.path.exists(dataset_path):
raise Exception("Cannot find data: %s" % dataset_path)
if verbose:
print('Loading data from %s...' % os.path.basename(dataset_path))
sys.stdout.flush()
f = h5py.File(dataset_path, 'r')
# Compute the data slice
N = len(f['times'][:])
start_t = int((start/100.) * N)
stop_t = int((stop/100.) * N)
idx = slice(start_t, stop_t, t_step)
# Read & preprocess inputs
input_nnz, input_vars = None, []
for name in ins:
X = f[name][idx]
if gps_to_enu and name.startswith('gps'):
assert X.shape[1] == 3
x0, y0, z0 = X[0, 0], X[0, 1], X[0, 2]
x, y, z = X[:, 0], X[:, 1], X[:, 2]
X = ecef2enu(x, y, z, x0, y0, z0)
if name.endswith('lanes'):
if verbose:
print('...constructing %s' % name)
sys.stdout.flush()
X, nnz = construct_lanes(X, pts_per_lane)
input_nnz = nnz if input_nnz is None \
else np.logical_and(input_nnz, nnz)
if name == 'radar_leads':
X[X[:, 0] < 0] = np.nan
X_df = pd.DataFrame(X).fillna(method='ffill')
X = X_df.values
# X[X[:, 0] < 0., 0], X[X[:, 1] < 0., 1] = 200., 0.
if len(X.shape) == 1:
input_vars.append(X[:, None])
else:
assert len(X.shape) == 2
input_vars.append(X)
# Read & preprocess targets
target_nnz, target_vars = None, []
for name in outs:
Y = f[name][idx]
if gps_to_enu and name.startswith('gps'):
assert Y.shape[1] == 3
x0, y0, z0 = Y[0, :]
x, y, z = Y[:, 0], Y[:, 1], Y[:, 2]
Y = ecef2enu(x, y, z, x0, y0, z0)
if name.endswith('lanes'):
if verbose:
print('...constructing %s' % name)
sys.stdout.flush()
Y, nnz = construct_lanes(Y, pts_per_lane)
target_nnz = nnz if target_nnz is None \
else np.logical_and(target_nnz, nnz)
if name == 'radar_leads':
Y[Y[:, 0] < 0] = np.nan
Y_df = | pd.DataFrame(Y) | pandas.DataFrame |
from __future__ import print_function
# this is a class to deal with aqs data
from builtins import zip
from builtins import range
from builtins import object
import os
from datetime import datetime
from zipfile import ZipFile
import pandas as pd
from numpy import array, arange
import inspect
import requests
class AQS(object):
def __init__(self):
# self.baseurl = 'https://aqs.epa.gov/aqsweb/airdata/'
self.objtype = 'AQS'
self.daily = False
self.baseurl = 'https://aqsdr1.epa.gov/aqsweb/aqstmp/airdata/'
self.dates = [datetime.strptime('2014-06-06 12:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2014-06-06 13:00:00', '%Y-%m-%d %H:%M:%S')]
self.renamedhcols = ['datetime_local', 'datetime', 'State_Code', 'County_Code',
'Site_Num', 'Parameter_Code', 'POC', 'Latitude', 'Longitude',
'Datum', 'Parameter_Name', 'Obs', 'Units',
'MDL', 'Uncertainty', 'Qualifier', 'Method_type', 'Method_Code',
'Method_Name', 'State_Name', 'County_Name', 'Date_of_Last_Change']
self.renameddcols = ['datetime_local', 'State_Code', 'County_Code', 'Site_Num',
'Parameter_Code', 'POC', 'Latitude', 'Longitude', 'Datum',
'Parameter_Name', 'Sample_Duration', 'Pollutant_Standard',
'Units', 'Event_Type', 'Observation_Count',
'Observation_Percent', 'Obs', '1st_Max_Value',
'1st_Max Hour', 'AQI', 'Method_Code', 'Method_Name',
'Local_Site_Name', 'Address', 'State_Name', 'County_Name',
'City_Name', 'MSA_Name', 'Date_of_Last_Change']
self.savecols = ['datetime_local', 'datetime', 'SCS',
'Latitude', 'Longitude', 'Obs', 'Units', 'Species']
self.se_states = array(
['Alabama', 'Florida', 'Georgia', 'Mississippi', 'North Carolina', 'South Carolina', 'Tennessee',
'Virginia', 'West Virginia'], dtype='|S14')
self.se_states_abv = array(
['AL', 'FL', 'GA', 'MS', 'NC', 'SC', 'TN',
'VA', 'WV'], dtype='|S14')
self.ne_states = array(['Connecticut', 'Delaware', 'District Of Columbia', 'Maine', 'Maryland', 'Massachusetts',
'New Hampshire', 'New Jersey', 'New York', 'Pennsylvania', 'Rhode Island', 'Vermont'],
dtype='|S20')
self.ne_states_abv = array(['CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT'],
dtype='|S20')
self.nc_states = array(
['Illinois', 'Indiana', 'Iowa', 'Kentucky', 'Michigan',
'Minnesota', 'Missouri', 'Ohio', 'Wisconsin'],
dtype='|S9')
self.nc_states_abv = array(['IL', 'IN', 'IA', 'KY', 'MI', 'MN', 'MO', 'OH', 'WI'],
dtype='|S9')
self.sc_states = array(
['Arkansas', 'Louisiana', 'Oklahoma', 'Texas'], dtype='|S9')
self.sc_states_abv = array(['AR', 'LA', 'OK', 'TX'], dtype='|S9')
self.r_states = array(['Arizona', 'Colorado', 'Idaho', 'Kansas', 'Montana', 'Nebraska', 'Nevada', 'New Mexico',
'North Dakota', 'South Dakota', 'Utah', 'Wyoming'], dtype='|S12')
self.r_states_abv = array(['AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD', 'UT', 'WY'],
dtype='|S12')
self.p_states = array(
['California', 'Oregon', 'Washington'], dtype='|S10')
self.p_states_abv = array(['CA', 'OR', 'WA'], dtype='|S10')
self.datadir = '.'
self.cwd = os.getcwd()
self.df = None # hourly dataframe
self.monitor_file = inspect.getfile(
self.__class__)[:-13] + '/data/monitoring_site_locations.dat'
self.monitor_df = None
self.d_df = None # daily dataframe
def check_file_size(self, url):
test = requests.head(url).headers
if int(test['Content-Length']) > 1000:
return True
else:
return False
def retrieve_aqs_hourly_pm25_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url1 = self.baseurl + 'hourly_88101_' + year + '.zip'
if self.check_file_size(url1):
print('Downloading Hourly PM25 FRM: ' + url1)
filename = wget.download(url1)
print('')
print('Unpacking: ' + url1)
dffrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dffrm.columns = self.renamedhcols
dffrm['SCS'] = array(
dffrm['State_Code'].values * 1.E7 +
dffrm['County_Code'].values * 1.E4 + dffrm['Site_Num'].values,
dtype='int32')
else:
dffrm = pd.DataFrame(columns=self.renamedhcols)
url2 = self.baseurl + 'hourly_88502_' + year + '.zip'
if self.check_file_size(url2):
print('Downloading Hourly PM25 NON-FRM: ' + url2)
filename = wget.download(url2)
print('')
print('Unpacking: ' + url2)
dfnfrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dfnfrm.columns = self.renamedhcols
dfnfrm['SCS'] = array(
dfnfrm['State_Code'].values * 1.E7 +
dfnfrm['County_Code'].values *
1.E4 + dfnfrm['Site_Num'].values,
dtype='int32')
else:
dfnfrm = pd.DataFrame(columns=self.renamedhcols)
if self.check_file_size(url1) | self.check_file_size(url2):
df = pd.concat([dfnfrm, dffrm], ignore_index=True)
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
# df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
df['Species'] = 'PM2.5'
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_25_88101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_25_88101_' +
year + '.hdf', 'df', format='table')
else:
df = pd.DataFrame(columns=self.renamedhcols)
return df
def retrieve_aqs_hourly_ozone_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_44201_' + year + '.zip'
print('Downloading Hourly Ozone: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_OZONE_44201_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_OZONE_44201_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_pm10_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_81102_' + year + '.zip'
print('Downloading Hourly PM10: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_10_81102_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_10_81102_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_so2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42401_' + year + '.zip'
print('Downloading Hourly SO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_SO2_42401_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SO2_42401_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_no2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42602_' + year + '.zip'
print('Downloading Hourly NO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_NO2_42602_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NO2_42602_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_co_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42101_' + year + '.zip'
print('Downloading Hourly CO: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_CO_42101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_CO_42101_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_nonoxnoy_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_NONOxNOy_' + year + '.zip'
print('Downloading Hourly NO NOx NOy: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_NONOXNOY_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NONOXNOY_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_voc_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_VOCS_' + year + '.zip'
print('Downloading Hourly VOCs: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df, voc=True)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_VOC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_VOC_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_spec_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_SPEC_' + year + '.zip'
if self.check_file_size(url):
print('Downloading PM Speciation: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_SPEC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SPEC_' + year + '.hdf', 'df', format='table')
return df
else:
return pd.DataFrame(columns=self.renamedhcols)
def retrieve_aqs_hourly_wind_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_WIND_' + year + '.zip'
print('Downloading AQS WIND: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_WIND_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_WIND_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_temp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_TEMP_' + year + '.zip'
print('Downloading AQS TEMP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_TEMP_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_TEMP_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_rhdp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_RH_DP_' + year + '.zip'
print('Downloading AQS RH and DP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_RHDP_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_RHDP_' + year + '.hdf', 'df', format='table')
return df
def load_aqs_pm25_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_PM_25_88101_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_pm25_data(dates)
if aqs.empty:
return aqs
else:
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_voc_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_VOC_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_voc_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_ozone_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_OZONE_44201_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_ozone_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.Units = 'ppb'
aqs.Obs = aqs.Obs.values * 1000.
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_pm10_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_PM_10_81102_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_pm10_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_so2_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_SO2_42401_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_so2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_no2_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_NO2_42602_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_no2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_co_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_CO_42101_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_co_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_spec_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_SPEC_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
print('Retrieving Data')
aqs = self.retrieve_aqs_hourly_spec_data(dates)
if aqs.empty:
return pd.DataFrame(columns=self.renamedhcols)
else:
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_wind_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_WIND_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_wind_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_temp_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_TEMP_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_temp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_rhdp_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_RHDP_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_rhdp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_nonoxnoy_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_NONOXNOY_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_nonoxnoy_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_data(self, param, dates):
if param == 'PM2.5':
df = self.load_aqs_pm25_data(dates)
elif param == 'PM10':
df = self.load_aqs_pm10_data(dates)
elif param == 'SPEC':
df = self.load_aqs_spec_data(dates)
elif param == 'CO':
df = self.load_aqs_co_data(dates)
elif param == 'OZONE':
df = self.load_aqs_ozone_data(dates)
elif param == 'SO2':
df = self.load_aqs_so2_data(dates)
elif param == 'VOC':
df = self.load_aqs_voc_data(dates)
elif param == 'NONOXNOY':
df = self.load_aqs_nonoxnoy_data(dates)
elif param == 'WIND':
df = self.load_aqs_wind_data(dates)
elif param == 'TEMP':
df = self.load_aqs_temp_data(dates)
elif param == 'RHDP':
df = self.load_aqs_rhdp_data(dates)
return df
def load_daily_data(self, param, dates):
if param == 'PM2.5':
df = self.load_aqs_daily_pm25_data(dates)
elif param == 'PM10':
df = self.load_aqs_daily_pm10_data(dates)
elif param == 'SPEC':
df = self.load_aqs_daily_spec_data(dates)
elif param == 'CO':
df = self.load_aqs_daily_co_data(dates)
elif param == 'OZONE':
            df = self.load_aqs_daily_ozone_data(dates)
elif param == 'SO2':
df = self.load_aqs_daily_so2_data(dates)
elif param == 'VOC':
df = self.load_aqs_daily_voc_data(dates)
elif param == 'NONOXNOY':
df = self.load_aqs_daily_nonoxnoy_data(dates)
elif param == 'WIND':
df = self.load_aqs_daily_wind_data(dates)
elif param == 'TEMP':
df = self.load_aqs_daily_temp_data(dates)
elif param == 'RHDP':
df = self.load_aqs_daily_rhdp_data(dates)
return df
def load_all_hourly_data2(self, dates, datasets='all'):
import dask
import dask.dataframe as dd
os.chdir(self.datadir)
params = ['SPEC', 'PM10', 'PM2.5', 'CO', 'OZONE',
'SO2', 'VOC', 'NONOXNOY', 'WIND', 'TEMP', 'RHDP']
dfs = [dask.delayed(self.load_data)(i, dates) for i in params]
dff = dd.from_delayed(dfs)
# dff = dff.drop_duplicates()
self.df = dff.compute()
self.df = self.change_units(self.df)
# self.df = pd.concat(dfs, ignore_index=True)
# self.df = self.change_units(self.df).drop_duplicates(subset=['datetime','SCS','Species','Obs']).dropna(subset=['Obs'])
os.chdir(self.cwd)
def load_all_daily_data(self, dates, datasets='all'):
import dask
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
os.chdir(self.datadir)
pbar = ProgressBar()
pbar.register()
params = ['SPEC', 'PM10', 'PM2.5', 'CO', 'OZONE',
'SO2', 'VOC', 'NONOXNOY', 'WIND', 'TEMP', 'RHDP']
# dfs = [dask.delayed(self.load_daily_data)(i,dates) for i in params]
# print dfs
# dff = dd.from_delayed(dfs)
# self.d_df = dff.compute()
dfs = [self.load_daily_data(i, dates) for i in params]
self.d_df = pd.concat(dfs, ignore_index=True)
self.d_df = self.change_units(self.d_df)
os.chdir(self.cwd)
def get_all_hourly_data(self, dates):
os.chdir(self.datadir)
dfs = [self.load_aqs_co_data(dates), self.load_aqs_pm10_data(dates), self.load_aqs_ozone_data(dates),
self.load_aqs_pm25_data(dates), self.load_aqs_spec_data(
dates), self.load_aqs_no2_data(dates),
self.load_aqs_so2_data(dates), self.load_aqs_voc_data(
dates), self.load_aqs_nonoxnoy_data(dates),
self.load_aqs_wind_data(dates), self.load_aqs_temp_data(dates), self.load_aqs_rhdp_data(dates)]
os.chdir(self.cwd)
def load_all_hourly_data(self, dates, datasets='all'):
os.chdir(self.datadir)
if datasets.upper() == 'PM':
dfs = [self.load_aqs_pm10_data(dates), self.load_aqs_pm25_data(
dates), self.load_aqs_spec_data(dates)]
else:
dfs = [self.load_aqs_co_data(dates), self.load_aqs_pm10_data(dates), self.load_aqs_ozone_data(dates),
self.load_aqs_pm25_data(dates), self.load_aqs_spec_data(
dates), self.load_aqs_no2_data(dates),
self.load_aqs_so2_data(dates), self.load_aqs_voc_data(
dates), self.load_aqs_nonoxnoy_data(dates),
self.load_aqs_wind_data(
dates), self.load_aqs_temp_data(dates),
self.load_aqs_rhdp_data(dates)] # ,self.load_aqs_daily_spec_data(dates)]
self.df = pd.concat(dfs, ignore_index=True)
self.df = self.change_units(self.df).drop_duplicates()
os.chdir(self.cwd)
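    # Example usage sketch (not part of the original class; assumes internet access for
    # the EPA downloads and local disk space for the cached HDF files):
    #
    #     from datetime import datetime
    #     aqs = AQS()
    #     dates = [datetime(2014, 6, 1), datetime(2014, 6, 30)]
    #     aqs.load_all_hourly_data(dates, datasets='PM')
    #     print(aqs.df.groupby('Species')['Obs'].describe())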
def load_aqs_daily_pm25_data(self, dates):
from datetime import timedelta
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_PM25_' + year + '.hdf'
if os.path.isfile(fname):
aqs = | pd.read_hdf(fname) | pandas.read_hdf |
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
wqData = waterQuality.DataModelWQ('sbWT')
siteNoLst = wqData.info['siteNo'].unique().tolist()
# trainSetLst = ['Y1', 'Y2']
trainSet = 'Y1'
dfCorrLst = [pd.DataFrame(index=siteNoLst, columns=usgs.varC)
for x in range(2)]
dfRmseLst = [pd.DataFrame(index=siteNoLst, columns=usgs.varC)
for x in range(2)]
t0 = time.time()
for kk, siteNo in enumerate(siteNoLst):
print('{}/{} {:.2f}'.format(
kk, len(siteNoLst), time.time()-t0))
outFolder = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-F')
saveFile = os.path.join(outFolder, trainSet, siteNo)
dfP = pd.read_csv(saveFile, index_col=None).set_index('date')
dfP.index = | pd.to_datetime(dfP.index) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # shouldn't error on scalar data; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # shouldn't error on scalar data; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # shouldn't error on scalar data; it should demand list-like input
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
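        # _tuples is an internal cache of the materialized tuple values;
        # the point of this test is that inplace mutation must invalidate
        # that cache while non-inplace operations leave it untouched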
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
        # Levels should be (at least shallow-)copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # For labels it doesn't matter which way they were copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # For names it doesn't matter which way they were copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
        etalon = construct_1d_object_array_from_listlike([
            (1, pd.Timestamp('2000-01-01')), (1, pd.Timestamp('2000-01-02')),
            (2, pd.Timestamp('2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
        # n_lev > n_lab: the slice keeps all level values but only two labels
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
        # n_lev > n_lab: the slice keeps all level values but only two labels
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
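        # __contains__ falls back to get_loc, which understands partial
        # (top-level) keys, whereas the engine only stores complete tuples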
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
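        # a label of -1 marks a missing value at that position, so the
        # first entry of this index is ('C', NaT)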
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
        index = MultiIndex(
            levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
            labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                    np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                    np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
        index = MultiIndex(
            levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
            labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                    np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                    np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
        index = MultiIndex(levels=[[2000], lrange(4)],
                           labels=[np.array([0, 0, 0, 0]),
                                   np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
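        # from_product lays out the 2x2 cartesian product in row-major
        # order, so the key built from the second element of each level
        # sits at position 3 even when one of its components is missing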
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
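        # a scalar key is treated as a partial key on the first level, so
        # a hit comes back as a positional slice rather than an integer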
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
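        # the 2x2 product has four entries; looking up the third tuple
        # (idx[2]) must return position 2 for every dtype combination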
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
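        # the product is [(False, 0), (False, 1), (True, 0), (True, 1)];
        # integer keys 0/1 are cast to bool on the first level, but bool
        # keys are not cast to int on the second, hence the KeyErrors below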
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
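        # slice_locs treats both endpoints as inclusive labels, so slicing
        # from df.index[5] to df.index[15] keeps rows 5 through 15
        # (df[5:16]); nudging the endpoints by 30 seconds excludes them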
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
        index = MultiIndex(
            levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
            labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                    np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                    np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
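        # slicing with a key longer than the lexsort depth is only legal on
        # a sufficiently sorted MultiIndex; sorting by level 0 first makes
        # the full three-level keys below usable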
# works
sorted_index, _ = index.sortlevel(0)
        # TODO: should this also assert something about the returned locations?
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
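        # with method='pad'/'ffill' each idx1 entry maps to the position of
        # the last idx2 entry that is <= it (-1 when there is none);
        # 'backfill'/'bfill' is the mirror image using the next entry >= it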
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
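        # neither method='nearest' nor a tolerance is defined for the
        # tuple-valued keys of a MultiIndex, so both raise
        # NotImplementedError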
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
        index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
                                        (2, 'one'), (2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
        index = MultiIndex(
            levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
            labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                    np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                    np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
        # called with a non-MultiIndex (array of tuples); should not raise
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
        expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
                                              ('foo', 'two'), ('qux', 'one'),
                                              ('qux', 'two')])
        expected.names = first.names
        tm.assert_index_equal(result, expected)
        assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME data types changes to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
        expected = mi.get_level_values(level)
        tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
def test_is_monotonic_decreasing(self):
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
# string ordering
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['three', 'two', 'one']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['zenith', 'next', 'mom']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
'nl0000289783', 'lu0197800237',
'gb00b03mlx29']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
def test_is_strictly_monotonic_increasing(self):
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing
assert not idx._is_strictly_monotonic_increasing
def test_is_strictly_monotonic_decreasing(self):
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
def test_reconstruct_sort(self):
# starts off lexsorted & monotonic
mi = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert mi.is_lexsorted()
assert mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert recons.is_lexsorted()
assert recons.is_monotonic
assert mi is recons
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),
('x', 'b'), ('y', 'a'), ('z', 'b')],
names=['one', 'two'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
        assert Index(mi.values).equals(Index(recons.values))  # api: pandas.Index
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.base import BaseEstimator, TransformerMixin #gives fit_transform method for free
import pdb
from sklearn.base import TransformerMixin
from collections import defaultdict
####################################################################################################
class My_LabelEncoder(BaseEstimator, TransformerMixin):
"""
################################################################################################
###### The My_LabelEncoder class was developed by <NAME> for AutoViML #########
###### The My_LabelEncoder class works just like sklearn's Label Encoder but better! #######
##### It label encodes any cat var in your dataset. It also handles NaN's in your dataset! ####
## The beauty of this function is that it takes care of NaN's and unknown (future) values.#####
##################### This is the BEST working version - don't mess with it!! ##################
################################################################################################
Usage:
le = My_LabelEncoder()
le.fit_transform(train[column]) ## this will give your transformed values as an array
le.transform(test[column]) ### this will give your transformed values as an array
Usage in Column Transformers and Pipelines:
    No. It cannot be used in pipelines since it needs to produce two columns for the next stage in the pipeline.
See my other module called My_LabelEncoder_Pipe() to see how it can be used in Pipelines.
"""
def __init__(self):
self.transformer = defaultdict(str)
self.inverse_transformer = defaultdict(str)
self.max_val = 0
def fit(self,testx, y=None):
if isinstance(testx, pd.Series):
pass
elif isinstance(testx, np.ndarray):
            testx = pd.Series(testx)  # api: pandas.Series
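# --- Hypothetical usage sketch (added; not part of the original source). The column
# --- values below are invented and the call pattern follows the docstring above; it
# --- assumes the full AutoViML fit()/transform() implementation (the excerpt here is
# --- truncated mid-method).
if __name__ == "__main__":
    _train = pd.Series(["NY", "LA", "NY", np.nan], name="city")
    _test = pd.Series(["LA", "SF", np.nan], name="city")   # "SF" is unseen at fit time
    _le = My_LabelEncoder()
    print(_le.fit_transform(_train))   # integer codes, as described in the docstring
    print(_le.transform(_test))        # NaN / unseen values handled per the docstring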
from DataStreamerCpp import dsStream #custom module that wraps the cpp file api.
import numpy as np
import pandas as pd
import collections
import time
import config ## TODO decide whether to use this or not, or to provide some sort of different option for these variables.
import matplotlib.pyplot as plt
## Purpose of this class file is to provide an easy-to-read Python class for quick integration into a Python project:
## rather than having to examine the C++ code and output, you can just use this wrapper.
## additionally configuration args can be added in here without the need to expose them to the main file.
class DataStreamer(object):
def __init__(self, *args, **kwargs):
self.cppProcessor = dsStream()
return super().__init__(*args, **kwargs)
#initalize the stream
    #vRate: the variable rate of how many data points arrive per step (default step is 100 milliseconds)
    #stepRate: the step rate in milliseconds. default is 100, aka 1/10 of a second.
    #readerCount: TODO, number of readers to use as threads for incoming datapoints.
#randomRate: random noise to apply to the rate per step. default is 0
#randomStep: TODO
#loadMethod: TODO set which load method to use, default, no load balancer engaged.
def initialize(self, vR, stepRate = 10, randomRate = 0, randomStep = 0):
vRate = vR
if isinstance(vRate,(collections.Sequence, np.ndarray, pd.DataFrame)):
#vRate is a sequence, follow the sequence step by step
print("sequence")
if isinstance(vRate,np.ndarray):
vRate = vRate.tolist()
if isinstance(vRate,pd.DataFrame): #TODO, check how the interaction between dataframes and the list works for this.
vRate = vRate.values.tolist()
if len(vRate) > 0:
self.cppProcessor.setVRate(vRate)
else:
raise ValueError('Variable rates in Initialize must not be empty, consider using a scalar.')
else:
#vRate is a scalar
self.cppProcessor.setVRateScalar(vRate)
return 0
def process(self, X_train, y_train, X_test):
output = self.cppProcessor.initReaders(X_train, y_train, X_test)
return output
def checkComplete(self):
output = self.cppProcessor.checkComplete()
return output
def checkException(self):
output = self.cppProcessor.checkForThreadException()
return output
def getResultsCount(self):
output = self.cppProcessor.getResultsCount()
return output
def getResults(self):
output = self.cppProcessor.getResults()
return output
    #pause the processor; not immediate, but it stops the next item from being processed. Time recorders are also paused. If already paused, this has no effect.
def pause(self):
#TODO
#output = self.cppProcessor.pause()
return 0
#resume the processor after pause has been called. If not paused, no effect.
def resume(self):
#TODO
#output = self.cppProcessor.resume()
return 0
#####
##### Metric Calculation Section
#####
def caclulateErr(results, Print=False):
df =pd.DataFrame()
df["result"] = results["predicted"].str.strip("[]")
df["truth"] = results["Label"]
df['result'] = df['result'].astype(np.float64)
df['truth'] = df['truth'].astype(np.float64)
res =df.loc[~(df['result'] == df['truth'])]
output ="error rate: {}%".format(len(res)/len(results)*100)
if Print:
print(output)
return output
def caclulateLatency(results, vRate=None, Print=False):
df =pd.DataFrame()
df['latency'] = results['latency'].astype(np.float64)
#if isinstance(le_list,(,)):
#df['vRate'] = vRate
    res = df.loc[df['latency'] >= config.LATENCYBOUND]
output ="exceed rate: {}%".format(len(res)/len(results)*100)
if Print:
print(output)
vFig =plt.figure()
vAx = vFig.add_subplot(1,1,1)
vYRate = np.arange(0,len(vRate),config.READERINTERVAL)
vAx.plot(vRate,vYRate)
return output
def expandVRate(vRate, data):
valSum=0;
    newVRate = pd.DataFrame()  # api: pandas.DataFrame
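# --- Hypothetical usage sketch (added; not part of the original source). The data
# --- shapes, the 100-points-per-step rate and the polling loop are assumptions; only
# --- the method names are taken from the DataStreamer wrapper above.
if __name__ == "__main__":
    X_train = np.random.rand(100, 4)
    y_train = np.random.randint(0, 2, size=100)
    X_test = np.random.rand(20, 4)
    streamer = DataStreamer()
    streamer.initialize(100)                    # scalar rate: 100 points per step
    # streamer.initialize(np.arange(1, 200))    # or a per-step sequence / ndarray
    streamer.process(X_train, y_train, X_test)
    while not streamer.checkComplete():         # poll the C++ reader threads
        streamer.checkException()
        time.sleep(0.1)
    print(streamer.getResultsCount(), "results:", streamer.getResults())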
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import Models
from Models.Models import away_features, home_features, features, build_DT_classifier, build_RF_classifier, build_XGBoostClassifier
import Helper
import Models.moving_average_dataset
import Models.backtesting as backtesting
import Models.elo_model as elo_model
import dicts_and_lists as dal
import logging, coloredlogs
pd.set_option('display.max_rows', 1000)
# ------------ Hyperparameters ------------ #
leave_out = '2020'
margin = 0
betting_limiter = True
betting_limit = 0.125
prob_threshold = 0.65
prob_2x_bet = 0.99
offset = 0.0 # Added probability
average_N = 3
skip_n = 0
# ------ Logger ------- #
logger = logging.getLogger('test_models.py')
coloredlogs.install(level='INFO', logger=logger)
def extract_and_predict(next_game):
# Extract away_team Name and home_team Name from last_N_games_away and last_N_games_home
away_team = next_game['Team_away'].values[0]
home_team = next_game['Team_home'].values[0]
# Before predicting a game, check that it has not yet been predicted.
# This is the case where e.g., TeamHome's next game at home against TeamAway has been evaluated ...
    # by both the next home game and the next away game. They are the same game, which would therefore be predicted twice.
if next_game.index[0] not in evaluated_indexes:
# Track the inserted game based on its index
evaluated_indexes.append(next_game.index[0])
# Extract indexes for last N games
next_games_away_indexes = df.loc[df['Team_away'] == away_team].index
next_games_home_indexes = df.loc[df['Team_home'] == home_team].index
next_away_indexes_reduced = [x for x in next_games_away_indexes if x < next_game.index[0]][-average_N:]
next_home_indexes_reduced = [x for x in next_games_home_indexes if x < next_game.index[0]][-average_N:]
# Extract last N games based on indexes
last_N_games_away = df.iloc[next_away_indexes_reduced]
last_N_games_home = df.iloc[next_home_indexes_reduced]
# Concatenate the two teams with their average stats
to_predict = pd.concat(
[
last_N_games_away[away_features].mean(),
last_N_games_home[home_features].mean()
],
axis=0)[features]
# Standardize the input
to_predict = scaler.transform(to_predict.values.reshape(1,-1))
pred = int(clf.predict(to_predict))
true_value = next_game['Winner'].values[0]
predictions.append(pred)
winners.append(true_value)
prob = clf.predict_proba(to_predict)
model_prob.append(max(prob[0]))
model_odds.append(1/max(prob[0]))
odds_away.append(next_game['OddsAway'].values[0])
odds_home.append(next_game['OddsHome'].values[0])
dates_list.append(next_game['Date'].values[0])
home_teams_list.append(home_team)
away_teams_list.append(away_team)
# Only the most significant features will be considered
away_features = away_features
home_features = home_features
# Create the df containing stats per single game on every row
train_df = pd.read_csv('past_data/average_seasons/average_N_4Seasons.csv')
# Standardize the DataFrame
std_df, scaler = Helper.standardize_DataFrame(train_df)
### Test the Classification model based on the mean of the last average_N games ###
logger.info('\nSelect the type of model you want to backtest:\n\
[1]: Decision Tree + Elo Model\n\
[2]: Random Forest + Elo Model\n\
[3]: Random Forest + Elo Model + Build Moving Average Dataset\n\
[4]: XGBoost + Elo Model'
)
inp = input()
if inp == '1':
logger.info('Building a Decision Tree Classifier...')
clf = build_DT_classifier(std_df)
elif inp == '2':
logger.info('Building a Random Forest Classifier...')
clf = build_RF_classifier(std_df)
elif inp == '3':
Models.moving_average_dataset.build_moving_average_dataset(average_N, skip_n, leave_out=leave_out)
train_df = pd.read_csv('past_data/average_seasons/average_N_4Seasons.csv')
# Standardize the DataFrame
std_df, scaler = Helper.standardize_DataFrame(train_df)
logger.info('Building a Random Forest Classifier...')
clf = build_RF_classifier(std_df)
elif inp == '4':
clf = build_XGBoostClassifier(std_df)
# To evaluate accuracy
dates_list = []
predictions = []
winners = []
model_prob = []
model_odds = []
odds_away = []
odds_home = []
home_teams_list = []
away_teams_list = []
evaluated_indexes = []
# Backtest on the 2020/2021 Season
df = pd.read_csv('past_data/2020_2021/split_stats_per_game.csv')  # api: pandas.read_csv
import settings
import const
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True}) # to prevent labels going out of plot!
matplotlib.use('TkAgg')
import seaborn as sns
import matplotlib.pyplot as plt
from src.preprocess import reform
from src import util
config = settings.config[const.DEFAULT]
feature_dir = config[const.FEATURE_DIR]
suffix = '_12_3_7_24_8_6_12_1_7_24_hybrid_tests.csv'
paths = {
'BJ': {
# 'PM2.5': feature_dir + const.BJ_PM25 + suffix,
# 'PM10': feature_dir + const.BJ_PM10 + suffix,
# 'O3': feature_dir + const.BJ_O3 + suffix,
},
'LD': {
'PM2.5': feature_dir + const.LD_PM25 + suffix,
# 'PM10': feature_dir + const.LD_PM10 + suffix,
}
}
smape_columns = ['city', const.ID, const.LONG, const.LAT, 'pollutant', 'SMAPE', 'count']
smapes = pd.DataFrame(columns=smape_columns)
for city in paths:
station_path = config[const.BJ_STATIONS] if city == 'BJ' else config[const.LD_STATIONS]
    stations = pd.read_csv(station_path, sep=";", low_memory=False)  # api: pandas.read_csv
# run_xlm_inf.py
"""
Inference script for the XLM model (the baseline provided in the competition).
"""
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification, Trainer, TrainingArguments
from torch.utils.data import DataLoader
from solution.data.load_data import *
import pandas as pd
import torch
import torch.nn.functional as F
import numpy as np
import argparse
from tqdm import tqdm
from solution.utils import IDX2LABEL
def inference(model, tokenized_sent, device):
"""
    Wrap the test dataset in a DataLoader, then
    let the model predict on it in batches of batch_size.
"""
dataloader = DataLoader(tokenized_sent, batch_size=64, shuffle=False)
model.eval()
output_pred = []
output_prob = []
for i, data in enumerate(tqdm(dataloader)):
with torch.no_grad():
outputs = model(
input_ids=data['input_ids'].to(device),
attention_mask=data['attention_mask'].to(device),
#token_type_ids=data['token_type_ids'].to(device)
)
logits = outputs[0]
prob = F.softmax(logits, dim=-1).detach().cpu().numpy()
logits = logits.detach().cpu().numpy()
result = np.argmax(logits, axis=-1)
output_pred.append(result)
output_prob.append(prob)
return np.concatenate(output_pred).tolist(), np.concatenate(output_prob, axis=0).tolist()
def load_test_dataset(dataset_dir, tokenizer):
"""
    Load the test dataset, then
    tokenize it.
"""
test_dataset = load_data(dataset_dir)
test_label = list(map(int,test_dataset['label'].values))
# tokenizing dataset
tokenized_test = tokenized_dataset(test_dataset, tokenizer)
return test_dataset['id'], tokenized_test, test_label
def main(args):
"""
    Runs inference on any csv file that has the same format as the given dataset.
"""
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# load tokenizer
Tokenizer_NAME = "xlm-roberta-large"
tokenizer = AutoTokenizer.from_pretrained(Tokenizer_NAME)
## load my model
MODEL_NAME = args.model_dir # model dir.
model = AutoModelForSequenceClassification.from_pretrained(args.model_dir)
model.parameters
model.to(device)
## load test datset
test_dataset_dir = "../../dataset/test/test_data.csv"
test_id, test_dataset, test_label = load_test_dataset(test_dataset_dir, tokenizer)
Re_test_dataset = RE_Dataset(test_dataset ,test_label)
## predict answer
    pred_answer, output_prob = inference(model, Re_test_dataset, device) # infer classes with the model
    pred_answer = IDX2LABEL[pred_answer] # convert the numeric classes back to the original string labels.
## make csv file with predicted answer
#########################################################
    # Please keep the output directory and the column format shown below.
    output = pd.DataFrame({'id':test_id,'pred_label':pred_answer,'probs':output_prob,})  # api: pandas.DataFrame
import pandas as pd
import numpy as np
import pytest
from kgextension.caching_helper import freeze_unhashable, unfreeze_unhashable
class TestFreezeUnfreezeUnhashable:
def test1_arg_series(self):
@freeze_unhashable(freeze_by="argument", freeze_argument="the_arg")
def test_fun(a, b, c=12, the_arg=[]):
the_arg = unfreeze_unhashable(the_arg, frozen_type="series")
if a == 10 and b == 11 and c == 12:
return the_arg
else:
return None
df = pd.DataFrame({"a": [1,2,3,np.nan], "b": ["x", "y", "z", np.nan]})
s = df["a"]
s_unfrozen = test_fun(10, 11, the_arg=s)
pd.testing.assert_series_equal(s, s_unfrozen)
def test2_arg_series_kwargs(self):
@freeze_unhashable(freeze_by="argument", freeze_argument="the_arg")
def test_fun(a, b, c=12, the_arg=[]):
the_arg = unfreeze_unhashable(the_arg, frozen_type="series")
if a == 10 and b == 11 and c == 12:
return the_arg
else:
return None
df = pd.DataFrame({"a": [1,2,3,np.nan], "b": ["x", "y", "z", np.nan]})
s = df["a"]
s_unfrozen = test_fun(a=10, b=11, the_arg=s)
pd.testing.assert_series_equal(s, s_unfrozen)
def test3_arg_series_kwargs_noatttrib(self):
@freeze_unhashable(freeze_by="argument", freeze_argument="the_arg")
def test_fun(a=10, b=11, c=12, the_arg=[]):
the_arg = unfreeze_unhashable(the_arg, frozen_type="series")
if a == 10 and b == 11 and c == 12:
return the_arg
else:
return None
s_unfrozen = test_fun(a=10, b=11)
assert s_unfrozen == []
def test4_arg_dict(self):
@freeze_unhashable(freeze_by="argument", freeze_argument="the_arg")
def test_fun(a, b, c=12, the_arg=[]):
the_arg = unfreeze_unhashable(the_arg, frozen_type="dict")
if a == 10 and b == 11 and c == 12:
return the_arg
else:
return None
dict_attr = {"a": np.nan, "b": 2, "c": "hi"}
dict_unfrozen = test_fun(a=10, b=11, the_arg=dict_attr)
assert dict_attr == dict_unfrozen
def test5_index_series(self):
@freeze_unhashable(freeze_by="index", freeze_index=0)
def test_fun(the_arg, a, b, c=12):
the_arg = unfreeze_unhashable(the_arg, frozen_type="series")
if a == 10 and b == 11 and c == 12:
return the_arg
else:
return None
df = pd.DataFrame({"a": [1,2,3,np.nan], "b": ["x", "y", "z", np.nan]})
s = df["a"]
s_unfrozen = test_fun(s, 10, 11)
        pd.testing.assert_series_equal(s, s_unfrozen)  # api: pandas.testing.assert_series_equal
# text_association.py - calculates the similarity between the text and the influencers
import pandas as pd
from .text_cleaner import *
import re
from collections import Counter
import numpy as np
import pickle
from scipy.special import softmax
import tensorflow as tf
class TextProcessor(object):
def __init__(self):
# Load the required files
with open("text_processing/finetuned_s90_10_word_trait_array.pickle", "rb") as f:
self.word_df = pickle.load(f)
# Generate word map from AGDS
self.word_map = self.word_df.columns.tolist()
# Read archetype list and clean it up
self.arch_df = pd.read_csv("text_processing/archetypes_pl_new.csv", header=0, index_col=0)
self.arch_df = self.arch_df.fillna(2)
self.arch_df = self.arch_df[~self.arch_df.index.duplicated(keep='first')]
# Generate trait list
self.trait_list = self.arch_df.columns.tolist()
# Load LSTM model
self.test_model = tf.keras.models.load_model("text_processing/nn_model")
def extract_hashtags(self, post_text):
HASH_RE = re.compile(r"\#\w+")
out_list = re.findall(HASH_RE, post_text)
return out_list
def get_trait_dot_product(self, post_text: str) -> list:
# Filter out the text
filtered_post = remove_stopwords(clean_up_text(post_text))
filtered_post += self.extract_hashtags(post_text)
# Create a vector for dot product vector
post_vector = [0] * len(self.word_map)
# Calculate word occurrences
word_ctr = Counter(filtered_post)
for word, freq in word_ctr.items():
if word in self.word_map:
post_vector[self.word_map.index(word)] = freq
# Calculate dot product for a given text
word_dot = self.word_df.dot(post_vector)
out_vec = pd.Series()
for trait in self.trait_list:
out_vec = out_vec.append(pd.Series([np.argmax(softmax(word_dot.loc[trait]))], index=[trait]))
return out_vec
# Trait accuracy - round the results
def natural_round(x: float) -> int:
out = int(x // 1)
return out + 1 if (x - out) >= 0.5 else out
def accuracy_per_trait(input_vector: pd.Series, annotated_vector: pd.Series) -> np.array:
out_array = np.array([0] * 37, dtype=np.int)
for i in range(len(out_array)):
if input_vector[i] == annotated_vector[i]:
out_array[i] = 1
return out_array
# Method for calculating the similarity
def calculate_similarity(self, post_text: str) -> (pd.Series, pd.Series):
# Calculate word-trait dot product
post_result = self.get_trait_dot_product(post_text)
        # Generate a new Series - one distance entry per influencer archetype
inf_df = pd.Series(index=self.arch_df.index)
# Replace all data in temporary df with calculated post result
for idx in inf_df.index:
inf_df.loc[idx] = np.linalg.norm(self.arch_df.loc[idx] - post_result)
sorted_infs = inf_df.sort_values()
return post_result, sorted_infs
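    # Note (added): calculate_similarity returns the post's per-trait scores plus
    # every archetype sorted by ascending Euclidean distance to that trait vector,
    # so the closest (most similar) influencer archetype comes first.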
def clean_post(self, src_text: str) -> str:
# Extract posts and hashtags
extracted_text = remove_stopwords(clean_up_text(src_text))
extracted_hashtags = self.extract_hashtags(src_text)
return extracted_text + extracted_hashtags
def predict_nn(self, post_text):
# Preprocess the text
user_text = " ".join(self.clean_post(post_text))
# Make a prediction
prediction = self.test_model.predict([user_text])
# Process the predictions
predicted_classes = []
for trait in prediction:
predicted_classes.append(int(np.argmax(trait)))
predicted_dict = {trait: pred for trait, pred in zip(self.trait_list, predicted_classes)}
        series_pred = pd.Series(predicted_dict)  # api: pandas.Series
'''
May 2020 by <NAME>
<EMAIL>
https://www.github.com/sebbarb/
'''
import feather
import pandas as pd
import numpy as np
from hyperparameters import Hyperparameters
from pdb import set_trace as bp
def main():
hp = Hyperparameters()
# Load data
#df = feather.read_dataframe(hp.data_dir + 'Py_VARIANZ_2012_v3-1.feather')
    df = pd.read_feather(hp.data_dir + 'Py_VARIANZ_2012_v3-1.feather')  # api: pandas.read_feather
#Plot
import matplotlib.pyplot as plt
import seaborn as sns
from bleu import file_bleu
#Data Packages
import math
import pandas as pd
import numpy as np
#Progress bar
from tqdm import tqdm
#Counter
from collections import Counter
#Operation
import operator
#Natural Language Processing Packages
import re
import nltk
## Download Resources
nltk.download("vader_lexicon")
nltk.download("stopwords")
nltk.download("averaged_perceptron_tagger")
nltk.download("wordnet")
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.sentiment.util import *
from nltk import tokenize
from nltk.corpus import stopwords
from nltk.tag import PerceptronTagger
from nltk.data import find
sns.set(rc={'figure.figsize':(5,3.5)})
# CHANGE FILEPATH before running this locally
# Use vader to evaluated sentiment of reviews
def evalSentences(sentences, to_df=False, columns=[]):
# Instantiate an instance to access SentimentIntensityAnalyzer class
sid = SentimentIntensityAnalyzer()
pdlist = []
if to_df:
for sentence in tqdm(sentences):
ss = sid.polarity_scores(sentence)
pdlist.append([sentence] + [ss['compound']])
reviewDf = pd.DataFrame(pdlist)
reviewDf.columns = columns
return reviewDf
else:
for sentence in tqdm(sentences):
print(sentence)
ss = sid.polarity_scores(sentence)
for k in sorted(ss):
print('{0}: {1}, '.format(k, ss[k]), end='')
print()
def getHistogram(df, measure, title, hue=None, figsize=(5, 3)):
if hue:
sns_plot = sns.kdeplot(data=df, x=measure, hue=hue)
# sns_plot = sns.histplot(data=df, x=measure, hue=hue)
else:
sns_plot = sns.histplot(data=df, x=measure)
# sns_plot.set_title(title)
sns_plot.set_xlabel("Value")
sns_plot.set_ylabel("Density")
plt.tight_layout()
sns_plot.figure.savefig("{}.png".format(title))
def calculate_vader_ALE(filename=None):
print("Evaluate ALE")
if filename:
file_path = "./{}.txt".format(filename)
else:
file_path ="./outputext_step1_eps5.txt"
file_path_neg ="../../data/yelp/sentiment.test.0"
file_path_pos ="../../data/yelp/sentiment.test.1"
review_file = open(file_path, "r")
reviews = review_file.readlines()
review_file.close()
reviewDF = evalSentences(reviews, to_df=True, columns=['review','vader'])
# sanity check
assert(reviewDF.shape[0]==1000)
neg_2_pos = (reviewDF[:500]['vader']>=0).sum()
pos_2_neg = (reviewDF[500:]['vader']<=0).sum()
acc = (neg_2_pos+pos_2_neg)/1000
print("accuracy of changed sentences is {}".format(acc))
print("accuracy of pos_to_neg sentences is {}".format(pos_2_neg/500))
print("accuracy of neg_to_pos sentences is {}".format(neg_2_pos/500))
review_file_neg = open(file_path_neg, "r")
review_file_pos = open(file_path_pos, "r")
reviews_neg = review_file_neg.readlines()
reviews_pos = review_file_pos.readlines()
review_file_neg.close()
review_file_pos.close()
reviewDF_neg = evalSentences(reviews_neg, to_df=True, columns=['review','vader'])
reviewDF_pos = evalSentences(reviews_pos, to_df=True, columns=['review','vader'])
# sanity check
assert(reviewDF_neg.shape[0]==500)
assert (reviewDF_pos.shape[0] == 500)
pos_acc = (reviewDF_pos['vader']>=0).sum()
neg_acc = (reviewDF_neg['vader']<=0).sum()
org_acc = (pos_acc+neg_acc)/1000
print("accuracy of original sentences is {}".format(org_acc))
print("accuracy of original positive sentences is {}".format(pos_acc/500))
print("accuracy of original negative sentences is {}".format(neg_acc/500))
return reviewDF, reviewDF_pos, reviewDF_neg
def calculate_style_trans():
print("Evaluate Style Transformer")
file_path ="./style_transformer.txt"
review_file = open(file_path, "r")
reviews_raw = review_file.readlines()
review_file.close()
reviews_pos_to_neg = [] # changed sentence
reviews_neg_to_pos = [] # changed sentence
reviews_pos = [] #original pos
reviews_neg = [] #original neg
pos_example = False
neg_example = False
for sent in reviews_raw:
if sent.startswith("[raw 0.0]"):
reviews_neg.append(sent[11:])
neg_example = True
pos_example = False
elif sent.startswith("[raw 1.0]"):
reviews_pos.append(sent[11:])
neg_example = False
pos_example = True
elif sent.startswith("[rev 0.0]") and pos_example:
reviews_pos_to_neg.append(sent[11:])
pos_example = False
neg_example = False
elif sent.startswith("[rev 1.0]") and neg_example:
reviews_neg_to_pos.append(sent[11:])
pos_example = False
neg_example = False
assert (len(reviews_pos_to_neg) == 500)
assert (len(reviews_neg_to_pos) == 500)
assert (len(reviews_pos) == 500)
assert (len(reviews_neg) == 500)
reviewDF_pos_to_neg = evalSentences(reviews_pos_to_neg, to_df=True, columns=['review','vader'])
reviewDF_neg_to_pos = evalSentences(reviews_neg_to_pos, to_df=True, columns=['review','vader'])
neg_2_pos = (reviewDF_neg_to_pos['vader']>=0).sum()
pos_2_neg = (reviewDF_pos_to_neg['vader']<=0).sum()
acc = (neg_2_pos+pos_2_neg)/1000
print("accuracy of changed sentences is {}".format(acc))
print("accuracy of pos_to_neg sentences is {}".format(pos_2_neg/500))
print("accuracy of neg_to_pos sentences is {}".format(neg_2_pos/500))
reviewDF_neg = evalSentences(reviews_neg, to_df=True, columns=['review','vader'])
reviewDF_pos = evalSentences(reviews_pos, to_df=True, columns=['review','vader'])
# sanity check
assert(reviewDF_neg.shape[0]==500)
assert (reviewDF_pos.shape[0] == 500)
pos_acc = (reviewDF_pos['vader']>=0).sum()
neg_acc = (reviewDF_neg['vader']<=0).sum()
org_acc = (pos_acc+neg_acc)/1000
print("accuracy of original sentences is {}".format(org_acc))
print("accuracy of original positive sentences is {}".format(pos_acc/500))
print("accuracy of original negative sentences is {}".format(neg_acc/500))
return reviewDF_pos_to_neg, reviewDF_neg_to_pos, reviewDF_pos, reviewDF_neg
def graph_ALE(reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE, color1, color2):
reviewDF_pos_ALE['label'] = "POS"
reviewDF_neg_ALE['label'] = "NEG"
reviewDF_org = pd.concat((reviewDF_neg_ALE, reviewDF_pos_ALE), 0).reset_index(drop=True)
assert (reviewDF_org['review']==reviewDF_ALE['review']).any() # there are definitely unchanged sentence, otherwise the ordering is wrong
reviewDF_org = reviewDF_org.rename(columns={"vader":"vader_original"})
reviewDF_ALE_all = pd.concat([reviewDF_ALE, reviewDF_org], axis=1, join="inner")
reviewDF_ALE_all = reviewDF_ALE_all.loc[:,~reviewDF_ALE_all.columns.duplicated()]
reviewDF_ALE_all['change in vader'] = reviewDF_ALE_all['vader'] - reviewDF_ALE_all['vader_original']
# getHistogram(reviewDF_ALE_all, 'change in vader', 'ALE change in vader score', hue="label")
pal = dict(POS=color2, NEG=color1)
sns_plot = sns.kdeplot(data=reviewDF_ALE_all, x='change in vader', hue="label", palette=pal)
# sns_plot = sns.kdeplot(data=reviewDF_ALE_all, x='change in vader', color=color1)
return sns_plot
def draw_transition_graph():
palette = sns.color_palette("coolwarm", n_colors=10)
i=0
for filename in ["outputext_step1_eps0.5", "outputext_step1_eps2", "outputext_step1_eps3", "outputext_step1_eps4", "outputext_step1_eps5"]:
color1 = palette[4-i]
color2 = palette[i+5]
i+=1
reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE = calculate_vader_ALE(filename)
plot = graph_ALE(reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE, color1, color2)
plot.figure.savefig("ALE transition")
def graph_ALE_vader():
reviewDF_ALE, reviewDF_pos_ALE, reviewDF_neg_ALE = calculate_vader_ALE()
reviewDF_pos_ALE['label'] = "POS → NEG"
reviewDF_neg_ALE['label'] = "NEG → POS"
reviewDF_org = pd.concat((reviewDF_neg_ALE, reviewDF_pos_ALE), 0).reset_index(drop=True)
assert (reviewDF_org['review']==reviewDF_ALE['review']).any() # there are definitely unchanged sentence, otherwise the ordering is wrong
reviewDF_org = reviewDF_org.rename(columns={"vader":"vader_original"})
reviewDF_ALE_all = pd.concat([reviewDF_ALE, reviewDF_org], axis=1, join="inner")
reviewDF_ALE_all = reviewDF_ALE_all.loc[:,~reviewDF_ALE_all.columns.duplicated()]
reviewDF_ALE_all['change in vader'] = reviewDF_ALE_all['vader'] - reviewDF_ALE_all['vader_original']
getHistogram(reviewDF_ALE_all, 'change in vader', 'change in vader score (ALE)', hue="label")
def graph_ST_vader():
reviewDF_pos_to_neg, reviewDF_neg_to_pos, reviewDF_pos, reviewDF_neg = calculate_style_trans()
reviewDF_pos['label'] = "POS → NEG"
reviewDF_neg['label'] = "NEG → POS"
    reviewDF_org = pd.concat((reviewDF_neg, reviewDF_pos), 0)  # api: pandas.concat
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 12:51:57 2021
@author: Administrator
"""
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
def apply(decorator):
def decorate(cls):
for attr in cls.__dict__:
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
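# Note (added): `apply` is a class-decorator factory -- e.g. @apply(my_wrapper)
# would wrap every callable attribute of the decorated class with `my_wrapper`.
# It is defined here but not applied to the TA class below in this excerpt.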
class TA:
__version__ = "1.2"
@classmethod
def SMA(cls, ohlc: DataFrame, period: int = 41, column: str = "close") -> Series:
"""
Simple moving average - rolling mean in pandas lingo. Also known as 'MA'.
The simple moving average (SMA) is the most basic of the moving averages used for trading.
"""
return pd.Series(
ohlc[column].rolling(window=period).mean(),
name="{0} period SMA".format(period),
)
@classmethod
def SMM(cls, ohlc: DataFrame, period: int = 9, column: str = "close") -> Series:
"""
Simple moving median, an alternative to moving average. SMA, when used to estimate the underlying trend in a time series,
is susceptible to rare events such as rapid shocks or other anomalies. A more robust estimate of the trend is the simple moving median over n time periods.
"""
return pd.Series(
ohlc[column].rolling(window=period).median(),
name="{0} period SMM".format(period),
)
@classmethod
def SSMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Smoothed simple moving average.
:param ohlc: data
:param period: range
:param column: open/close/high/low column of the DataFrame
:return: result Series
"""
return pd.Series(
ohlc[column]
.ewm(ignore_na=False, alpha=1.0 / period, min_periods=0, adjust=adjust)
.mean(),
name="{0} period SSMA".format(period),
)
@classmethod
def EMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Exponential Weighted Moving Average - Like all moving average indicators, they are much better suited for trending markets.
When the market is in a strong and sustained uptrend, the EMA indicator line will also show an uptrend and vice-versa for a down trend.
EMAs are commonly used in conjunction with other indicators to confirm significant market moves and to gauge their validity.
"""
return pd.Series(
ohlc[column].ewm(span=period, adjust=adjust).mean(),
name="{0} period EMA".format(period),
)
@classmethod
def DEMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Double Exponential Moving Average - attempts to remove the inherent lag associated to Moving Averages
by placing more weight on recent values. The name suggests this is achieved by applying a double exponential
smoothing which is not the case. The name double comes from the fact that the value of an EMA (Exponential Moving Average) is doubled.
To keep it in line with the actual data and to remove the lag the value 'EMA of EMA' is subtracted from the previously doubled EMA.
Because EMA(EMA) is used in the calculation, DEMA needs 2 * period -1 samples to start producing values in contrast to the period
samples needed by a regular EMA
"""
DEMA = (
2 * cls.EMA(ohlc, period)
- cls.EMA(ohlc, period).ewm(span=period, adjust=adjust).mean()
)
return pd.Series(DEMA, name="{0} period DEMA".format(period))
@classmethod
def TEMA(cls, ohlc: DataFrame, period: int = 9, adjust: bool = True) -> Series:
"""
Triple exponential moving average - attempts to remove the inherent lag associated to Moving Averages by placing more weight on recent values.
The name suggests this is achieved by applying a triple exponential smoothing which is not the case. The name triple comes from the fact that the
        value of an EMA (Exponential Moving Average) is tripled.
To keep it in line with the actual data and to remove the lag the value 'EMA of EMA' is subtracted 3 times from the previously tripled EMA.
Finally 'EMA of EMA of EMA' is added.
Because EMA(EMA(EMA)) is used in the calculation, TEMA needs 3 * period - 2 samples to start producing values in contrast to the period samples
needed by a regular EMA.
"""
triple_ema = 3 * cls.EMA(ohlc, period)
ema_ema_ema = (
cls.EMA(ohlc, period)
.ewm(ignore_na=False, span=period, adjust=adjust)
.mean()
.ewm(ignore_na=False, span=period, adjust=adjust)
.mean()
)
TEMA = (
triple_ema
- 3 * cls.EMA(ohlc, period).ewm(span=period, adjust=adjust).mean()
+ ema_ema_ema
)
return pd.Series(TEMA, name="{0} period TEMA".format(period))
@classmethod
def TRIMA(cls, ohlc: DataFrame, period: int = 18) -> Series:
"""
The Triangular Moving Average (TRIMA) [also known as TMA] represents an average of prices,
but places weight on the middle prices of the time period.
The calculations double-smooth the data using a window width that is one-half the length of the series.
source: https://www.thebalance.com/triangular-moving-average-tma-description-and-uses-1031203
"""
SMA = cls.SMA(ohlc, period).rolling(window=period).sum()
return pd.Series(SMA / period, name="{0} period TRIMA".format(period))
@classmethod
def TRIX(
cls,
ohlc: DataFrame,
period: int = 20,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
The TRIX indicator calculates the rate of change of a triple exponential moving average.
The values oscillate around zero. Buy/sell signals are generated when the TRIX crosses above/below zero.
A (typically) 9 period exponential moving average of the TRIX can be used as a signal line.
Buy/sell signals are generated when the TRIX crosses above/below the signal line while also being above/below zero.
The TRIX was developed by <NAME>, publisher of Technical Analysis of Stocks & Commodities magazine,
and was introduced in Volume 1, Number 5 of that magazine.
"""
data = ohlc[column]
def _ema(data, period, adjust):
return pd.Series(data.ewm(span=period, adjust=adjust).mean())
m = _ema(_ema(_ema(data, period, adjust), period, adjust), period, adjust)
return pd.Series(100 * (m.diff() / m), name="{0} period TRIX".format(period))
@classmethod
def LWMA(cls, ohlc: DataFrame, period: int, column: str = "close") -> Series:
"""
Linear Weighted Moving Average
"""
raise NotImplementedError
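# LWMA above is left unimplemented. A minimal sketch of a linearly weighted moving
# average follows for reference; the method name LWMA_sketch is an assumption and
# not part of the original API (the WMA method further below applies the same
# linear weighting scheme).
@classmethod
def LWMA_sketch(cls, ohlc: DataFrame, period: int, column: str = "close") -> Series:
    """Linearly weighted moving average: weights 1..period, newest bar weighted most heavily."""
    weights = np.arange(1, period + 1)
    denominator = weights.sum()
    lwma = (
        ohlc[column]
        .rolling(period, min_periods=period)
        .apply(lambda window: np.dot(window, weights) / denominator, raw=True)
    )
    return pd.Series(lwma, name="{0} period LWMA.".format(period))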
@classmethod
def VAMA(cls, ohlcv: DataFrame, period: int = 8, column: str = "close") -> Series:
"""
Volume Adjusted Moving Average
"""
vp = ohlcv["volume"] * ohlcv[column]
volsum = ohlcv["volume"].rolling(window=period).mean()
volRatio = pd.Series(vp / volsum, name="VAMA")
cumSum = (volRatio * ohlcv[column]).rolling(window=period).sum()
cumDiv = volRatio.rolling(window=period).sum()
return pd.Series(cumSum / cumDiv, name="{0} period VAMA".format(period))
@classmethod
def VIDYA(
cls,
ohlcv: DataFrame,
period: int = 9,
smoothing_period: int = 12,
column: str = "close",
) -> Series:
""" Vidya (variable index dynamic average) indicator is a modification of the traditional Exponential Moving Average (EMA) indicator.
The main difference between EMA and Vidya is in the way the smoothing factor F is calculated.
In EMA the smoothing factor is a constant value F=2/(period+1);
in Vidya the smoothing factor is variable and depends on bar-to-bar price movements."""
raise NotImplementedError
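# VIDYA above is left unimplemented. The sketch below shows one common Chande
# formulation in which the EMA smoothing factor is scaled by the absolute Chande
# Momentum Oscillator; the method name VIDYA_sketch and the use of `period` as the
# CMO window are assumptions, not part of the original API.
@classmethod
def VIDYA_sketch(cls, ohlcv: DataFrame, period: int = 9, smoothing_period: int = 12, column: str = "close") -> Series:
    price = ohlcv[column].astype(float)
    diff = price.diff()
    up = diff.clip(lower=0).rolling(period).sum()
    down = (-diff.clip(upper=0)).rolling(period).sum()
    cmo = ((up - down) / (up + down)).abs().fillna(0)  # variable volatility index in [0, 1]
    alpha = 2.0 / (smoothing_period + 1)  # constant EMA-style smoothing factor
    vidya = price.copy()
    for i in range(1, len(price)):
        k = alpha * cmo.iloc[i]
        vidya.iloc[i] = k * price.iloc[i] + (1.0 - k) * vidya.iloc[i - 1]
    return pd.Series(vidya, name="{0} period VIDYA.".format(period))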
@classmethod
def ER(cls, ohlc: DataFrame, period: int = 10, column: str = "close") -> Series:
"""The Kaufman Efficiency indicator is an oscillator indicator that oscillates between +100 and -100, where zero is the center point.
+100 is upward forex trending market and -100 is downwards trending markets."""
change = ohlc[column].diff(period).abs()
volatility = ohlc[column].diff().abs().rolling(window=period).sum()
return pd.Series(change / volatility, name="{0} period ER".format(period))
@classmethod
def KAMA(
cls,
ohlc: DataFrame,
er: int = 10,
ema_fast: int = 2,
ema_slow: int = 30,
period: int = 20,
column: str = "close",
) -> Series:
"""Developed by <NAME>, Kaufman's Adaptive Moving Average (KAMA) is a moving average designed to account for market noise or volatility.
Its main advantage is that it takes into consideration not just the direction, but the market volatility as well."""
er = cls.ER(ohlc, er)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
) ## smoothing constant
sma = pd.Series(
ohlc[column].rolling(period).mean(), name="SMA"
) ## first KAMA is SMA
kama = []
# Current KAMA = Prior KAMA + smoothing_constant * (Price - Prior KAMA)
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), ohlc[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name="{0} period KAMA.".format(period)
) ## apply the kama list to existing index
return sma["KAMA"]
@classmethod
def ZLEMA(
cls,
ohlc: DataFrame,
period: int = 26,
adjust: bool = True,
column: str = "close",
) -> Series:
"""ZLEMA is an abbreviation of Zero Lag Exponential Moving Average. It was developed by <NAME> and <NAME>.
ZLEMA is a kind of Exponential moving average but its main idea is to eliminate the lag arising from the very nature of the moving averages
and other trend following indicators. As it follows price closer, it also provides better price averaging and responds better to price swings."""
lag = int((period - 1) / 2)  # Series.diff() requires an integer number of periods
ema = pd.Series(
(ohlc[column] + (ohlc[column].diff(lag))),
name="{0} period ZLEMA.".format(period),
)
zlema = pd.Series(
ema.ewm(span=period, adjust=adjust).mean(),
name="{0} period ZLEMA".format(period),
)
return zlema
@classmethod
def WMA(cls, ohlc: DataFrame, period: int = 9, column: str = "close") -> Series:
"""
WMA stands for weighted moving average. It helps to smooth the price curve for better trend identification.
It places even greater importance on recent data than the EMA does.
:period: Specifies the number of Periods used for WMA calculation
"""
d = (period * (period + 1)) / 2 # denominator
weights = np.arange(1, period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_close = ohlc[column].rolling(period, min_periods=period)
wma = _close.apply(linear(weights), raw=True)
return pd.Series(wma, name="{0} period WMA.".format(period))
@classmethod
def HMA(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""
HMA indicator is a common abbreviation of Hull Moving Average.
The average was developed by <NAME> and is used mainly to identify the current market trend.
Unlike SMA (simple moving average) the curve of Hull moving average is considerably smoother.
Moreover, because its aim is to minimize the lag between HMA and price, it follows price activity much more closely.
It is used especially for middle-term and long-term trading.
:period: Specifies the number of Periods used for WMA calculation
"""
import math
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = cls.WMA(ohlc, period=half_length)
wmas = cls.WMA(ohlc, period=period)
ohlc["deltawma"] = 2 * wmaf - wmas
hma = cls.WMA(ohlc, column="deltawma", period=sqrt_length)
return pd.Series(hma, name="{0} period HMA.".format(period))
@classmethod
def EVWMA(cls, ohlcv: DataFrame, period: int = 20) -> Series:
"""
The eVWMA can be looked at as an approximation to the
average price paid per share in the last n periods.
:period: Specifies the number of Periods used for eVWMA calculation
"""
vol_sum = (
ohlcv["volume"].rolling(window=period).sum()
) # floating shares in last N periods
x = (vol_sum - ohlcv["volume"]) / vol_sum
y = (ohlcv["volume"] * ohlcv["close"]) / vol_sum
evwma = [0]
# evwma = (evma[-1] * (vol_sum - volume)/vol_sum) + (volume * price / vol_sum)
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=ohlcv.index, name="{0} period EVWMA.".format(period),
)
@classmethod
def VWAP(cls, ohlcv: DataFrame) -> Series:
"""
The volume weighted average price (VWAP) is a trading benchmark used especially in pension plans.
VWAP is calculated by adding up the dollars traded for every transaction (price multiplied by number of shares traded) and then dividing
by the total shares traded for the day.
"""
return pd.Series(
((ohlcv["volume"] * cls.TP(ohlcv)).cumsum()) / ohlcv["volume"].cumsum(),
name="VWAP.",
)
@classmethod
def SMMA(
cls,
ohlc: DataFrame,
period: int = 42,
column: str = "close",
adjust: bool = True,
) -> Series:
"""The SMMA (Smoothed Moving Average) gives recent prices an equal weighting to historic prices."""
return pd.Series(
ohlc[column].ewm(alpha=1 / period, adjust=adjust).mean(), name="SMMA"
)
@classmethod
def ALMA(
cls, ohlc: DataFrame, period: int = 9, sigma: int = 6, offset: float = 0.85
) -> Series:
"""Arnaud Legoux Moving Average."""
"""dataWindow = _.last(data, period)
size = _.size(dataWindow)
m = offset * (size - 1)
s = size / sigma
sum = 0
norm = 0
for i in [size-1..0] by -1
coeff = Math.exp(-1 * (i - m) * (i - m) / 2 * s * s)
sum = sum + dataWindow[i] * coeff
norm = norm + coeff
return sum / norm"""
raise NotImplementedError
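# ALMA above is left unimplemented; its docstring holds CoffeeScript-style
# pseudocode. A minimal Python sketch of the standard Gaussian-weighted ALMA
# follows; the method name ALMA_sketch is an assumption, not part of the original API.
@classmethod
def ALMA_sketch(cls, ohlc: DataFrame, period: int = 9, sigma: int = 6, offset: float = 0.85, column: str = "close") -> Series:
    m = offset * (period - 1)  # center of the Gaussian window (shifted toward recent bars)
    s = period / sigma         # width of the Gaussian window
    idx = np.arange(period)
    weights = np.exp(-((idx - m) ** 2) / (2.0 * s * s))
    weights /= weights.sum()   # normalize so the weights sum to 1
    alma = ohlc[column].rolling(period).apply(lambda window: np.dot(window, weights), raw=True)
    return pd.Series(alma, name="{0} period ALMA.".format(period))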
@classmethod
def MAMA(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""MESA Adaptive Moving Average"""
raise NotImplementedError
@classmethod
def FRAMA(cls, ohlc: DataFrame, period: int = 16, batch: int=10) -> Series:
"""Fractal Adaptive Moving Average
Source: http://www.stockspotter.com/Files/frama.pdf
Adopted from: https://www.quantopian.com/posts/frama-fractal-adaptive-moving-average-in-python
:period: Specifies the number of periods used for FRAMA calculation
:batch: Specifies the size of batches used for FRAMA calculation
"""
assert period % 2 == 0, "FRAMA period must be even"
c = ohlc.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=ohlc.index, name="{0} period FRAMA.".format(period))
@classmethod
def MACD(
cls,
ohlc: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
MACD, MACD Signal and MACD difference.
The MACD Line oscillates above and below the zero line, which is also known as the centerline.
These crossovers signal that the 12-day EMA has crossed the 26-day EMA. The direction, of course, depends on the direction of the moving average cross.
Positive MACD indicates that the 12-day EMA is above the 26-day EMA. Positive values increase as the shorter EMA diverges further from the longer EMA.
This means upside momentum is increasing. Negative MACD values indicates that the 12-day EMA is below the 26-day EMA.
Negative values increase as the shorter EMA diverges further below the longer EMA. This means downside momentum is increasing.
Signal line crossovers are the most common MACD signals. The signal line is a 9-day EMA of the MACD Line.
As a moving average of the indicator, it trails the MACD and makes it easier to spot MACD turns.
A bullish crossover occurs when the MACD turns up and crosses above the signal line.
A bearish crossover occurs when the MACD turns down and crosses below the signal line.
"""
EMA_fast = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
MACD = pd.Series(EMA_fast - EMA_slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
@classmethod
def PPO(
cls,
ohlc: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
Percentage Price Oscillator
PPO, PPO Signal and PPO difference.
As with MACD, the PPO reflects the convergence and divergence of two moving averages.
While MACD measures the absolute difference between two moving averages, PPO makes this a relative value by dividing the difference by the slower moving average
"""
EMA_fast = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
PPO = pd.Series(((EMA_fast - EMA_slow) / EMA_slow) * 100, name="PPO")
PPO_signal = pd.Series(
PPO.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
PPO_histo = pd.Series(PPO - PPO_signal, name="HISTO")
return pd.concat([PPO, PPO_signal, PPO_histo], axis=1)
@classmethod
def VW_MACD(
cls,
ohlcv: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
""""Volume-Weighted MACD" is an indicator that shows how a volume-weighted moving average can be used to calculate moving average convergence/divergence (MACD).
This technique was first used by <NAME>, CMT, and has been written about since at least 2002."""
vp = ohlcv["volume"] * ohlcv[column]
_fast = pd.Series(
(vp.ewm(ignore_na=False, span=period_fast, adjust=adjust).mean())
/ (
ohlcv["volume"]
.ewm(ignore_na=False, span=period_fast, adjust=adjust)
.mean()
),
name="_fast",
)
_slow = pd.Series(
(vp.ewm(ignore_na=False, span=period_slow, adjust=adjust).mean())
/ (
ohlcv["volume"]
.ewm(ignore_na=False, span=period_slow, adjust=adjust)
.mean()
),
name="_slow",
)
MACD = pd.Series(_fast - _slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
@classmethod
def EV_MACD(
cls,
ohlcv: DataFrame,
period_fast: int = 20,
period_slow: int = 40,
signal: int = 9,
adjust: bool = True,
) -> DataFrame:
"""
Elastic Volume Weighted MACD is a variation of standard MACD,
calculated using two EVWMA's.
:period_slow: Specifies the number of Periods used for the slow EVWMA calculation
:period_fast: Specifies the number of Periods used for the fast EVWMA calculation
:signal: Specifies the number of Periods used for the signal calculation
"""
evwma_slow = cls.EVWMA(ohlcv, period_slow)
evwma_fast = cls.EVWMA(ohlcv, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
#!/usr/bin/env python3
# coding: utf-8
# ## 1. Multi-Class Classification:
# For the multiclass classification problem, there are six different datasets. Some of them contain missing values; for example, TrainData1, TestData1 and TrainData3 encode missing values as 1.00000000000000e+99. Therefore, the first step is to handle the missing values before selecting features. Several classifiers are then compared on each training dataset, using cross validation to verify the accuracy before predicting on the corresponding test dataset.
# <center><div style='width:50%; height:50%'><img src='../images/Q1_table.jpeg'></div></center>
#
# Hint:
# * Missing Value Estimation
# - (KNN method for imputation of the missing values; a sketch follows after this list)
# * Dimensionality Reduction
# * Use Several Classifiers/ Ensemble Method
# - Logistic Regression (with different c values)
# - Random Forest (with different estimator values)
# - SVM (with different kernels)
# - KNN (with k = 1,2,5,10,20)
# - K (3,5,10) Fold Cross Validation
# * Performance Comparison
# - Classification Accuracy, Precision, Recall, Sensitivity, Specificity
# - AUC, ROC Curve
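# A minimal sketch (not part of the original notebook) of how the 1e+99 sentinel
# values could be converted to NaN and imputed with a KNN approach before feature
# selection; sklearn's KNNImputer is used here as one option (the notebook also
# imports impyute's fast_knn for the same purpose). The helper name
# `impute_sentinel_missing` is an assumption.
import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer

def impute_sentinel_missing(df, sentinel=1.0e99, k=5):
    """Replace sentinel-coded missing values with NaN and KNN-impute them."""
    cleaned = df.replace(sentinel, np.nan)
    imputed = KNNImputer(n_neighbors=k).fit_transform(cleaned)
    return pd.DataFrame(imputed, columns=df.columns, index=df.index)

# example (hypothetical): X_train1 = impute_sentinel_missing(X_train1)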
# In[517]:
import os
import re
import sys
import gc
from sklearn.svm import SVC
from pydotplus import *
from IPython.display import Image
from six import StringIO
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from impyute.imputation.cs import fast_knn
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import *
from sklearn.decomposition import PCA
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score, accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import *
import statistics as stat
import seaborn as sns
from matplotlib import offsetbox
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from time import strptime, mktime
import warnings
warnings.filterwarnings('ignore')
# Modeling settings
plt.rc("font", size=14)
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
# In[353]:
def optimizeK(X_train, y_train, X_test, y_test):
neighbors = np.arange(1, 20)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
for i, k in enumerate(neighbors):
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
train_accuracy[i] = knn.score(X_train, y_train)
test_accuracy[i] = knn.score(X_test, y_test)
return neighbors, test_accuracy, train_accuracy
# In[361]:
def plotK(neighbors, test_accuracy, train_accuracy):
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.plot(neighbors, train_accuracy, label='Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.xticks(np.arange(0, neighbors[-1], step=1))
plt.ylabel('Accuracy')
plt.title('KNN Varying Number of Neighbors')
plt.show()
# In[317]:
X_train2 = pd.read_csv('../data/1/TrainData2.txt',
delimiter='\s+', header=None)
X_train3 = pd.read_csv('../data/1/TrainData3.txt',
delimiter='\s+', header=None)
X_train4 = pd.read_csv('../data/1/TrainData4.txt',
delimiter='\s+', header=None)
# In[318]:
y_train2 = pd.read_csv('../data/1/TrainLabel2.txt',
delimiter='\n', header=None)
y_train3 = pd.read_csv('../data/1/TrainLabel3.txt',
delimiter='\n', header=None)
y_train4 = pd.read_csv('../data/1/TrainLabel4.txt',
delimiter='\n', header=None)
# In[319]:
X_test2 = pd.read_csv('../data/1/TestData2.txt', delimiter='\s+', header=None)
X_test3 = pd.read_csv('../data/1/TestData3.txt', delimiter=',', header=None)
X_test4 = pd.read_csv('../data/1/TestData4.txt', delimiter='\s+', header=None)
# In[320]:
X_training = [X_train2, X_train3, X_train4]
y_training = [y_train2, y_train3, y_train4]
X_testing = [X_test2, X_test3, X_test4]
# In[321]:
for i, x in enumerate(X_training):
print(f'X_TrainData{i+2} Shape: {x.shape}')
# In[322]:
for i, y in enumerate(y_training):
print(f'y_TrainData{i+2} Shape: {y.shape}')
# In[323]:
for j, y in enumerate(X_testing):
print(f'TestData{j+2} Shape: {y.shape}')
# # _Dataset 2_
# ### PCA for DS2
# In[324]:
X_train2.shape
# In[325]:
y_train2.shape
# In[326]:
X_test2.shape
# In[327]:
xTrain2PCA = PCA(n_components=74)
X_train2_pca = xTrain2PCA.fit_transform(X_train2)
# In[330]:
# 74 principal components can explain 99% of the data
X_train2_pca_var = xTrain2PCA.fit(X_train2)
print(sum(X_train2_pca_var.explained_variance_ratio_))
print(X_train2_pca.shape)
# In[332]:
# 74 principal components can explain 99% of the data
xTest2PCA = PCA(n_components=74)
X_test2_pca = xTest2PCA.fit_transform(X_test2)
# In[333]:
X_test2_pca_var = xTest2PCA.fit(X_test2)
print(sum(X_test2_pca_var.explained_variance_ratio_))
print(X_test2_pca.shape)
# In[334]:
X_train2_components = pd.DataFrame(X_train2_pca)
X_train2_components.head(10)
# In[335]:
X_test2_components = pd.DataFrame(X_test2_pca)
from src.lib.DownloadData import DownloadData
import pandas as pd
class CDLData(DownloadData):
def get_data(self):
df = self.pro.daily(trade_date='20180810')
daily_limit_df = df[df['pct_chg'] >= 9.9]
daily_limit_company_info = pd.DataFrame(
columns=['ts_code', 'main_business', 'reg_capital', 'setup_date', 'province'])
szse_exhange_company_info_df = self.pro.stock_company(
exchange='SZSE',
fields='ts_code,main_business,reg_capital,setup_date,province')
sse_exhange_company_info_df = self.pro.stock_company(
exchange='SSE',
fields='ts_code,main_business,reg_capital,setup_date,province')
for ts_code in daily_limit_df['ts_code'].tolist():
szse_company_info_df = szse_exhange_company_info_df[szse_exhange_company_info_df['ts_code'] == ts_code]
sse_company_info_df = sse_exhange_company_info_df[sse_exhange_company_info_df['ts_code'] == ts_code]
if szse_company_info_df.shape[0] > 0:
daily_limit_company_info = pd.concat([daily_limit_company_info,szse_company_info_df], ignore_index=True)
else:
daily_limit_company_info = pd.concat([daily_limit_company_info, sse_company_info_df], ignore_index=True)
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
:description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
:param num_apps; number of applications
:param app_interval; number of days between applications
:NOTE in the TED model there are two application scenarios per simulation (one each for the min and max exposure scenarios);
this is why the parameters are passed in
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
result = pd.Series([[]], dtype='bool')
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# input variables that change per simulation
ted_empty.num_apps_min = pd.Series([3, 5, 1])
ted_empty.app_interval_min = pd.Series([3, 7, 1])
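# note: with these inputs the expected flags above are True on days (0, 3, 6),
# (0, 7, 14, 21, 28) and (0,) respectively, i.e. num_apps_min applications spaced
# app_interval_min days apart starting on day 0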
for i in range (3):
result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
np.array_equal(result[i],expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_drift_parameters(self):
"""
:description provides parameter values to use when calculating distances from the edge of the application source area to
the concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
:param param_a (result[i][0]); parameter a for spray drift distance calculation
:param param_b (result[i][1]); parameter b for spray drift distance calculation
:param param_c (result[i][2]); parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series(9*[[0.,0.,0.]], dtype='float')
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
# input variable that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_drift_distance_calc(self):
"""
:description calculates the distance from the edge of the application source area at which the spray drift fraction of the
application rate equals the fraction of interest (i.e., the health threshold of concern)
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
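# note (assumption): these values are consistent with inverting a drift curve of the form
# app_rate_frac = param_c / (1 + param_a * distance)**param_b, i.e.
# distance = ((param_c / app_rate_frac)**(1 / param_b) - 1) / param_a,
# with negative results truncated to 0 and results capped at max_distance_from_source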
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c = pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
# internally calculated variables
app_rate_frac = pd.Series([0.1,0.25,0.88], dtype='float')
for i in range(3):
result[i] = ted_empty.drift_distance_calc(app_rate_frac[i], param_a[i], param_b[i], param_c[i], ted_empty.max_distance_from_source)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
:description unittest for function conc_timestep:
:param conc_ini; initial concentration for day (actually previous day concentration)
:param half_life; half-life of pesticide representing either the foliar dissipation half-life or the aerobic soil metabolism half-life (days)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [9.803896e-4, 0.106066, 1.220703e-3]
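# note (assumption): these values follow single-day first-order decay,
# conc_ini * exp(-ln(2) / half_life), e.g. 1.0e-3 * exp(-0.693147 / 35.) ~= 9.803896e-4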
try:
# input variable that is internally specified from among options
half_life = pd.Series([35., 2., .1])
# internally calculated variables
conc_ini = pd.Series([1.e-3, 0.15, 1.25])
result = ted_empty.conc_timestep(conc_ini, half_life)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_canopy_air(self):
"""
:description calculates initial (1st application day) air concentration of pesticide within plant canopy (ug/mL)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param mass_pest; mass of pesticide on treated field (mg)
:param volume_air; volume of air in 1 hectare to a height equal to the height of the crop canopy
:param biotransfer_factor; the volume-based biotransfer factor; a function of Henry's law constant and Log Kow
NOTE: this represents Eq 24 (and supporting eqs 25,26,27) of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.152526e-7, 1.281910e-5, 7.925148e-8]
try:
# internal model constants
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. #m
ted_empty.hectare_area = 10000. #m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 #kg/L
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2., 4., 6.], dtype='float')
ted_empty.log_unitless_hlc = pd.Series([-5., -3., -4.], dtype='float')
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_canopy_air(i, ted_empty.app_rate_min[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_soil_h2o(self):
"""
:description calculates initial (1st application day) concentration in soil pore water or surface puddles(ug/L)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param soil_depth
:param soil_bulk_density; kg/L
:param porosity; soil porosity
:param frac_org_cont_soil; fraction organic carbon in soil
:param app_rate_conv; conversion factor used to convert units of application rate (lbs a.i./acre) to (ug a.i./mL)
:NOTE this represents Eq 3 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
(the depth of water in this equation is assumed to be 0.0 and therefore not included here)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [5.067739e-3, 1.828522, 6.13194634]
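# note (assumption): these values are consistent with Eq 3,
# (app_rate * app_rate_conv1) / (water_depth + soil_depth * (soil_porosity + soil_bulk_density * koc * soil_foc)),
# where water_depth is h2o_depth_puddles for 'puddles' and h2o_depth_soil (0.0) for 'pore_water'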
try:
# internal model constants
ted_empty.app_rate_conv1 = 11.2
ted_empty.soil_depth = 2.6 # cm
ted_empty.soil_porosity = 0.35
ted_empty.soil_bulk_density = 1.5 # kg/L
ted_empty.soil_foc = 0.015
ted_empty.h2o_depth_soil = 0.0
ted_empty.h2o_depth_puddles = 1.3
# internally specified variable
ted_empty.water_type = pd.Series(["puddles", "pore_water", "puddles"])
# input variables that change per simulation
ted_empty.koc = pd.Series([1.e-3, 0.15, 1.25])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_soil_h2o(i, ted_empty.app_rate_min[i], ted_empty.water_type[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_plant(self):
"""
:description calculates initial (1st application day) dietary based EEC (residue concentration) from pesticide application
(mg/kg-diet for food items including short/tall grass, broadleaf plants, seeds/fruit/pods, and above ground arthropods)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.5e-2, 22.5, 300.]
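# note: these expected values are simply app_rate_min * food_multiplier, e.g. 0.15 * 150. = 22.5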
try:
# input variables that change per simulation
ted_empty.food_multiplier = pd.Series([15., 150., 240.])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
result = ted_empty.conc_initial_plant(ted_empty.app_rate_min, ted_empty.food_multiplier)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_dietary_intake(self):
"""
:description generates pesticide intake via consumption of diet containing pesticide for animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
:param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
# this represents Eqs 6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [8.050355, 3.507997, 64.92055]
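# note (assumption): these values are consistent with the allometric form of Eq 6,
# (a1 * body_wgt**b1) / (1 - frac_h2o), e.g. (0.398 * 10.**0.850) / (1. - 0.65) ~= 8.050355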
try:
# internally specified parameters
a1 = pd.Series([.398, .013, .621], dtype='float')
b1 = pd.Series([.850, .773, .564], dtype='float')
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
frac_h2o = pd.Series([0.65, 0.85, 0.7], dtype='float')
result = ted_empty.animal_dietary_intake(a1, b1, body_wgt, frac_h2o)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_dietary_dose(self):
"""
:description generates pesticide dietary-based dose for animals (mammals, birds, amphibians, reptiles)
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param food_intake_rate; ingestion rate of food item (g/day-ww)
:param food_pest_conc; pesticide concentration in food item (mg a.i./kg)
# this represents Eqs 5 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [3.e-4, 3.45e-2, 4.5]
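# note (assumption): these values are consistent with Eq 5,
# food_intake_rate * food_pest_conc / body_wgt, e.g. 3. * 1.e-3 / 10. = 3.e-4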
try:
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
# internally calculated variables
food_intake_rate = pd.Series([3., 12., 45.], dtype='float')
food_pest_conc = pd.Series([1.e-3, 3.45e-1, 4.50e+1], dtype='float')
result = ted_empty.animal_dietary_dose(body_wgt, food_intake_rate, food_pest_conc)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_plant_timeseries(self):
"""
:description generates annual timeseries of daily pesticide residue concentration (EECs) for a food item
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association, rather it is one year from the day of the 1st pesticide application
#expected results generated by running OPP spreadsheet with appropriate inputs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.700000E+00,2.578072E+00,2.461651E+00,5.050487E+00,4.822415E+00,4.604642E+00,7.096704E+00,
6.776228E+00,6.470225E+00,6.178040E+00,5.899049E+00,5.632658E+00,5.378296E+00,5.135421E+00,
4.903513E+00,4.682078E+00,4.470643E+00,4.268756E+00,4.075986E+00,3.891921E+00,3.716168E+00,
3.548352E+00,3.388114E+00,3.235112E+00,3.089020E+00,2.949525E+00,2.816329E+00,2.689148E+00,
2.567710E+00,2.451757E+00,2.341039E+00,2.235322E+00,2.134378E+00,2.037993E+00,1.945961E+00,
1.858084E+00,1.774176E+00,1.694057E+00,1.617556E+00,1.544510E+00,1.474762E+00,1.408164E+00,
1.344574E+00,1.283855E+00,1.225878E+00,1.170520E+00,1.117661E+00,1.067189E+00,1.018997E+00,
9.729803E-01,9.290420E-01,8.870880E-01,8.470285E-01,8.087781E-01,7.722549E-01,7.373812E-01,
7.040822E-01,6.722870E-01,6.419276E-01,6.129392E-01,5.852598E-01,5.588304E-01,5.335945E-01,
5.094983E-01,4.864901E-01,4.645210E-01,4.435440E-01,4.235143E-01,4.043890E-01,3.861275E-01,
3.686906E-01,3.520411E-01,3.361435E-01,3.209638E-01,3.064696E-01,2.926299E-01,2.794152E-01,
2.667973E-01,2.547491E-01,2.432451E-01,2.322605E-01,2.217720E-01,2.117571E-01,2.021945E-01,
1.930637E-01,1.843453E-01,1.760206E-01,1.680717E-01,1.604819E-01,1.532348E-01,1.463150E-01,
1.397076E-01,1.333986E-01,1.273746E-01,1.216225E-01,1.161303E-01,1.108860E-01,1.058786E-01,
1.010973E-01,9.653187E-02,9.217264E-02,8.801028E-02,8.403587E-02,8.024095E-02,7.661739E-02,
7.315748E-02,6.985380E-02,6.669932E-02,6.368728E-02,6.081127E-02,5.806513E-02,5.544300E-02,
5.293928E-02,5.054863E-02,4.826593E-02,4.608632E-02,4.400514E-02,4.201794E-02,4.012047E-02,
3.830870E-02,3.657874E-02,3.492690E-02,3.334966E-02,3.184364E-02,3.040563E-02,2.903256E-02,
2.772150E-02,2.646964E-02,2.527431E-02,2.413297E-02,2.304316E-02,2.200257E-02,2.100897E-02,
2.006024E-02,1.915435E-02,1.828937E-02,1.746345E-02,1.667483E-02,1.592182E-02,1.520282E-02,
1.451628E-02,1.386075E-02,1.323482E-02,1.263716E-02,1.206648E-02,1.152158E-02,1.100128E-02,
1.050448E-02,1.003012E-02,9.577174E-03,9.144684E-03,8.731725E-03,8.337415E-03,7.960910E-03,
7.601408E-03,7.258141E-03,6.930375E-03,6.617410E-03,6.318579E-03,6.033242E-03,5.760790E-03,
5.500642E-03,5.252242E-03,5.015059E-03,4.788587E-03,4.572342E-03,4.365863E-03,4.168707E-03,
3.980455E-03,3.800704E-03,3.629070E-03,3.465187E-03,3.308705E-03,3.159289E-03,3.016621E-03,
2.880395E-03,2.750321E-03,2.626121E-03,2.507530E-03,2.394294E-03,2.286171E-03,2.182931E-03,
2.084354E-03,1.990228E-03,1.900352E-03,1.814535E-03,1.732594E-03,1.654353E-03,1.579645E-03,
1.508310E-03,1.440198E-03,1.375161E-03,1.313061E-03,1.253765E-03,1.197147E-03,1.143086E-03,
1.091466E-03,1.042177E-03,9.951138E-04,9.501760E-04,9.072676E-04,8.662969E-04,8.271763E-04,
7.898223E-04,7.541552E-04,7.200988E-04,6.875803E-04,6.565303E-04,6.268824E-04,5.985734E-04,
5.715428E-04,5.457328E-04,5.210884E-04,4.975569E-04,4.750880E-04,4.536338E-04,4.331484E-04,
4.135881E-04,3.949112E-04,3.770776E-04,3.600494E-04,3.437901E-04,3.282651E-04,3.134412E-04,
2.992867E-04,2.857714E-04,2.728664E-04,2.605442E-04,2.487784E-04,2.375440E-04,2.268169E-04,
2.165742E-04,2.067941E-04,1.974556E-04,1.885388E-04,1.800247E-04,1.718951E-04,1.641326E-04,
1.567206E-04,1.496433E-04,1.428857E-04,1.364332E-04,1.302721E-04,1.243892E-04,1.187720E-04,
1.134085E-04,1.082871E-04,1.033970E-04,9.872779E-05,9.426940E-05,9.001235E-05,8.594753E-05,
8.206628E-05,7.836030E-05,7.482167E-05,7.144285E-05,6.821660E-05,6.513605E-05,6.219461E-05,
5.938600E-05,5.670423E-05,5.414355E-05,5.169852E-05,4.936390E-05,4.713470E-05,4.500617E-05,
4.297377E-05,4.103314E-05,3.918015E-05,3.741084E-05,3.572142E-05,3.410830E-05,3.256803E-05,
3.109731E-05,2.969300E-05,2.835211E-05,2.707178E-05,2.584926E-05,2.468195E-05,2.356735E-05,
2.250309E-05,2.148688E-05,2.051657E-05,1.959007E-05,1.870542E-05,1.786071E-05,1.705415E-05,
1.628401E-05,1.554865E-05,1.484650E-05,1.417606E-05,1.353589E-05,1.292463E-05,1.234097E-05,
1.178368E-05,1.125154E-05,1.074344E-05,1.025829E-05,9.795037E-06,9.352709E-06,8.930356E-06,
8.527075E-06,8.142006E-06,7.774326E-06,7.423250E-06,7.088028E-06,6.767944E-06,6.462315E-06,
6.170487E-06,5.891838E-06,5.625772E-06,5.371721E-06,5.129143E-06,4.897519E-06,4.676355E-06,
4.465178E-06,4.263538E-06,4.071003E-06,3.887163E-06,3.711625E-06,3.544014E-06,3.383972E-06,
3.231157E-06,3.085243E-06,2.945919E-06,2.812886E-06,2.685860E-06,2.564571E-06,2.448759E-06,
2.338177E-06,2.232589E-06,2.131769E-06,2.035502E-06,1.943582E-06,1.855813E-06,1.772007E-06,
1.691986E-06,1.615579E-06,1.542622E-06,1.472959E-06,1.406443E-06,1.342930E-06,1.282286E-06,
1.224380E-06,1.169089E-06,1.116294E-06,1.065884E-06,1.017751E-06,9.717908E-07,9.279063E-07,
8.860035E-07,8.459930E-07,8.077893E-07,7.713109E-07,7.364797E-07,7.032215E-07,6.714651E-07,
6.411428E-07,6.121898E-07,5.845443E-07,5.581472E-07,5.329422E-07,5.088754E-07,4.858954E-07,
4.639531E-07,4.430018E-07],
[5.500000E+01,5.349602E+01,5.203317E+01,5.061032E+01,4.922638E+01,4.788028E+01,4.657099E+01,
1.002975E+02,9.755487E+01,9.488722E+01,9.229253E+01,8.976878E+01,8.731405E+01,8.492644E+01,
1.376041E+02,1.338413E+02,1.301814E+02,1.266216E+02,1.231591E+02,1.197913E+02,1.165156E+02,
1.683295E+02,1.637265E+02,1.592494E+02,1.548947E+02,1.506591E+02,1.465394E+02,1.425322E+02,
1.936347E+02,1.883397E+02,1.831896E+02,1.781802E+02,1.733079E+02,1.685688E+02,1.639593E+02,
1.594758E+02,1.551149E+02,1.508733E+02,1.467476E+02,1.427348E+02,1.388317E+02,1.350354E+02,
1.313428E+02,1.277512E+02,1.242579E+02,1.208600E+02,1.175551E+02,1.143406E+02,1.112139E+02,
1.081728E+02,1.052148E+02,1.023377E+02,9.953925E+01,9.681734E+01,9.416987E+01,9.159479E+01,
8.909012E+01,8.665395E+01,8.428439E+01,8.197963E+01,7.973789E+01,7.755746E+01,7.543664E+01,
7.337382E+01,7.136741E+01,6.941587E+01,6.751769E+01,6.567141E+01,6.387562E+01,6.212894E+01,
6.043002E+01,5.877756E+01,5.717028E+01,5.560696E+01,5.408638E+01,5.260739E+01,5.116884E+01,
4.976962E+01,4.840867E+01,4.708493E+01,4.579739E+01,4.454506E+01,4.332697E+01,4.214220E+01,
4.098981E+01,3.986895E+01,3.877873E+01,3.771832E+01,3.668691E+01,3.568371E+01,3.470793E+01,
3.375884E+01,3.283571E+01,3.193781E+01,3.106447E+01,3.021501E+01,2.938878E+01,2.858514E+01,
2.780348E+01,2.704319E+01,2.630369E+01,2.558442E+01,2.488481E+01,2.420434E+01,2.354247E+01,
2.289870E+01,2.227253E+01,2.166349E+01,2.107110E+01,2.049491E+01,1.993447E+01,1.938936E+01,
1.885916E+01,1.834346E+01,1.784185E+01,1.735397E+01,1.687942E+01,1.641785E+01,1.596891E+01,
1.553224E+01,1.510751E+01,1.469439E+01,1.429257E+01,1.390174E+01,1.352160E+01,1.315185E+01,
1.279221E+01,1.244241E+01,1.210217E+01,1.177123E+01,1.144935E+01,1.113627E+01,1.083174E+01,
1.053555E+01,1.024745E+01,9.967237E+00,9.694682E+00,9.429580E+00,9.171728E+00,8.920927E+00,
8.676983E+00,8.439711E+00,8.208926E+00,7.984453E+00,7.766118E+00,7.553753E+00,7.347195E+00,
7.146286E+00,6.950870E+00,6.760798E+00,6.575924E+00,6.396105E+00,6.221203E+00,6.051084E+00,
5.885617E+00,5.724674E+00,5.568133E+00,5.415872E+00,5.267774E+00,5.123727E+00,4.983618E+00,
4.847341E+00,4.714790E+00,4.585864E+00,4.460463E+00,4.338492E+00,4.219855E+00,4.104463E+00,
3.992226E+00,3.883059E+00,3.776876E+00,3.673597E+00,3.573143E+00,3.475435E+00,3.380399E+00,
3.287962E+00,3.198052E+00,3.110601E+00,3.025542E+00,2.942808E+00,2.862337E+00,2.784066E+00,
2.707936E+00,2.633887E+00,2.561863E+00,2.491809E+00,2.423670E+00,2.357395E+00,2.292932E+00,
2.230232E+00,2.169246E+00,2.109928E+00,2.052232E+00,1.996113E+00,1.941529E+00,1.888438E+00,
1.836799E+00,1.786571E+00,1.737718E+00,1.690200E+00,1.643981E+00,1.599026E+00,1.555301E+00,
1.512771E+00,1.471404E+00,1.431169E+00,1.392033E+00,1.353968E+00,1.316944E+00,1.280932E+00,
1.245905E+00,1.211835E+00,1.178698E+00,1.146466E+00,1.115116E+00,1.084623E+00,1.054964E+00,
1.026116E+00,9.980566E-01,9.707647E-01,9.442191E-01,9.183994E-01,8.932857E-01,8.688588E-01,
8.450998E-01,8.219905E-01,7.995131E-01,7.776504E-01,7.563855E-01,7.357021E-01,7.155843E-01,
6.960166E-01,6.769840E-01,6.584718E-01,6.404659E-01,6.229523E-01,6.059176E-01,5.893488E-01,
5.732330E-01,5.575579E-01,5.423115E-01,5.274819E-01,5.130579E-01,4.990283E-01,4.853824E-01,
4.721095E-01,4.591997E-01,4.466428E-01,4.344294E-01,4.225499E-01,4.109952E-01,3.997565E-01,
3.888252E-01,3.781927E-01,3.678510E-01,3.577921E-01,3.480083E-01,3.384920E-01,3.292359E-01,
3.202329E-01,3.114761E-01,3.029588E-01,2.946744E-01,2.866165E-01,2.787790E-01,2.711557E-01,
2.637410E-01,2.565290E-01,2.495142E-01,2.426912E-01,2.360548E-01,2.295998E-01,2.233214E-01,
2.172147E-01,2.112749E-01,2.054976E-01,1.998783E-01,1.944126E-01,1.890964E-01,1.839255E-01,
1.788961E-01,1.740041E-01,1.692460E-01,1.646180E-01,1.601165E-01,1.557381E-01,1.514794E-01,
1.473372E-01,1.433082E-01,1.393895E-01,1.355779E-01,1.318705E-01,1.282645E-01,1.247571E-01,
1.213456E-01,1.180274E-01,1.147999E-01,1.116607E-01,1.086073E-01,1.056375E-01,1.027488E-01,
9.993914E-02,9.720630E-02,9.454818E-02,9.196276E-02,8.944803E-02,8.700207E-02,8.462300E-02,
8.230898E-02,8.005823E-02,7.786904E-02,7.573970E-02,7.366860E-02,7.165412E-02,6.969474E-02,
6.778893E-02,6.593524E-02,6.413224E-02,6.237854E-02,6.067279E-02,5.901369E-02,5.739996E-02,
5.583036E-02,5.430367E-02,5.281874E-02,5.137440E-02,4.996957E-02,4.860315E-02,4.727409E-02,
4.598138E-02,4.472402E-02,4.350104E-02,4.231150E-02,4.115449E-02,4.002912E-02,3.893452E-02,
3.786985E-02,3.683430E-02,3.582706E-02,3.484737E-02,3.389447E-02,3.296762E-02,3.206612E-02,
3.118927E-02,3.033640E-02,2.950685E-02,2.869998E-02,2.791518E-02,2.715184E-02,2.640937E-02,
2.568720E-02,2.498478E-02,2.430157E-02,2.363705E-02,2.299069E-02,2.236201E-02,2.175052E-02,
2.115575E-02,2.057724E-02,2.001456E-02,1.946726E-02,1.893493E-02,1.841715E-02,1.791353E-02,
1.742368E-02,1.694723E-02],
[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,2.717171E+02,2.663889E+02,
2.611652E+02,2.560439E+02,2.510230E+02,2.461006E+02,2.412747E+02,2.365435E+02,2.319050E+02,
2.273575E+02,2.228991E+02,2.185282E+02,2.142430E+02,2.100418E+02,2.059231E+02,2.018850E+02,
1.979262E+02,1.940450E+02,1.902399E+02,1.865094E+02,1.828520E+02,1.792664E+02,1.757511E+02,
1.723048E+02,1.689260E+02,1.656134E+02,1.623658E+02,1.591820E+02,1.560605E+02,1.530002E+02,
1.500000E+02,1.470586E+02,1.441749E+02,1.413477E+02,1.385759E+02,1.358585E+02,1.331944E+02,
1.305826E+02,1.280219E+02,1.255115E+02,1.230503E+02,1.206374E+02,1.182717E+02,1.159525E+02,
1.136787E+02,1.114496E+02,1.092641E+02,1.071215E+02,1.050209E+02,1.029615E+02,1.009425E+02,
9.896309E+01,9.702249E+01,9.511994E+01,9.325469E+01,9.142602E+01,8.963322E+01,8.787556E+01,
8.615238E+01,8.446298E+01,8.280671E+01,8.118292E+01,7.959098E+01,7.803025E+01,7.650012E+01,
7.500000E+01,7.352930E+01,7.208743E+01,7.067384E+01,6.928797E+01,6.792927E+01,6.659722E+01,
6.529129E+01,6.401097E+01,6.275575E+01,6.152515E+01,6.031868E+01,5.913587E+01,5.797625E+01,
5.683937E+01,5.572479E+01,5.463206E+01,5.356076E+01,5.251046E+01,5.148076E+01,5.047126E+01,
4.948155E+01,4.851124E+01,4.755997E+01,4.662735E+01,4.571301E+01,4.481661E+01,4.393778E+01,
4.307619E+01,4.223149E+01,4.140336E+01,4.059146E+01,3.979549E+01,3.901512E+01,3.825006E+01,
3.750000E+01,3.676465E+01,3.604372E+01,3.533692E+01,3.464398E+01,3.396464E+01,3.329861E+01,
3.264565E+01,3.200548E+01,3.137788E+01,3.076258E+01,3.015934E+01,2.956793E+01,2.898813E+01,
2.841969E+01,2.786239E+01,2.731603E+01,2.678038E+01,2.625523E+01,2.574038E+01,2.523563E+01,
2.474077E+01,2.425562E+01,2.377998E+01,2.331367E+01,2.285651E+01,2.240830E+01,2.196889E+01,
2.153809E+01,2.111575E+01,2.070168E+01,2.029573E+01,1.989774E+01,1.950756E+01,1.912503E+01,
1.875000E+01,1.838232E+01,1.802186E+01,1.766846E+01,1.732199E+01,1.698232E+01,1.664931E+01,
1.632282E+01,1.600274E+01,1.568894E+01,1.538129E+01,1.507967E+01,1.478397E+01,1.449406E+01,
1.420984E+01,1.393120E+01,1.365801E+01,1.339019E+01,1.312762E+01,1.287019E+01,1.261781E+01,
1.237039E+01,1.212781E+01,1.188999E+01,1.165684E+01,1.142825E+01,1.120415E+01,1.098445E+01,
1.076905E+01,1.055787E+01,1.035084E+01,1.014787E+01,9.948872E+00,9.753781E+00,9.562515E+00,
9.375000E+00,9.191162E+00,9.010929E+00,8.834230E+00,8.660996E+00,8.491159E+00,8.324653E+00,
8.161412E+00,8.001371E+00,7.844469E+00,7.690644E+00,7.539835E+00,7.391984E+00,7.247031E+00,
7.104921E+00,6.965598E+00,6.829007E+00,6.695094E+00,6.563808E+00,6.435095E+00,6.308907E+00,
6.185193E+00,6.063905E+00,5.944996E+00,5.828418E+00,5.714127E+00,5.602076E+00,5.492223E+00,
5.384524E+00,5.278936E+00,5.175420E+00,5.073933E+00,4.974436E+00,4.876890E+00,4.781258E+00,
4.687500E+00,4.595581E+00,4.505464E+00,4.417115E+00,4.330498E+00,4.245580E+00,4.162326E+00,
4.080706E+00,4.000686E+00,3.922235E+00,3.845322E+00,3.769918E+00,3.695992E+00,3.623516E+00,
3.552461E+00,3.482799E+00,3.414504E+00,3.347547E+00,3.281904E+00,3.217548E+00,3.154454E+00,
3.092597E+00,3.031953E+00,2.972498E+00,2.914209E+00,2.857063E+00,2.801038E+00,2.746111E+00,
2.692262E+00,2.639468E+00,2.587710E+00,2.536966E+00,2.487218E+00,2.438445E+00,2.390629E+00,
2.343750E+00,2.297790E+00,2.252732E+00,2.208558E+00,2.165249E+00,2.122790E+00,2.081163E+00,
2.040353E+00,2.000343E+00,1.961117E+00,1.922661E+00,1.884959E+00,1.847996E+00,1.811758E+00,
1.776230E+00,1.741400E+00,1.707252E+00,1.673774E+00,1.640952E+00,1.608774E+00,1.577227E+00,
1.546298E+00,1.515976E+00,1.486249E+00,1.457105E+00,1.428532E+00,1.400519E+00,1.373056E+00,
1.346131E+00,1.319734E+00,1.293855E+00,1.268483E+00,1.243609E+00,1.219223E+00,1.195314E+00,
1.171875E+00,1.148895E+00,1.126366E+00,1.104279E+00,1.082625E+00,1.061395E+00,1.040582E+00,
1.020176E+00,1.000171E+00,9.805587E-01,9.613305E-01,9.424794E-01,9.239979E-01,9.058789E-01,
8.881152E-01,8.706998E-01,8.536259E-01,8.368868E-01,8.204760E-01,8.043869E-01,7.886134E-01,
7.731492E-01,7.579882E-01,7.431245E-01,7.285523E-01,7.142658E-01,7.002595E-01,6.865278E-01,
6.730654E-01,6.598670E-01,6.469274E-01,6.342416E-01,6.218045E-01,6.096113E-01,5.976572E-01,
5.859375E-01,5.744476E-01,5.631831E-01,5.521394E-01,5.413123E-01,5.306975E-01,5.202908E-01,
5.100882E-01,5.000857E-01,4.902793E-01,4.806652E-01,4.712397E-01,4.619990E-01,4.529395E-01,
4.440576E-01,4.353499E-01,4.268129E-01,4.184434E-01,4.102380E-01,4.021935E-01,3.943067E-01,
3.865746E-01,3.789941E-01,3.715622E-01,3.642761E-01,3.571329E-01,3.501297E-01,3.432639E-01,
3.365327E-01,3.299335E-01,3.234637E-01,3.171208E-01,3.109023E-01,3.048056E-01,2.988286E-01,
2.929687E-01,2.872238E-01,2.815915E-01,2.760697E-01,2.706561E-01,2.653487E-01,2.601454E-01,
2.550441E-01,2.500429E-01,2.451397E-01,2.403326E-01,2.356198E-01,2.309995E-01,2.264697E-01,
2.220288E-01,2.176749E-01]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variable (from internal database)
food_multiplier = pd.Series([15., 110., 240.])
# input variables that change per simulation
ted_empty.foliar_diss_hlife = pd.Series([15., 25., 35.])
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
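            # --- editor's note: a minimal sketch (assumption) of the behavior exercised here ---
            # The expected values above are consistent with a Kenaga-style residue model:
            # each application adds app_rate_min * food_multiplier, and the residue decays
            # first-order with the foliar dissipation half-life, i.e. (hypothetical recursion)
            #     conc[t] = conc[t-1] * 0.5 ** (1. / foliar_diss_hlife) + daily_flag[t] * app_rate_min * food_multiplier
            # e.g. the third scenario (1.25 lbs a.i./acre, multiplier 240., half-life 35 d) starts at
            # 1.25 * 240. = 300. and halves exactly every 35 days (300 -> 150 -> 75 -> ...), matching the
            # tail of expected_results above; this is not necessarily how daily_plant_timeseries is coded.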
for i in range(3):
result[i] = ted_empty.daily_plant_timeseries(i, ted_empty.app_rate_min[i], food_multiplier[i], daily_flag[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return

    def test_daily_soil_h2o_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil pore water and surface puddles
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
        :param daily_flag; daily flag denoting whether pesticide is applied on a given day (True - applied, False - not applied)
        :param water_type; type of water body ('pore_water' or 'puddles')
        :Notes # calculations are performed daily from the day of first application (assumed to be day 0) through the last day of a year
               # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
               # association; rather, it is one year from the day of the first pesticide application
:return:
"""
        # create an empty TED object (built from empty pandas dataframes) for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.235571E-02,2.134616E-02,2.038220E-02,4.181749E-02,3.992908E-02,3.812594E-02,
5.875995E-02,5.610644E-02,5.357277E-02,5.115350E-02,4.884349E-02,4.663780E-02,
4.453171E-02,4.252073E-02,4.060056E-02,3.876711E-02,3.701645E-02,3.534484E-02,
3.374873E-02,3.222469E-02,3.076947E-02,2.937997E-02,2.805322E-02,2.678638E-02,
2.557675E-02,2.442175E-02,2.331890E-02,2.226586E-02,2.126037E-02,2.030028E-02,
1.938355E-02,1.850822E-02,1.767242E-02,1.687436E-02,1.611234E-02,1.538474E-02,
1.468999E-02,1.402661E-02,1.339319E-02,1.278838E-02,1.221087E-02,1.165945E-02,
1.113293E-02,1.063018E-02,1.015014E-02,9.691777E-03,9.254112E-03,8.836211E-03,
8.437182E-03,8.056172E-03,7.692368E-03,7.344993E-03,7.013305E-03,6.696596E-03,
6.394188E-03,6.105437E-03,5.829725E-03,5.566464E-03,5.315091E-03,5.075070E-03,
4.845888E-03,4.627056E-03,4.418105E-03,4.218591E-03,4.028086E-03,3.846184E-03,
3.672497E-03,3.506653E-03,3.348298E-03,3.197094E-03,3.052718E-03,2.914863E-03,
2.783232E-03,2.657546E-03,2.537535E-03,2.422944E-03,2.313528E-03,2.209053E-03,
2.109295E-03,2.014043E-03,1.923092E-03,1.836248E-03,1.753326E-03,1.674149E-03,
1.598547E-03,1.526359E-03,1.457431E-03,1.391616E-03,1.328773E-03,1.268768E-03,
1.211472E-03,1.156764E-03,1.104526E-03,1.054648E-03,1.007022E-03,9.615460E-04,
9.181242E-04,8.766632E-04,8.370745E-04,7.992735E-04,7.631796E-04,7.287156E-04,
6.958080E-04,6.643864E-04,6.343838E-04,6.057361E-04,5.783820E-04,5.522632E-04,
5.273239E-04,5.035108E-04,4.807730E-04,4.590621E-04,4.383316E-04,4.185372E-04,
3.996368E-04,3.815898E-04,3.643578E-04,3.479040E-04,3.321932E-04,3.171919E-04,
3.028680E-04,2.891910E-04,2.761316E-04,2.636619E-04,2.517554E-04,2.403865E-04,
2.295310E-04,2.191658E-04,2.092686E-04,1.998184E-04,1.907949E-04,1.821789E-04,
1.739520E-04,1.660966E-04,1.585960E-04,1.514340E-04,1.445955E-04,1.380658E-04,
1.318310E-04,1.258777E-04,1.201933E-04,1.147655E-04,1.095829E-04,1.046343E-04,
9.990919E-05,9.539745E-05,9.108945E-05,8.697600E-05,8.304830E-05,7.929798E-05,
7.571701E-05,7.229775E-05,6.903290E-05,6.591548E-05,6.293885E-05,6.009663E-05,
5.738276E-05,5.479145E-05,5.231715E-05,4.995459E-05,4.769873E-05,4.554473E-05,
4.348800E-05,4.152415E-05,3.964899E-05,3.785850E-05,3.614887E-05,3.451645E-05,
3.295774E-05,3.146942E-05,3.004831E-05,2.869138E-05,2.739572E-05,2.615858E-05,
2.497730E-05,2.384936E-05,2.277236E-05,2.174400E-05,2.076208E-05,1.982449E-05,
1.892925E-05,1.807444E-05,1.725822E-05,1.647887E-05,1.573471E-05,1.502416E-05,
1.434569E-05,1.369786E-05,1.307929E-05,1.248865E-05,1.192468E-05,1.138618E-05,
1.087200E-05,1.038104E-05,9.912247E-06,9.464626E-06,9.037219E-06,8.629112E-06,
8.239435E-06,7.867356E-06,7.512079E-06,7.172845E-06,6.848931E-06,6.539644E-06,
6.244324E-06,5.962341E-06,5.693091E-06,5.436000E-06,5.190519E-06,4.956124E-06,
4.732313E-06,4.518609E-06,4.314556E-06,4.119718E-06,3.933678E-06,3.756039E-06,
3.586423E-06,3.424465E-06,3.269822E-06,3.122162E-06,2.981170E-06,2.846545E-06,
2.718000E-06,2.595260E-06,2.478062E-06,2.366156E-06,2.259305E-06,2.157278E-06,
2.059859E-06,1.966839E-06,1.878020E-06,1.793211E-06,1.712233E-06,1.634911E-06,
1.561081E-06,1.490585E-06,1.423273E-06,1.359000E-06,1.297630E-06,1.239031E-06,
1.183078E-06,1.129652E-06,1.078639E-06,1.029929E-06,9.834195E-07,9.390098E-07,
8.966056E-07,8.561164E-07,8.174555E-07,7.805405E-07,7.452926E-07,7.116364E-07,
6.795000E-07,6.488149E-07,6.195154E-07,5.915391E-07,5.648262E-07,5.393195E-07,
5.149647E-07,4.917097E-07,4.695049E-07,4.483028E-07,4.280582E-07,4.087278E-07,
3.902703E-07,3.726463E-07,3.558182E-07,3.397500E-07,3.244074E-07,3.097577E-07,
2.957696E-07,2.824131E-07,2.696598E-07,2.574824E-07,2.458549E-07,2.347525E-07,
2.241514E-07,2.140291E-07,2.043639E-07,1.951351E-07,1.863231E-07,1.779091E-07,
1.698750E-07,1.622037E-07,1.548789E-07,1.478848E-07,1.412065E-07,1.348299E-07,
1.287412E-07,1.229274E-07,1.173762E-07,1.120757E-07,1.070145E-07,1.021819E-07,
9.756757E-08,9.316157E-08,8.895455E-08,8.493750E-08,8.110186E-08,7.743943E-08,
7.394239E-08,7.060327E-08,6.741494E-08,6.437059E-08,6.146372E-08,5.868811E-08,
5.603785E-08,5.350727E-08,5.109097E-08,4.878378E-08,4.658079E-08,4.447727E-08,
4.246875E-08,4.055093E-08,3.871971E-08,3.697119E-08,3.530163E-08,3.370747E-08,
3.218529E-08,3.073186E-08,2.934406E-08,2.801893E-08,2.675364E-08,2.554549E-08,
2.439189E-08,2.329039E-08,2.223864E-08,2.123438E-08,2.027546E-08,1.935986E-08,
1.848560E-08,1.765082E-08,1.685373E-08,1.609265E-08,1.536593E-08,1.467203E-08,
1.400946E-08,1.337682E-08,1.277274E-08,1.219595E-08,1.164520E-08,1.111932E-08,
1.061719E-08,1.013773E-08,9.679929E-09,9.242799E-09,8.825409E-09,8.426867E-09,
8.046324E-09,7.682965E-09,7.336014E-09,7.004732E-09,6.688409E-09,6.386371E-09,
6.097973E-09,5.822598E-09,5.559659E-09,5.308594E-09,5.068866E-09,4.839964E-09,
4.621399E-09,4.412704E-09,4.213434E-09,4.023162E-09,3.841482E-09,3.668007E-09],
[9.391514E-02,8.762592E-02,8.175787E-02,7.628279E-02,7.117436E-02,6.640803E-02,
6.196088E-02,1.517267E-01,1.415660E-01,1.320858E-01,1.232404E-01,1.149873E-01,
1.072870E-01,1.001023E-01,1.873139E-01,1.747700E-01,1.630662E-01,1.521461E-01,
1.419574E-01,1.324509E-01,1.235811E-01,2.092203E-01,1.952095E-01,1.821369E-01,
1.699397E-01,1.585594E-01,1.479411E-01,1.380340E-01,2.227054E-01,2.077915E-01,
1.938763E-01,1.808930E-01,1.687791E-01,1.574765E-01,1.469307E-01,1.370912E-01,
1.279106E-01,1.193449E-01,1.113527E-01,1.038957E-01,9.693814E-02,9.044648E-02,
8.438955E-02,7.873824E-02,7.346537E-02,6.854562E-02,6.395532E-02,5.967242E-02,
5.567634E-02,5.194786E-02,4.846907E-02,4.522324E-02,4.219478E-02,3.936912E-02,
3.673269E-02,3.427281E-02,3.197766E-02,2.983621E-02,2.783817E-02,2.597393E-02,
2.423454E-02,2.261162E-02,2.109739E-02,1.968456E-02,1.836634E-02,1.713640E-02,
1.598883E-02,1.491811E-02,1.391909E-02,1.298697E-02,1.211727E-02,1.130581E-02,
1.054869E-02,9.842280E-03,9.183172E-03,8.568202E-03,7.994415E-03,7.459053E-03,
6.959543E-03,6.493483E-03,6.058634E-03,5.652905E-03,5.274347E-03,4.921140E-03,
4.591586E-03,4.284101E-03,3.997208E-03,3.729527E-03,3.479771E-03,3.246741E-03,
3.029317E-03,2.826453E-03,2.637174E-03,2.460570E-03,2.295793E-03,2.142051E-03,
1.998604E-03,1.864763E-03,1.739886E-03,1.623371E-03,1.514658E-03,1.413226E-03,
1.318587E-03,1.230285E-03,1.147896E-03,1.071025E-03,9.993019E-04,9.323816E-04,
8.699428E-04,8.116854E-04,7.573292E-04,7.066131E-04,6.592934E-04,6.151425E-04,
5.739482E-04,5.355126E-04,4.996509E-04,4.661908E-04,4.349714E-04,4.058427E-04,
3.786646E-04,3.533066E-04,3.296467E-04,3.075712E-04,2.869741E-04,2.677563E-04,
2.498255E-04,2.330954E-04,2.174857E-04,2.029213E-04,1.893323E-04,1.766533E-04,
1.648233E-04,1.537856E-04,1.434871E-04,1.338782E-04,1.249127E-04,1.165477E-04,
1.087429E-04,1.014607E-04,9.466615E-05,8.832664E-05,8.241167E-05,7.689281E-05,
7.174353E-05,6.693908E-05,6.245637E-05,5.827385E-05,5.437143E-05,5.073034E-05,
4.733308E-05,4.416332E-05,4.120584E-05,3.844640E-05,3.587176E-05,3.346954E-05,
3.122818E-05,2.913693E-05,2.718571E-05,2.536517E-05,2.366654E-05,2.208166E-05,
2.060292E-05,1.922320E-05,1.793588E-05,1.673477E-05,1.561409E-05,1.456846E-05,
1.359286E-05,1.268258E-05,1.183327E-05,1.104083E-05,1.030146E-05,9.611601E-06,
8.967941E-06,8.367385E-06,7.807046E-06,7.284232E-06,6.796428E-06,6.341292E-06,
5.916635E-06,5.520415E-06,5.150730E-06,4.805801E-06,4.483971E-06,4.183692E-06,
3.903523E-06,3.642116E-06,3.398214E-06,3.170646E-06,2.958317E-06,2.760208E-06,
2.575365E-06,2.402900E-06,2.241985E-06,2.091846E-06,1.951762E-06,1.821058E-06,
1.699107E-06,1.585323E-06,1.479159E-06,1.380104E-06,1.287682E-06,1.201450E-06,
1.120993E-06,1.045923E-06,9.758808E-07,9.105289E-07,8.495535E-07,7.926615E-07,
7.395793E-07,6.900519E-07,6.438412E-07,6.007251E-07,5.604963E-07,5.229616E-07,
4.879404E-07,4.552645E-07,4.247768E-07,3.963307E-07,3.697897E-07,3.450260E-07,
3.219206E-07,3.003625E-07,2.802482E-07,2.614808E-07,2.439702E-07,2.276322E-07,
2.123884E-07,1.981654E-07,1.848948E-07,1.725130E-07,1.609603E-07,1.501813E-07,
1.401241E-07,1.307404E-07,1.219851E-07,1.138161E-07,1.061942E-07,9.908269E-08,
9.244741E-08,8.625649E-08,8.048015E-08,7.509063E-08,7.006204E-08,6.537019E-08,
6.099255E-08,5.690806E-08,5.309710E-08,4.954134E-08,4.622371E-08,4.312824E-08,
4.024007E-08,3.754532E-08,3.503102E-08,3.268510E-08,3.049627E-08,2.845403E-08,
2.654855E-08,2.477067E-08,2.311185E-08,2.156412E-08,2.012004E-08,1.877266E-08,
1.751551E-08,1.634255E-08,1.524814E-08,1.422702E-08,1.327427E-08,1.238534E-08,
1.155593E-08,1.078206E-08,1.006002E-08,9.386329E-09,8.757755E-09,8.171274E-09,
7.624068E-09,7.113507E-09,6.637137E-09,6.192668E-09,5.777963E-09,5.391030E-09,
5.030009E-09,4.693165E-09,4.378877E-09,4.085637E-09,3.812034E-09,3.556754E-09,
3.318569E-09,3.096334E-09,2.888982E-09,2.695515E-09,2.515005E-09,2.346582E-09,
2.189439E-09,2.042819E-09,1.906017E-09,1.778377E-09,1.659284E-09,1.548167E-09,
1.444491E-09,1.347758E-09,1.257502E-09,1.173291E-09,1.094719E-09,1.021409E-09,
9.530086E-10,8.891884E-10,8.296421E-10,7.740835E-10,7.222454E-10,6.738788E-10,
6.287512E-10,5.866456E-10,5.473597E-10,5.107046E-10,4.765043E-10,4.445942E-10,
4.148211E-10,3.870417E-10,3.611227E-10,3.369394E-10,3.143756E-10,2.933228E-10,
2.736798E-10,2.553523E-10,2.382521E-10,2.222971E-10,2.074105E-10,1.935209E-10,
1.805614E-10,1.684697E-10,1.571878E-10,1.466614E-10,1.368399E-10,1.276762E-10,
1.191261E-10,1.111486E-10,1.037053E-10,9.676043E-11,9.028068E-11,8.423485E-11,
7.859390E-11,7.333070E-11,6.841996E-11,6.383808E-11,5.956303E-11,5.557428E-11,
5.185263E-11,4.838022E-11,4.514034E-11,4.211743E-11,3.929695E-11,3.666535E-11,
3.420998E-11,3.191904E-11,2.978152E-11,2.778714E-11,2.592632E-11,2.419011E-11,
2.257017E-11,2.105871E-11,1.964847E-11,1.833267E-11,1.710499E-11,1.595952E-11],
[1.172251E-01,1.132320E-01,1.093749E-01,1.056492E-01,1.020504E-01,9.857420E-02,
9.521640E-02,9.197298E-02,8.884005E-02,8.581383E-02,8.289069E-02,8.006713E-02,
7.733975E-02,7.470528E-02,7.216054E-02,6.970249E-02,6.732817E-02,6.503472E-02,
6.281940E-02,6.067954E-02,5.861257E-02,5.661601E-02,5.468746E-02,5.282461E-02,
5.102521E-02,4.928710E-02,4.760820E-02,4.598649E-02,4.442002E-02,4.290691E-02,
4.144535E-02,4.003357E-02,3.866988E-02,3.735264E-02,3.608027E-02,3.485124E-02,
3.366408E-02,3.251736E-02,3.140970E-02,3.033977E-02,2.930629E-02,2.830801E-02,
2.734373E-02,2.641230E-02,2.551260E-02,2.464355E-02,2.380410E-02,2.299325E-02,
2.221001E-02,2.145346E-02,2.072267E-02,2.001678E-02,1.933494E-02,1.867632E-02,
1.804014E-02,1.742562E-02,1.683204E-02,1.625868E-02,1.570485E-02,1.516989E-02,
1.465314E-02,1.415400E-02,1.367187E-02,1.320615E-02,1.275630E-02,1.232178E-02,
1.190205E-02,1.149662E-02,1.110501E-02,1.072673E-02,1.036134E-02,1.000839E-02,
9.667469E-03,9.338160E-03,9.020068E-03,8.712811E-03,8.416021E-03,8.129340E-03,
7.852425E-03,7.584943E-03,7.326572E-03,7.077002E-03,6.835933E-03,6.603076E-03,
6.378151E-03,6.160888E-03,5.951025E-03,5.748312E-03,5.552503E-03,5.363364E-03,
5.180668E-03,5.004196E-03,4.833735E-03,4.669080E-03,4.510034E-03,4.356406E-03,
4.208010E-03,4.064670E-03,3.926212E-03,3.792471E-03,3.663286E-03,3.538501E-03,
3.417966E-03,3.301538E-03,3.189075E-03,3.080444E-03,2.975513E-03,2.874156E-03,
2.776251E-03,2.681682E-03,2.590334E-03,2.502098E-03,2.416867E-03,2.334540E-03,
2.255017E-03,2.178203E-03,2.104005E-03,2.032335E-03,1.963106E-03,1.896236E-03,
1.831643E-03,1.769250E-03,1.708983E-03,1.650769E-03,1.594538E-03,1.540222E-03,
1.487756E-03,1.437078E-03,1.388126E-03,1.340841E-03,1.295167E-03,1.251049E-03,
1.208434E-03,1.167270E-03,1.127508E-03,1.089101E-03,1.052003E-03,1.016168E-03,
9.815531E-04,9.481178E-04,9.158214E-04,8.846252E-04,8.544916E-04,8.253845E-04,
7.972689E-04,7.701110E-04,7.438782E-04,7.185389E-04,6.940629E-04,6.704205E-04,
6.475836E-04,6.255245E-04,6.042168E-04,5.836350E-04,5.637542E-04,5.445507E-04,
5.260013E-04,5.080838E-04,4.907766E-04,4.740589E-04,4.579107E-04,4.423126E-04,
4.272458E-04,4.126923E-04,3.986344E-04,3.850555E-04,3.719391E-04,3.592695E-04,
3.470314E-04,3.352103E-04,3.237918E-04,3.127622E-04,3.021084E-04,2.918175E-04,
2.818771E-04,2.722753E-04,2.630006E-04,2.540419E-04,2.453883E-04,2.370295E-04,
2.289554E-04,2.211563E-04,2.136229E-04,2.063461E-04,1.993172E-04,1.925277E-04,
1.859695E-04,1.796347E-04,1.735157E-04,1.676051E-04,1.618959E-04,1.563811E-04,
1.510542E-04,1.459087E-04,1.409386E-04,1.361377E-04,1.315003E-04,1.270209E-04,
1.226941E-04,1.185147E-04,1.144777E-04,1.105782E-04,1.068115E-04,1.031731E-04,
9.965861E-05,9.626387E-05,9.298477E-05,8.981737E-05,8.675786E-05,8.380257E-05,
8.094794E-05,7.819056E-05,7.552710E-05,7.295437E-05,7.046928E-05,6.806884E-05,
6.575016E-05,6.351047E-05,6.134707E-05,5.925736E-05,5.723884E-05,5.528908E-05,
5.340573E-05,5.158653E-05,4.982930E-05,4.813194E-05,4.649239E-05,4.490868E-05,
4.337893E-05,4.190128E-05,4.047397E-05,3.909528E-05,3.776355E-05,3.647719E-05,
3.523464E-05,3.403442E-05,3.287508E-05,3.175523E-05,3.067354E-05,2.962868E-05,
2.861942E-05,2.764454E-05,2.670286E-05,2.579327E-05,2.491465E-05,2.406597E-05,
2.324619E-05,2.245434E-05,2.168946E-05,2.095064E-05,2.023699E-05,1.954764E-05,
1.888178E-05,1.823859E-05,1.761732E-05,1.701721E-05,1.643754E-05,1.587762E-05,
1.533677E-05,1.481434E-05,1.430971E-05,1.382227E-05,1.335143E-05,1.289663E-05,
1.245733E-05,1.203298E-05,1.162310E-05,1.122717E-05,1.084473E-05,1.047532E-05,
1.011849E-05,9.773820E-06,9.440888E-06,9.119297E-06,8.808660E-06,8.508605E-06,
8.218770E-06,7.938809E-06,7.668384E-06,7.407170E-06,7.154855E-06,6.911134E-06,
6.675716E-06,6.448316E-06,6.228663E-06,6.016492E-06,5.811548E-06,5.613585E-06,
5.422366E-06,5.237660E-06,5.059247E-06,4.886910E-06,4.720444E-06,4.559648E-06,
4.404330E-06,4.254302E-06,4.109385E-06,3.969404E-06,3.834192E-06,3.703585E-06,
3.577428E-06,3.455567E-06,3.337858E-06,3.224158E-06,3.114332E-06,3.008246E-06,
2.905774E-06,2.806793E-06,2.711183E-06,2.618830E-06,2.529623E-06,2.443455E-06,
2.360222E-06,2.279824E-06,2.202165E-06,2.127151E-06,2.054693E-06,1.984702E-06,
1.917096E-06,1.851793E-06,1.788714E-06,1.727784E-06,1.668929E-06,1.612079E-06,
1.557166E-06,1.504123E-06,1.452887E-06,1.403396E-06,1.355592E-06,1.309415E-06,
1.264812E-06,1.221728E-06,1.180111E-06,1.139912E-06,1.101082E-06,1.063576E-06,
1.027346E-06,9.923511E-07,9.585480E-07,9.258963E-07,8.943569E-07,8.638918E-07,
8.344645E-07,8.060396E-07,7.785829E-07,7.520615E-07,7.264435E-07,7.016982E-07,
6.777958E-07,6.547076E-07,6.324058E-07,6.108638E-07,5.900555E-07,5.699560E-07,
5.505412E-07,5.317878E-07,5.136731E-07,4.961755E-07,4.792740E-07,4.629482E-07,
4.471784E-07,4.319459E-07,4.172322E-07,4.030198E-07,3.892914E-07,3.760307E-07]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.app_rate_conv1 = 11.2
ted_empty.h2o_depth_puddles = 1.3
ted_empty.soil_depth = 2.6
ted_empty.soil_porosity = 0.4339623
ted_empty.soil_bulk_density = 1.5
ted_empty.h2o_depth_soil = 0.0
ted_empty.soil_foc = 0.015
# internally specified variable
water_type = ['puddles', 'pore_water', 'puddles']
# input variables that change per simulation
ted_empty.aerobic_soil_meta_hlife = pd.Series([15., 10., 20.], dtype='float')
ted_empty.koc = pd.Series([1500., 1000., 2000.], dtype='float')
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
for i in range(3):
result[i] = ted_empty.daily_soil_h2o_timeseries(i, ted_empty.app_rate_min[i], daily_flag[i], water_type[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return

    def test_daily_plant_dew_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in dew that resides on broad leaf plants
:param i; simulation number/index
        :param blp_conc; daily values of pesticide concentration on broad leaf plants (used to estimate the concentration in dew)
        :Notes # calculations are performed daily from the day of first application (assumed to be day 0) through the last day of a year
               # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
               # association; rather, it is one year from the day of the first pesticide application
               # this represents Eq 11 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
        # create an empty TED object (built from empty pandas dataframes) for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
        result = pd.Series([], dtype='float')
expected_results = [[6.201749E+00,6.080137E+00,5.960909E+00,5.844019E+00,5.729422E+00,5.617071E+00,
5.506924E+00,1.160069E+01,1.137320E+01,1.115018E+01,1.093153E+01,1.071717E+01,
1.050702E+01,1.030098E+01,1.630073E+01,1.598109E+01,1.566771E+01,1.536047E+01,
1.505926E+01,1.476396E+01,1.447445E+01,2.039236E+01,1.999248E+01,1.960044E+01,
1.921609E+01,1.883927E+01,1.846984E+01,1.810766E+01,2.395433E+01,2.348460E+01,
2.302408E+01,2.257259E+01,2.212996E+01,2.169600E+01,2.127056E+01,2.085346E+01,
2.044453E+01,2.004363E+01,1.965059E+01,1.926525E+01,1.888747E+01,1.851710E+01,
1.815399E+01,1.779800E+01,1.744899E+01,1.710683E+01,1.677137E+01,1.644250E+01,
1.612007E+01,1.580396E+01,1.549406E+01,1.519023E+01,1.489236E+01,1.460033E+01,
1.431403E+01,1.403334E+01,1.375815E+01,1.348836E+01,1.322386E+01,1.296455E+01,
1.271032E+01,1.246108E+01,1.221673E+01,1.197717E+01,1.174230E+01,1.151204E+01,
1.128630E+01,1.106498E+01,1.084800E+01,1.063528E+01,1.042673E+01,1.022227E+01,
1.002181E+01,9.825293E+00,9.632625E+00,9.443735E+00,9.258549E+00,9.076994E+00,
8.899000E+00,8.724496E+00,8.553414E+00,8.385687E+00,8.221249E+00,8.060035E+00,
7.901982E+00,7.747029E+00,7.595115E+00,7.446179E+00,7.300164E+00,7.157013E+00,
7.016668E+00,6.879075E+00,6.744181E+00,6.611932E+00,6.482276E+00,6.355162E+00,
6.230541E+00,6.108364E+00,5.988583E+00,5.871150E+00,5.756021E+00,5.643149E+00,
5.532490E+00,5.424001E+00,5.317640E+00,5.213364E+00,5.111133E+00,5.010907E+00,
4.912646E+00,4.816312E+00,4.721867E+00,4.629274E+00,4.538497E+00,4.449500E+00,
4.362248E+00,4.276707E+00,4.192843E+00,4.110624E+00,4.030017E+00,3.950991E+00,
3.873515E+00,3.797557E+00,3.723090E+00,3.650082E+00,3.578506E+00,3.508334E+00,
3.439538E+00,3.372090E+00,3.305966E+00,3.241138E+00,3.177581E+00,3.115271E+00,
3.054182E+00,2.994291E+00,2.935575E+00,2.878010E+00,2.821574E+00,2.766245E+00,
2.712001E+00,2.658820E+00,2.606682E+00,2.555567E+00,2.505454E+00,2.456323E+00,
2.408156E+00,2.360934E+00,2.314637E+00,2.269249E+00,2.224750E+00,2.181124E+00,
2.138353E+00,2.096422E+00,2.055312E+00,2.015009E+00,1.975496E+00,1.936757E+00,
1.898779E+00,1.861545E+00,1.825041E+00,1.789253E+00,1.754167E+00,1.719769E+00,
1.686045E+00,1.652983E+00,1.620569E+00,1.588791E+00,1.557635E+00,1.527091E+00,
1.497146E+00,1.467788E+00,1.439005E+00,1.410787E+00,1.383122E+00,1.356000E+00,
1.329410E+00,1.303341E+00,1.277783E+00,1.252727E+00,1.228162E+00,1.204078E+00,
1.180467E+00,1.157319E+00,1.134624E+00,1.112375E+00,1.090562E+00,1.069177E+00,
1.048211E+00,1.027656E+00,1.007504E+00,9.877478E-01,9.683787E-01,9.493894E-01,
9.307724E-01,9.125205E-01,8.946266E-01,8.770835E-01,8.598844E-01,8.430226E-01,
8.264914E-01,8.102845E-01,7.943953E-01,7.788177E-01,7.635455E-01,7.485729E-01,
7.338938E-01,7.195026E-01,7.053936E-01,6.915612E-01,6.780002E-01,6.647050E-01,
6.516705E-01,6.388917E-01,6.263634E-01,6.140808E-01,6.020390E-01,5.902334E-01,
5.786593E-01,5.673121E-01,5.561875E-01,5.452810E-01,5.345884E-01,5.241054E-01,
5.138280E-01,5.037522E-01,4.938739E-01,4.841893E-01,4.746947E-01,4.653862E-01,
4.562603E-01,4.473133E-01,4.385417E-01,4.299422E-01,4.215113E-01,4.132457E-01,
4.051422E-01,3.971976E-01,3.894088E-01,3.817728E-01,3.742864E-01,3.669469E-01,
3.597513E-01,3.526968E-01,3.457806E-01,3.390001E-01,3.323525E-01,3.258353E-01,
3.194458E-01,3.131817E-01,3.070404E-01,3.010195E-01,2.951167E-01,2.893296E-01,
2.836561E-01,2.780937E-01,2.726405E-01,2.672942E-01,2.620527E-01,2.569140E-01,
2.518761E-01,2.469370E-01,2.420947E-01,2.373473E-01,2.326931E-01,2.281301E-01,
2.236566E-01,2.192709E-01,2.149711E-01,2.107557E-01,2.066229E-01,2.025711E-01,
1.985988E-01,1.947044E-01,1.908864E-01,1.871432E-01,1.834735E-01,1.798756E-01,
1.763484E-01,1.728903E-01,1.695000E-01,1.661762E-01,1.629176E-01,1.597229E-01,
1.565908E-01,1.535202E-01,1.505098E-01,1.475584E-01,1.446648E-01,1.418280E-01,
1.390469E-01,1.363202E-01,1.336471E-01,1.310264E-01,1.284570E-01,1.259380E-01,
1.234685E-01,1.210473E-01,1.186737E-01,1.163466E-01,1.140651E-01,1.118283E-01,
1.096354E-01,1.074856E-01,1.053778E-01,1.033114E-01,1.012856E-01,9.929941E-02,
9.735221E-02,9.544319E-02,9.357161E-02,9.173673E-02,8.993782E-02,8.817420E-02,
8.644516E-02,8.475002E-02,8.308812E-02,8.145882E-02,7.986146E-02,7.829542E-02,
7.676010E-02,7.525488E-02,7.377918E-02,7.233241E-02,7.091402E-02,6.952344E-02,
6.816012E-02,6.682355E-02,6.551318E-02,6.422850E-02,6.296902E-02,6.173424E-02,
6.052367E-02,5.933684E-02,5.817328E-02,5.703253E-02,5.591416E-02,5.481772E-02,
5.374278E-02,5.268891E-02,5.165572E-02,5.064278E-02,4.964970E-02,4.867610E-02,
4.772160E-02,4.678580E-02,4.586836E-02,4.496891E-02,4.408710E-02,4.322258E-02,
4.237501E-02,4.154406E-02,4.072941E-02,3.993073E-02,3.914771E-02,3.838005E-02,
3.762744E-02,3.688959E-02,3.616621E-02,3.545701E-02,3.476172E-02,3.408006E-02,
3.341177E-02,3.275659E-02,3.211425E-02,3.148451E-02,3.086712E-02,3.026183E-02],
[3.487500E-01,3.419112E-01,3.352066E-01,3.286334E-01,3.221891E-01,3.158711E-01,
3.096771E-01,6.523545E-01,6.395622E-01,6.270208E-01,6.147253E-01,6.026709E-01,
5.908529E-01,5.792667E-01,9.166576E-01,8.986825E-01,8.810599E-01,8.637828E-01,
8.468446E-01,8.302385E-01,8.139580E-01,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,9.812289E-01,9.619876E-01,9.431236E-01,9.246296E-01,
9.064981E-01,8.887223E-01,8.712950E-01,8.542094E-01,8.374589E-01,8.210368E-01,
8.049368E-01,7.891525E-01,7.736777E-01,7.585063E-01,7.436325E-01,7.290503E-01,
7.147541E-01,7.007382E-01,6.869971E-01,6.735255E-01,6.603181E-01,6.473697E-01,
6.346751E-01,6.222295E-01,6.100280E-01,5.980657E-01,5.863380E-01,5.748403E-01,
5.635680E-01,5.525168E-01,5.416823E-01,5.310602E-01,5.206465E-01,5.104369E-01,
5.004275E-01,4.906145E-01,4.809938E-01,4.715618E-01,4.623148E-01,4.532491E-01,
4.443611E-01,4.356475E-01,4.271047E-01,4.187294E-01,4.105184E-01,4.024684E-01,
3.945762E-01,3.868388E-01,3.792532E-01,3.718162E-01,3.645251E-01,3.573770E-01,
3.503691E-01,3.434986E-01,3.367628E-01,3.301591E-01,3.236848E-01,3.173376E-01,
3.111148E-01,3.050140E-01,2.990329E-01,2.931690E-01,2.874201E-01,2.817840E-01,
2.762584E-01,2.708411E-01,2.655301E-01,2.603232E-01,2.552184E-01,2.502138E-01,
2.453072E-01,2.404969E-01,2.357809E-01,2.311574E-01,2.266245E-01,2.221806E-01,
2.178237E-01,2.135523E-01,2.093647E-01,2.052592E-01,2.012342E-01,1.972881E-01,
1.934194E-01,1.896266E-01,1.859081E-01,1.822626E-01,1.786885E-01,1.751845E-01,
1.717493E-01,1.683814E-01,1.650795E-01,1.618424E-01,1.586688E-01,1.555574E-01,
1.525070E-01,1.495164E-01,1.465845E-01,1.437101E-01,1.408920E-01,1.381292E-01,
1.354206E-01,1.327651E-01,1.301616E-01,1.276092E-01,1.251069E-01,1.226536E-01,
1.202485E-01,1.178905E-01,1.155787E-01,1.133123E-01,1.110903E-01,1.089119E-01,
1.067762E-01,1.046824E-01,1.026296E-01,1.006171E-01,9.864406E-02,9.670971E-02,
9.481329E-02,9.295406E-02,9.113129E-02,8.934426E-02,8.759227E-02,8.587464E-02,
8.419069E-02,8.253976E-02,8.092121E-02,7.933439E-02,7.777869E-02,7.625350E-02,
7.475822E-02,7.329225E-02,7.185504E-02,7.044600E-02,6.906460E-02,6.771029E-02,
6.638253E-02,6.508081E-02,6.380461E-02,6.255344E-02,6.132681E-02,6.012423E-02,
5.894523E-02,5.778935E-02,5.665613E-02,5.554514E-02,5.445593E-02,5.338809E-02,
5.234118E-02,5.131480E-02,5.030855E-02,4.932203E-02,4.835485E-02,4.740664E-02,
4.647703E-02,4.556564E-02,4.467213E-02,4.379614E-02,4.293732E-02,4.209535E-02,
4.126988E-02,4.046060E-02,3.966720E-02,3.888935E-02,3.812675E-02,3.737911E-02,
3.664613E-02,3.592752E-02,3.522300E-02,3.453230E-02,3.385514E-02,3.319126E-02,
3.254040E-02,3.190231E-02,3.127672E-02,3.066340E-02,3.006211E-02,2.947261E-02,
2.889467E-02,2.832807E-02,2.777257E-02,2.722797E-02,2.669404E-02,2.617059E-02,
2.565740E-02,2.515427E-02,2.466101E-02,2.417743E-02,2.370332E-02,2.323851E-02,
2.278282E-02,2.233606E-02,2.189807E-02,2.146866E-02,2.104767E-02,2.063494E-02,
2.023030E-02,1.983360E-02,1.944467E-02,1.906338E-02,1.868955E-02,1.832306E-02,
1.796376E-02,1.761150E-02,1.726615E-02,1.692757E-02,1.659563E-02,1.627020E-02,
1.595115E-02,1.563836E-02,1.533170E-02,1.503106E-02,1.473631E-02,1.444734E-02,
1.416403E-02,1.388629E-02,1.361398E-02,1.334702E-02,1.308529E-02,1.282870E-02,
1.257714E-02,1.233051E-02,1.208871E-02,1.185166E-02,1.161926E-02,1.139141E-02,
1.116803E-02,1.094903E-02,1.073433E-02,1.052384E-02,1.031747E-02,1.011515E-02,
9.916799E-03,9.722337E-03,9.531688E-03,9.344777E-03,9.161532E-03,8.981880E-03,
8.805750E-03,8.633075E-03,8.463786E-03,8.297816E-03,8.135101E-03,7.975577E-03,
7.819180E-03,7.665851E-03,7.515528E-03,7.368153E-03,7.223668E-03,7.082017E-03,
6.943143E-03,6.806992E-03,6.673511E-03,6.542647E-03,6.414350E-03,6.288569E-03,
6.165254E-03,6.044357E-03,5.925831E-03,5.809629E-03,5.695705E-03,5.584016E-03,
5.474517E-03,5.367165E-03,5.261918E-03,5.158735E-03,5.057576E-03,4.958400E-03,
4.861168E-03,4.765844E-03,4.672389E-03,4.580766E-03,4.490940E-03,4.402875E-03,
4.316538E-03,4.231893E-03,4.148908E-03,4.067550E-03,3.987788E-03,3.909590E-03,
3.832926E-03,3.757764E-03,3.684077E-03,3.611834E-03,3.541008E-03,3.471571E-03,
3.403496E-03,3.336755E-03,3.271324E-03,3.207175E-03,3.144284E-03,3.082627E-03,
3.022178E-03,2.962915E-03,2.904814E-03,2.847853E-03,2.792008E-03,2.737258E-03,
2.683583E-03,2.630959E-03,2.579368E-03,2.528788E-03,2.479200E-03,2.430584E-03,
2.382922E-03,2.336194E-03,2.290383E-03,2.245470E-03,2.201438E-03,2.158269E-03,
2.115946E-03,2.074454E-03,2.033775E-03,1.993894E-03,1.954795E-03,1.916463E-03,
1.878882E-03,1.842038E-03,1.805917E-03,1.770504E-03,1.735786E-03,1.701748E-03],
[8.718750E-02,8.547781E-02,8.380164E-02,8.215834E-02,8.054726E-02,7.896778E-02,
7.741927E-02,1.630886E-01,1.598906E-01,1.567552E-01,1.536813E-01,1.506677E-01,
1.477132E-01,1.448167E-01,2.291644E-01,2.246706E-01,2.202650E-01,2.159457E-01,
2.117111E-01,2.075596E-01,2.034895E-01,2.866867E-01,2.810649E-01,2.755534E-01,
2.701500E-01,2.648525E-01,2.596589E-01,2.545672E-01,3.367628E-01,3.301591E-01,
3.236848E-01,3.173376E-01,3.111148E-01,3.050140E-01,2.990329E-01,3.803565E-01,
3.728980E-01,3.655857E-01,3.584167E-01,3.513884E-01,3.444979E-01,3.377425E-01,
4.183071E-01,4.101043E-01,4.020624E-01,3.941782E-01,3.864486E-01,3.788706E-01,
3.714412E-01,3.641575E-01,3.570166E-01,3.500157E-01,3.431521E-01,3.364231E-01,
3.298260E-01,3.233583E-01,3.170175E-01,3.108010E-01,3.047063E-01,2.987312E-01,
2.928733E-01,2.871302E-01,2.814998E-01,2.759797E-01,2.705680E-01,2.652623E-01,
2.600606E-01,2.549610E-01,2.499614E-01,2.450598E-01,2.402543E-01,2.355431E-01,
2.309242E-01,2.263959E-01,2.219565E-01,2.176040E-01,2.133369E-01,2.091535E-01,
2.050522E-01,2.010312E-01,1.970891E-01,1.932243E-01,1.894353E-01,1.857206E-01,
1.820787E-01,1.785083E-01,1.750078E-01,1.715760E-01,1.682115E-01,1.649130E-01,
1.616792E-01,1.585087E-01,1.554005E-01,1.523532E-01,1.493656E-01,1.464367E-01,
1.435651E-01,1.407499E-01,1.379899E-01,1.352840E-01,1.326311E-01,1.300303E-01,
1.274805E-01,1.249807E-01,1.225299E-01,1.201272E-01,1.177715E-01,1.154621E-01,
1.131980E-01,1.109782E-01,1.088020E-01,1.066685E-01,1.045768E-01,1.025261E-01,
1.005156E-01,9.854456E-02,9.661216E-02,9.471765E-02,9.286030E-02,9.103937E-02,
8.925414E-02,8.750392E-02,8.578802E-02,8.410577E-02,8.245651E-02,8.083959E-02,
7.925437E-02,7.770024E-02,7.617659E-02,7.468281E-02,7.321833E-02,7.178256E-02,
7.037495E-02,6.899494E-02,6.764199E-02,6.631557E-02,6.501516E-02,6.374025E-02,
6.249035E-02,6.126495E-02,6.006358E-02,5.888577E-02,5.773106E-02,5.659899E-02,
5.548911E-02,5.440101E-02,5.333424E-02,5.228838E-02,5.126304E-02,5.025780E-02,
4.927228E-02,4.830608E-02,4.735883E-02,4.643015E-02,4.551968E-02,4.462707E-02,
4.375196E-02,4.289401E-02,4.205289E-02,4.122825E-02,4.041979E-02,3.962719E-02,
3.885012E-02,3.808829E-02,3.734141E-02,3.660916E-02,3.589128E-02,3.518747E-02,
3.449747E-02,3.382099E-02,3.315779E-02,3.250758E-02,3.187013E-02,3.124517E-02,
3.063247E-02,3.003179E-02,2.944289E-02,2.886553E-02,2.829949E-02,2.774456E-02,
2.720050E-02,2.666712E-02,2.614419E-02,2.563152E-02,2.512890E-02,2.463614E-02,
2.415304E-02,2.367941E-02,2.321507E-02,2.275984E-02,2.231353E-02,2.187598E-02,
2.144701E-02,2.102644E-02,2.061413E-02,2.020990E-02,1.981359E-02,1.942506E-02,
1.904415E-02,1.867070E-02,1.830458E-02,1.794564E-02,1.759374E-02,1.724873E-02,
1.691050E-02,1.657889E-02,1.625379E-02,1.593506E-02,1.562259E-02,1.531624E-02,
1.501590E-02,1.472144E-02,1.443276E-02,1.414975E-02,1.387228E-02,1.360025E-02,
1.333356E-02,1.307210E-02,1.281576E-02,1.256445E-02,1.231807E-02,1.207652E-02,
1.183971E-02,1.160754E-02,1.137992E-02,1.115677E-02,1.093799E-02,1.072350E-02,
1.051322E-02,1.030706E-02,1.010495E-02,9.906796E-03,9.712530E-03,9.522073E-03,
9.335351E-03,9.152291E-03,8.972820E-03,8.796868E-03,8.624367E-03,8.455249E-03,
8.289446E-03,8.126895E-03,7.967532E-03,7.811293E-03,7.658119E-03,7.507948E-03,
7.360721E-03,7.216382E-03,7.074873E-03,6.936139E-03,6.800126E-03,6.666780E-03,
6.536048E-03,6.407880E-03,6.282226E-03,6.159035E-03,6.038260E-03,5.919853E-03,
5.803769E-03,5.689960E-03,5.578384E-03,5.468995E-03,5.361751E-03,5.256611E-03,
5.153532E-03,5.052474E-03,4.953398E-03,4.856265E-03,4.761037E-03,4.667676E-03,
4.576145E-03,4.486410E-03,4.398434E-03,4.312184E-03,4.227624E-03,4.144723E-03,
4.063448E-03,3.983766E-03,3.905647E-03,3.829059E-03,3.753974E-03,3.680361E-03,
3.608191E-03,3.537437E-03,3.468070E-03,3.400063E-03,3.333390E-03,3.268024E-03,
3.203940E-03,3.141113E-03,3.079517E-03,3.019130E-03,2.959927E-03,2.901884E-03,
2.844980E-03,2.789192E-03,2.734497E-03,2.680876E-03,2.628305E-03,2.576766E-03,
2.526237E-03,2.476699E-03,2.428133E-03,2.380518E-03,2.333838E-03,2.288073E-03,
2.243205E-03,2.199217E-03,2.156092E-03,2.113812E-03,2.072362E-03,2.031724E-03,
1.991883E-03,1.952823E-03,1.914530E-03,1.876987E-03,1.840180E-03,1.804096E-03,
1.768718E-03,1.734035E-03,1.700031E-03,1.666695E-03,1.634012E-03,1.601970E-03,
1.570556E-03,1.539759E-03,1.509565E-03,1.479963E-03,1.450942E-03,1.422490E-03,
1.394596E-03,1.367249E-03,1.340438E-03,1.314153E-03,1.288383E-03,1.263119E-03,
1.238350E-03,1.214066E-03,1.190259E-03,1.166919E-03,1.144036E-03,1.121602E-03,
1.099609E-03,1.078046E-03,1.056906E-03,1.036181E-03,1.015862E-03,9.959415E-04,
9.764117E-04,9.572648E-04,9.384935E-04,9.200902E-04,9.020478E-04,8.843592E-04,
8.670174E-04,8.500157E-04,8.333474E-04,8.170060E-04,8.009850E-04,7.852782E-04,
7.698794E-04,7.547825E-04,7.399817E-04,7.254711E-04,7.112450E-04,6.972980E-04]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.frac_pest_on_surface = 0.62
ted_empty.density_h2o = 1.0
ted_empty.mass_wax = 0.012
# input variables that change per simulation
ted_empty.solubility = pd.Series([145., 1., 20.], dtype='float')
ted_empty.log_kow = pd.Series([2.75, 4., 5.], dtype='float')
# internally calculated variables
blp_conc = pd.Series([[6.750000E+01,6.617637E+01,6.487869E+01,6.360646E+01,6.235917E+01,6.113635E+01,5.993750E+01,
1.262622E+02,1.237862E+02,1.213589E+02,1.189791E+02,1.166460E+02,1.143586E+02,1.121161E+02,
1.774176E+02,1.739385E+02,1.705277E+02,1.671838E+02,1.639054E+02,1.606913E+02,1.575403E+02,
2.219510E+02,2.175987E+02,2.133317E+02,2.091484E+02,2.050471E+02,2.010263E+02,1.970843E+02,
2.607196E+02,2.556070E+02,2.505947E+02,2.456807E+02,2.408631E+02,2.361399E+02,2.315093E+02,
2.269696E+02,2.225188E+02,2.181554E+02,2.138775E+02,2.096835E+02,2.055717E+02,2.015406E+02,
1.975885E+02,1.937139E+02,1.899153E+02,1.861912E+02,1.825401E+02,1.789606E+02,1.754513E+02,
1.720108E+02,1.686377E+02,1.653309E+02,1.620888E+02,1.589104E+02,1.557942E+02,1.527392E+02,
1.497441E+02,1.468077E+02,1.439289E+02,1.411065E+02,1.383395E+02,1.356267E+02,1.329672E+02,
1.303598E+02,1.278035E+02,1.252974E+02,1.228404E+02,1.204315E+02,1.180699E+02,1.157547E+02,
1.134848E+02,1.112594E+02,1.090777E+02,1.069387E+02,1.048417E+02,1.027859E+02,1.007703E+02,
9.879424E+01,9.685694E+01,9.495764E+01,9.309558E+01,9.127003E+01,8.948028E+01,8.772563E+01,
8.600538E+01,8.431887E+01,8.266543E+01,8.104441E+01,7.945518E+01,7.789711E+01,7.636959E+01,
7.487203E+01,7.340384E+01,7.196443E+01,7.055325E+01,6.916975E+01,6.781337E+01,6.648359E+01,
6.517989E+01,6.390175E+01,6.264868E+01,6.142018E+01,6.021576E+01,5.903497E+01,5.787733E+01,
5.674239E+01,5.562971E+01,5.453884E+01,5.346937E+01,5.242087E+01,5.139293E+01,5.038514E+01,
4.939712E+01,4.842847E+01,4.747882E+01,4.654779E+01,4.563501E+01,4.474014E+01,4.386281E+01,
4.300269E+01,4.215943E+01,4.133271E+01,4.052220E+01,3.972759E+01,3.894855E+01,3.818480E+01,
3.743602E+01,3.670192E+01,3.598222E+01,3.527663E+01,3.458487E+01,3.390669E+01,3.324180E+01,
3.258994E+01,3.195088E+01,3.132434E+01,3.071009E+01,3.010788E+01,2.951748E+01,2.893866E+01,
2.837119E+01,2.781485E+01,2.726942E+01,2.673468E+01,2.621043E+01,2.569646E+01,2.519257E+01,
2.469856E+01,2.421424E+01,2.373941E+01,2.327389E+01,2.281751E+01,2.237007E+01,2.193141E+01,
2.150135E+01,2.107972E+01,2.066636E+01,2.026110E+01,1.986379E+01,1.947428E+01,1.909240E+01,
1.871801E+01,1.835096E+01,1.799111E+01,1.763831E+01,1.729244E+01,1.695334E+01,1.662090E+01,
1.629497E+01,1.597544E+01,1.566217E+01,1.535504E+01,1.505394E+01,1.475874E+01,1.446933E+01,
1.418560E+01,1.390743E+01,1.363471E+01,1.336734E+01,1.310522E+01,1.284823E+01,1.259629E+01,
1.234928E+01,1.210712E+01,1.186970E+01,1.163695E+01,1.140875E+01,1.118503E+01,1.096570E+01,
1.075067E+01,1.053986E+01,1.033318E+01,1.013055E+01,9.931897E+00,9.737138E+00,9.546199E+00,
9.359004E+00,9.175480E+00,8.995554E+00,8.819157E+00,8.646218E+00,8.476671E+00,8.310449E+00,
8.147486E+00,7.987719E+00,7.831085E+00,7.677522E+00,7.526970E+00,7.379371E+00,7.234666E+00,
7.092799E+00,6.953713E+00,6.817355E+00,6.683671E+00,6.552608E+00,6.424116E+00,6.298143E+00,
6.174640E+00,6.053559E+00,5.934852E+00,5.818474E+00,5.704377E+00,5.592517E+00,5.482852E+00,
5.375336E+00,5.269929E+00,5.166589E+00,5.065275E+00,4.965948E+00,4.868569E+00,4.773100E+00,
4.679502E+00,4.587740E+00,4.497777E+00,4.409578E+00,4.323109E+00,4.238336E+00,4.155225E+00,
4.073743E+00,3.993859E+00,3.915542E+00,3.838761E+00,3.763485E+00,3.689686E+00,3.617333E+00,
3.546399E+00,3.476857E+00,3.408678E+00,3.341835E+00,3.276304E+00,3.212058E+00,3.149071E+00,
3.087320E+00,3.026779E+00,2.967426E+00,2.909237E+00,2.852188E+00,2.796259E+00,2.741426E+00,
2.687668E+00,2.634965E+00,2.583295E+00,2.532638E+00,2.482974E+00,2.434285E+00,2.386550E+00,
2.339751E+00,2.293870E+00,2.248889E+00,2.204789E+00,2.161555E+00,2.119168E+00,2.077612E+00,
2.036872E+00,1.996930E+00,1.957771E+00,1.919380E+00,1.881743E+00,1.844843E+00,1.808667E+00,
1.773200E+00,1.738428E+00,1.704339E+00,1.670918E+00,1.638152E+00,1.606029E+00,1.574536E+00,
1.543660E+00,1.513390E+00,1.483713E+00,1.454618E+00,1.426094E+00,1.398129E+00,1.370713E+00,
1.343834E+00,1.317482E+00,1.291647E+00,1.266319E+00,1.241487E+00,1.217142E+00,1.193275E+00,
1.169876E+00,1.146935E+00,1.124444E+00,1.102395E+00,1.080777E+00,1.059584E+00,1.038806E+00,
1.018436E+00,9.984649E-01,9.788856E-01,9.596902E-01,9.408713E-01,9.224214E-01,9.043333E-01,
8.865998E-01,8.692142E-01,8.521694E-01,8.354589E-01,8.190760E-01,8.030145E-01,7.872678E-01,
7.718300E-01,7.566949E-01,7.418565E-01,7.273092E-01,7.130471E-01,6.990647E-01,6.853565E-01,
6.719170E-01,6.587411E-01,6.458236E-01,6.331594E-01,6.207436E-01,6.085712E-01,5.966374E-01,
5.849378E-01,5.734675E-01,5.622221E-01,5.511973E-01,5.403887E-01,5.297920E-01,5.194031E-01,
5.092179E-01,4.992324E-01,4.894428E-01,4.798451E-01,4.704356E-01,4.612107E-01,4.521666E-01,
4.432999E-01,4.346071E-01,4.260847E-01,4.177294E-01,4.095380E-01,4.015072E-01,3.936339E-01,
3.859150E-01,3.783474E-01,3.709283E-01,3.636546E-01,3.565236E-01,3.495323E-01,3.426782E-01,
3.359585E-01,3.293706E-01],
[6.750000E+01,6.617637E+01,6.487869E+01,6.360646E+01,6.235917E+01,6.113635E+01,5.993750E+01,
1.262622E+02,1.237862E+02,1.213589E+02,1.189791E+02,1.166460E+02,1.143586E+02,1.121161E+02,
1.774176E+02,1.739385E+02,1.705277E+02,1.671838E+02,1.639054E+02,1.606913E+02,1.575403E+02,
2.219510E+02,2.175987E+02,2.133317E+02,2.091484E+02,2.050471E+02,2.010263E+02,1.970843E+02,
2.607196E+02,2.556070E+02,2.505947E+02,2.456807E+02,2.408631E+02,2.361399E+02,2.315093E+02,
2.269696E+02,2.225188E+02,2.181554E+02,2.138775E+02,2.096835E+02,2.055717E+02,2.015406E+02,
1.975885E+02,1.937139E+02,1.899153E+02,1.861912E+02,1.825401E+02,1.789606E+02,1.754513E+02,
1.720108E+02,1.686377E+02,1.653309E+02,1.620888E+02,1.589104E+02,1.557942E+02,1.527392E+02,
1.497441E+02,1.468077E+02,1.439289E+02,1.411065E+02,1.383395E+02,1.356267E+02,1.329672E+02,
1.303598E+02,1.278035E+02,1.252974E+02,1.228404E+02,1.204315E+02,1.180699E+02,1.157547E+02,
1.134848E+02,1.112594E+02,1.090777E+02,1.069387E+02,1.048417E+02,1.027859E+02,1.007703E+02,
9.879424E+01,9.685694E+01,9.495764E+01,9.309558E+01,9.127003E+01,8.948028E+01,8.772563E+01,
8.600538E+01,8.431887E+01,8.266543E+01,8.104441E+01,7.945518E+01,7.789711E+01,7.636959E+01,
7.487203E+01,7.340384E+01,7.196443E+01,7.055325E+01,6.916975E+01,6.781337E+01,6.648359E+01,
6.517989E+01,6.390175E+01,6.264868E+01,6.142018E+01,6.021576E+01,5.903497E+01,5.787733E+01,
5.674239E+01,5.562971E+01,5.453884E+01,5.346937E+01,5.242087E+01,5.139293E+01,5.038514E+01,
4.939712E+01,4.842847E+01,4.747882E+01,4.654779E+01,4.563501E+01,4.474014E+01,4.386281E+01,
4.300269E+01,4.215943E+01,4.133271E+01,4.052220E+01,3.972759E+01,3.894855E+01,3.818480E+01,
3.743602E+01,3.670192E+01,3.598222E+01,3.527663E+01,3.458487E+01,3.390669E+01,3.324180E+01,
3.258994E+01,3.195088E+01,3.132434E+01,3.071009E+01,3.010788E+01,2.951748E+01,2.893866E+01,
2.837119E+01,2.781485E+01,2.726942E+01,2.673468E+01,2.621043E+01,2.569646E+01,2.519257E+01,
2.469856E+01,2.421424E+01,2.373941E+01,2.327389E+01,2.281751E+01,2.237007E+01,2.193141E+01,
2.150135E+01,2.107972E+01,2.066636E+01,2.026110E+01,1.986379E+01,1.947428E+01,1.909240E+01,
1.871801E+01,1.835096E+01,1.799111E+01,1.763831E+01,1.729244E+01,1.695334E+01,1.662090E+01,
1.629497E+01,1.597544E+01,1.566217E+01,1.535504E+01,1.505394E+01,1.475874E+01,1.446933E+01,
1.418560E+01,1.390743E+01,1.363471E+01,1.336734E+01,1.310522E+01,1.284823E+01,1.259629E+01,
1.234928E+01,1.210712E+01,1.186970E+01,1.163695E+01,1.140875E+01,1.118503E+01,1.096570E+01,
1.075067E+01,1.053986E+01,1.033318E+01,1.013055E+01,9.931897E+00,9.737138E+00,9.546199E+00,
9.359004E+00,9.175480E+00,8.995554E+00,8.819157E+00,8.646218E+00,8.476671E+00,8.310449E+00,
8.147486E+00,7.987719E+00,7.831085E+00,7.677522E+00,7.526970E+00,7.379371E+00,7.234666E+00,
7.092799E+00,6.953713E+00,6.817355E+00,6.683671E+00,6.552608E+00,6.424116E+00,6.298143E+00,
6.174640E+00,6.053559E+00,5.934852E+00,5.818474E+00,5.704377E+00,5.592517E+00,5.482852E+00,
5.375336E+00,5.269929E+00,5.166589E+00,5.065275E+00,4.965948E+00,4.868569E+00,4.773100E+00,
4.679502E+00,4.587740E+00,4.497777E+00,4.409578E+00,4.323109E+00,4.238336E+00,4.155225E+00,
4.073743E+00,3.993859E+00,3.915542E+00,3.838761E+00,3.763485E+00,3.689686E+00,3.617333E+00,
3.546399E+00,3.476857E+00,3.408678E+00,3.341835E+00,3.276304E+00,3.212058E+00,3.149071E+00,
3.087320E+00,3.026779E+00,2.967426E+00,2.909237E+00,2.852188E+00,2.796259E+00,2.741426E+00,
2.687668E+00,2.634965E+00,2.583295E+00,2.532638E+00,2.482974E+00,2.434285E+00,2.386550E+00,
2.339751E+00,2.293870E+00,2.248889E+00,2.204789E+00,2.161555E+00,2.119168E+00,2.077612E+00,
2.036872E+00,1.996930E+00,1.957771E+00,1.919380E+00,1.881743E+00,1.844843E+00,1.808667E+00,
1.773200E+00,1.738428E+00,1.704339E+00,1.670918E+00,1.638152E+00,1.606029E+00,1.574536E+00,
1.543660E+00,1.513390E+00,1.483713E+00,1.454618E+00,1.426094E+00,1.398129E+00,1.370713E+00,
1.343834E+00,1.317482E+00,1.291647E+00,1.266319E+00,1.241487E+00,1.217142E+00,1.193275E+00,
1.169876E+00,1.146935E+00,1.124444E+00,1.102395E+00,1.080777E+00,1.059584E+00,1.038806E+00,
1.018436E+00,9.984649E-01,9.788856E-01,9.596902E-01,9.408713E-01,9.224214E-01,9.043333E-01,
8.865998E-01,8.692142E-01,8.521694E-01,8.354589E-01,8.190760E-01,8.030145E-01,7.872678E-01,
7.718300E-01,7.566949E-01,7.418565E-01,7.273092E-01,7.130471E-01,6.990647E-01,6.853565E-01,
6.719170E-01,6.587411E-01,6.458236E-01,6.331594E-01,6.207436E-01,6.085712E-01,5.966374E-01,
5.849378E-01,5.734675E-01,5.622221E-01,5.511973E-01,5.403887E-01,5.297920E-01,5.194031E-01,
5.092179E-01,4.992324E-01,4.894428E-01,4.798451E-01,4.704356E-01,4.612107E-01,4.521666E-01,
4.432999E-01,4.346071E-01,4.260847E-01,4.177294E-01,4.095380E-01,4.015072E-01,3.936339E-01,
3.859150E-01,3.783474E-01,3.709283E-01,3.636546E-01,3.565236E-01,3.495323E-01,3.426782E-01,
3.359585E-01,3.293706E-01],
[1.687500E+02,1.654409E+02,1.621967E+02,1.590161E+02,1.558979E+02,1.528409E+02,1.498438E+02,
3.156554E+02,3.094656E+02,3.033972E+02,2.974477E+02,2.916150E+02,2.858966E+02,2.802903E+02,
4.435440E+02,4.348464E+02,4.263193E+02,4.179594E+02,4.097635E+02,4.017283E+02,3.938506E+02,
5.548775E+02,5.439967E+02,5.333292E+02,5.228710E+02,5.126178E+02,5.025657E+02,4.927107E+02,
6.517989E+02,6.390175E+02,6.264868E+02,6.142018E+02,6.021576E+02,5.903497E+02,5.787733E+02,
7.361739E+02,7.217380E+02,7.075851E+02,6.937098E+02,6.801066E+02,6.667701E+02,6.536952E+02,
8.096266E+02,7.937503E+02,7.781854E+02,7.629256E+02,7.479651E+02,7.332980E+02,7.189184E+02,
7.048209E+02,6.909998E+02,6.774497E+02,6.641653E+02,6.511414E+02,6.383730E+02,6.258549E+02,
6.135822E+02,6.015503E+02,5.897542E+02,5.781895E+02,5.668516E+02,5.557359E+02,5.448383E+02,
5.341544E+02,5.236799E+02,5.134109E+02,5.033432E+02,4.934729E+02,4.837962E+02,4.743093E+02,
4.650084E+02,4.558898E+02,4.469501E+02,4.381857E+02,4.295931E+02,4.211691E+02,4.129102E+02,
4.048133E+02,3.968752E+02,3.890927E+02,3.814628E+02,3.739826E+02,3.666490E+02,3.594592E+02,
3.524104E+02,3.454999E+02,3.387249E+02,3.320827E+02,3.255707E+02,3.191865E+02,3.129274E+02,
3.067911E+02,3.007751E+02,2.948771E+02,2.890947E+02,2.834258E+02,2.778680E+02,2.724191E+02,
2.670772E+02,2.618400E+02,2.567054E+02,2.516716E+02,2.467365E+02,2.418981E+02,2.371546E+02,
2.325042E+02,2.279449E+02,2.234751E+02,2.190929E+02,2.147966E+02,2.105845E+02,2.064551E+02,
2.024067E+02,1.984376E+02,1.945463E+02,1.907314E+02,1.869913E+02,1.833245E+02,1.797296E+02,
1.762052E+02,1.727499E+02,1.693624E+02,1.660413E+02,1.627854E+02,1.595932E+02,1.564637E+02,
1.533956E+02,1.503876E+02,1.474386E+02,1.445474E+02,1.417129E+02,1.389340E+02,1.362096E+02,
1.335386E+02,1.309200E+02,1.283527E+02,1.258358E+02,1.233682E+02,1.209491E+02,1.185773E+02,
1.162521E+02,1.139725E+02,1.117375E+02,1.095464E+02,1.073983E+02,1.052923E+02,1.032276E+02,
1.012033E+02,9.921879E+01,9.727317E+01,9.536570E+01,9.349564E+01,9.166225E+01,8.986481E+01,
8.810261E+01,8.637497E+01,8.468121E+01,8.302067E+01,8.139268E+01,7.979662E+01,7.823186E+01,
7.669778E+01,7.519378E+01,7.371928E+01,7.227369E+01,7.085644E+01,6.946699E+01,6.810479E+01,
6.676929E+01,6.545999E+01,6.417636E+01,6.291790E+01,6.168412E+01,6.047453E+01,5.928866E+01,
5.812605E+01,5.698623E+01,5.586876E+01,5.477321E+01,5.369914E+01,5.264614E+01,5.161378E+01,
5.060166E+01,4.960939E+01,4.863658E+01,4.768285E+01,4.674782E+01,4.583112E+01,4.493240E+01,
4.405131E+01,4.318749E+01,4.234061E+01,4.151033E+01,4.069634E+01,3.989831E+01,3.911593E+01,
3.834889E+01,3.759689E+01,3.685964E+01,3.613684E+01,3.542822E+01,3.473350E+01,3.405239E+01,
3.338465E+01,3.272999E+01,3.208818E+01,3.145895E+01,3.084206E+01,3.023726E+01,2.964433E+01,
2.906302E+01,2.849312E+01,2.793438E+01,2.738661E+01,2.684957E+01,2.632307E+01,2.580689E+01,
2.530083E+01,2.480470E+01,2.431829E+01,2.384143E+01,2.337391E+01,2.291556E+01,2.246620E+01,
2.202565E+01,2.159374E+01,2.117030E+01,2.075517E+01,2.034817E+01,1.994916E+01,1.955796E+01,
1.917444E+01,1.879845E+01,1.842982E+01,1.806842E+01,1.771411E+01,1.736675E+01,1.702620E+01,
1.669232E+01,1.636500E+01,1.604409E+01,1.572947E+01,1.542103E+01,1.511863E+01,1.482217E+01,
1.453151E+01,1.424656E+01,1.396719E+01,1.369330E+01,1.342479E+01,1.316153E+01,1.290344E+01,
1.265042E+01,1.240235E+01,1.215915E+01,1.192071E+01,1.168695E+01,1.145778E+01,1.123310E+01,
1.101283E+01,1.079687E+01,1.058515E+01,1.037758E+01,1.017409E+01,9.974578E+00,9.778982E+00,
9.587222E+00,9.399223E+00,9.214910E+00,9.034211E+00,8.857056E+00,8.683374E+00,8.513098E+00,
8.346162E+00,8.182499E+00,8.022045E+00,7.864737E+00,7.710515E+00,7.559316E+00,7.411083E+00,
7.265756E+00,7.123279E+00,6.983596E+00,6.846652E+00,6.712393E+00,6.580767E+00,6.451722E+00,
6.325208E+00,6.201174E+00,6.079573E+00,5.960356E+00,5.843477E+00,5.728890E+00,5.616550E+00,
5.506413E+00,5.398436E+00,5.292576E+00,5.188792E+00,5.087043E+00,4.987289E+00,4.889491E+00,
4.793611E+00,4.699611E+00,4.607455E+00,4.517105E+00,4.428528E+00,4.341687E+00,4.256549E+00,
4.173081E+00,4.091249E+00,4.011022E+00,3.932369E+00,3.855257E+00,3.779658E+00,3.705541E+00,
3.632878E+00,3.561639E+00,3.491798E+00,3.423326E+00,3.356196E+00,3.290383E+00,3.225861E+00,
3.162604E+00,3.100587E+00,3.039787E+00,2.980178E+00,2.921739E+00,2.864445E+00,2.808275E+00,
2.753207E+00,2.699218E+00,2.646288E+00,2.594396E+00,2.543521E+00,2.493644E+00,2.444746E+00,
2.396806E+00,2.349806E+00,2.303727E+00,2.258553E+00,2.214264E+00,2.170844E+00,2.128275E+00,
2.086540E+00,2.045625E+00,2.005511E+00,1.966184E+00,1.927629E+00,1.889829E+00,1.852771E+00,
1.816439E+00,1.780820E+00,1.745899E+00,1.711663E+00,1.678098E+00,1.645192E+00,1.612931E+00,
1.581302E+00,1.550294E+00,1.519893E+00,1.490089E+00,1.460869E+00,1.432223E+00,1.404138E+00,
1.376603E+00,1.349609E+00]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_plant_dew_timeseries(i, blp_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil
:param i; simulation number/index
:param pore_h2o_conc; daily values of pesticide concentration in soil pore water
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association; rather, it is one year from the day of the 1st pesticide application
:return:
"""
# create empty pandas dataframes to create an empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[3.521818E+00,3.285972E+00,3.065920E+00,2.860605E+00,2.669039E+00,2.490301E+00,2.323533E+00,
5.689751E+00,5.308725E+00,4.953216E+00,4.621514E+00,4.312025E+00,4.023261E+00,3.753835E+00,
7.024270E+00,6.553876E+00,6.114982E+00,5.705480E+00,5.323401E+00,4.966909E+00,4.634290E+00,
7.845763E+00,7.320356E+00,6.830133E+00,6.372740E+00,5.945976E+00,5.547792E+00,5.176273E+00,
8.351451E+00,7.792179E+00,7.270360E+00,6.783486E+00,6.329216E+00,5.905368E+00,5.509903E+00,
8.662739E+00,8.082621E+00,7.541352E+00,7.036330E+00,6.565128E+00,6.125481E+00,5.715276E+00,
8.854359E+00,8.261409E+00,7.708167E+00,7.191974E+00,6.710349E+00,6.260977E+00,5.841698E+00,
5.450497E+00,5.085494E+00,4.744933E+00,4.427179E+00,4.130704E+00,3.854083E+00,3.595987E+00,
3.355175E+00,3.130489E+00,2.920849E+00,2.725249E+00,2.542747E+00,2.372467E+00,2.213590E+00,
2.065352E+00,1.927042E+00,1.797994E+00,1.677587E+00,1.565244E+00,1.460425E+00,1.362624E+00,
1.271373E+00,1.186233E+00,1.106795E+00,1.032676E+00,9.635209E-01,8.989968E-01,8.387936E-01,
7.826221E-01,7.302123E-01,6.813121E-01,6.356867E-01,5.931167E-01,5.533974E-01,5.163381E-01,
4.817604E-01,4.494984E-01,4.193968E-01,3.913111E-01,3.651061E-01,3.406561E-01,3.178434E-01,
2.965583E-01,2.766987E-01,2.581690E-01,2.408802E-01,2.247492E-01,2.096984E-01,1.956555E-01,
1.825531E-01,1.703280E-01,1.589217E-01,1.482792E-01,1.383494E-01,1.290845E-01,1.204401E-01,
1.123746E-01,1.048492E-01,9.782777E-02,9.127653E-02,8.516402E-02,7.946084E-02,7.413958E-02,
6.917468E-02,6.454226E-02,6.022005E-02,5.618730E-02,5.242460E-02,4.891388E-02,4.563827E-02,
4.258201E-02,3.973042E-02,3.706979E-02,3.458734E-02,3.227113E-02,3.011003E-02,2.809365E-02,
2.621230E-02,2.445694E-02,2.281913E-02,2.129100E-02,1.986521E-02,1.853490E-02,1.729367E-02,
1.613556E-02,1.505501E-02,1.404682E-02,1.310615E-02,1.222847E-02,1.140957E-02,1.064550E-02,
9.932605E-03,9.267448E-03,8.646835E-03,8.067782E-03,7.527507E-03,7.023412E-03,6.553075E-03,
6.114235E-03,5.704783E-03,5.322751E-03,4.966302E-03,4.633724E-03,4.323417E-03,4.033891E-03,
3.763753E-03,3.511706E-03,3.276538E-03,3.057118E-03,2.852392E-03,2.661376E-03,2.483151E-03,
2.316862E-03,2.161709E-03,2.016946E-03,1.881877E-03,1.755853E-03,1.638269E-03,1.528559E-03,
1.426196E-03,1.330688E-03,1.241576E-03,1.158431E-03,1.080854E-03,1.008473E-03,9.409384E-04,
8.779265E-04,8.191344E-04,7.642794E-04,7.130979E-04,6.653439E-04,6.207878E-04,5.792155E-04,
5.404272E-04,5.042364E-04,4.704692E-04,4.389633E-04,4.095672E-04,3.821397E-04,3.565490E-04,
3.326719E-04,3.103939E-04,2.896077E-04,2.702136E-04,2.521182E-04,2.352346E-04,2.194816E-04,
2.047836E-04,1.910699E-04,1.782745E-04,1.663360E-04,1.551970E-04,1.448039E-04,1.351068E-04,
1.260591E-04,1.176173E-04,1.097408E-04,1.023918E-04,9.553493E-05,8.913724E-05,8.316799E-05,
7.759848E-05,7.240194E-05,6.755340E-05,6.302955E-05,5.880865E-05,5.487041E-05,5.119590E-05,
4.776746E-05,4.456862E-05,4.158399E-05,3.879924E-05,3.620097E-05,3.377670E-05,3.151477E-05,
2.940432E-05,2.743520E-05,2.559795E-05,2.388373E-05,2.228431E-05,2.079200E-05,1.939962E-05,
1.810048E-05,1.688835E-05,1.575739E-05,1.470216E-05,1.371760E-05,1.279898E-05,1.194187E-05,
1.114216E-05,1.039600E-05,9.699809E-06,9.050242E-06,8.444175E-06,7.878693E-06,7.351081E-06,
6.858801E-06,6.399488E-06,5.970933E-06,5.571078E-06,5.197999E-06,4.849905E-06,4.525121E-06,
4.222087E-06,3.939347E-06,3.675540E-06,3.429400E-06,3.199744E-06,2.985467E-06,2.785539E-06,
2.599000E-06,2.424952E-06,2.262561E-06,2.111044E-06,1.969673E-06,1.837770E-06,1.714700E-06,
1.599872E-06,1.492733E-06,1.392769E-06,1.299500E-06,1.212476E-06,1.131280E-06,1.055522E-06,
9.848367E-07,9.188851E-07,8.573501E-07,7.999360E-07,7.463666E-07,6.963847E-07,6.497499E-07,
6.062381E-07,5.656401E-07,5.277609E-07,4.924183E-07,4.594426E-07,4.286751E-07,3.999680E-07,
3.731833E-07,3.481923E-07,3.248749E-07,3.031190E-07,2.828201E-07,2.638805E-07,2.462092E-07,
2.297213E-07,2.143375E-07,1.999840E-07,1.865917E-07,1.740962E-07,1.624375E-07,1.515595E-07,
1.414100E-07,1.319402E-07,1.231046E-07,1.148606E-07,1.071688E-07,9.999199E-08,9.329583E-08,
8.704809E-08,8.121874E-08,7.577976E-08,7.070502E-08,6.597011E-08,6.155229E-08,5.743032E-08,
5.358438E-08,4.999600E-08,4.664791E-08,4.352404E-08,4.060937E-08,3.788988E-08,3.535251E-08,
3.298506E-08,3.077615E-08,2.871516E-08,2.679219E-08,2.499800E-08,2.332396E-08,2.176202E-08,
2.030468E-08,1.894494E-08,1.767625E-08,1.649253E-08,1.538807E-08,1.435758E-08,1.339610E-08,
1.249900E-08,1.166198E-08,1.088101E-08,1.015234E-08,9.472470E-09,8.838127E-09,8.246264E-09,
7.694037E-09,7.178790E-09,6.698048E-09,6.249500E-09,5.830989E-09,5.440505E-09,5.076171E-09,
4.736235E-09,4.419064E-09,4.123132E-09,3.847018E-09,3.589395E-09,3.349024E-09,3.124750E-09,
2.915495E-09,2.720253E-09,2.538086E-09,2.368118E-09,2.209532E-09,2.061566E-09,1.923509E-09,
1.794697E-09,1.674512E-09],
[3.544172E+00,3.306830E+00,3.085381E+00,2.878762E+00,2.685980E+00,2.506108E+00,2.338282E+00,
5.725866E+00,5.342422E+00,4.984656E+00,4.650848E+00,4.339395E+00,4.048799E+00,3.777663E+00,
7.068856E+00,6.595476E+00,6.153797E+00,5.741695E+00,5.357191E+00,4.998436E+00,4.663706E+00,
7.895563E+00,7.366821E+00,6.873487E+00,6.413190E+00,5.983718E+00,5.583006E+00,5.209129E+00,
8.404462E+00,7.841640E+00,7.316509E+00,6.826544E+00,6.369391E+00,5.942852E+00,5.544877E+00,
8.717725E+00,8.133925E+00,7.589220E+00,7.080993E+00,6.606800E+00,6.164363E+00,5.751554E+00,
8.910561E+00,8.313848E+00,7.757094E+00,7.237625E+00,6.752943E+00,6.300718E+00,5.878778E+00,
5.485094E+00,5.117774E+00,4.775052E+00,4.455281E+00,4.156924E+00,3.878547E+00,3.618812E+00,
3.376471E+00,3.150359E+00,2.939389E+00,2.742547E+00,2.558887E+00,2.387526E+00,2.227640E+00,
2.078462E+00,1.939274E+00,1.809406E+00,1.688236E+00,1.575180E+00,1.469695E+00,1.371273E+00,
1.279443E+00,1.193763E+00,1.113820E+00,1.039231E+00,9.696368E-01,9.047031E-01,8.441178E-01,
7.875898E-01,7.348473E-01,6.856367E-01,6.397217E-01,5.968815E-01,5.569101E-01,5.196155E-01,
4.848184E-01,4.523516E-01,4.220589E-01,3.937949E-01,3.674236E-01,3.428184E-01,3.198608E-01,
2.984407E-01,2.784550E-01,2.598077E-01,2.424092E-01,2.261758E-01,2.110295E-01,1.968974E-01,
1.837118E-01,1.714092E-01,1.599304E-01,1.492204E-01,1.392275E-01,1.299039E-01,1.212046E-01,
1.130879E-01,1.055147E-01,9.844872E-02,9.185591E-02,8.570459E-02,7.996521E-02,7.461018E-02,
6.961376E-02,6.495194E-02,6.060230E-02,5.654394E-02,5.275737E-02,4.922436E-02,4.592795E-02,
4.285230E-02,3.998261E-02,3.730509E-02,3.480688E-02,3.247597E-02,3.030115E-02,2.827197E-02,
2.637868E-02,2.461218E-02,2.296398E-02,2.142615E-02,1.999130E-02,1.865255E-02,1.740344E-02,
1.623798E-02,1.515057E-02,1.413599E-02,1.318934E-02,1.230609E-02,1.148199E-02,1.071307E-02,
9.995652E-03,9.326273E-03,8.701720E-03,8.118992E-03,7.575287E-03,7.067993E-03,6.594671E-03,
6.153045E-03,5.740994E-03,5.356537E-03,4.997826E-03,4.663136E-03,4.350860E-03,4.059496E-03,
3.787644E-03,3.533997E-03,3.297335E-03,3.076523E-03,2.870497E-03,2.678269E-03,2.498913E-03,
2.331568E-03,2.175430E-03,2.029748E-03,1.893822E-03,1.766998E-03,1.648668E-03,1.538261E-03,
1.435249E-03,1.339134E-03,1.249456E-03,1.165784E-03,1.087715E-03,1.014874E-03,9.469109E-04,
8.834991E-04,8.243338E-04,7.691307E-04,7.176243E-04,6.695671E-04,6.247282E-04,5.828920E-04,
5.438575E-04,5.074370E-04,4.734555E-04,4.417496E-04,4.121669E-04,3.845653E-04,3.588121E-04,
3.347836E-04,3.123641E-04,2.914460E-04,2.719288E-04,2.537185E-04,2.367277E-04,2.208748E-04,
2.060835E-04,1.922827E-04,1.794061E-04,1.673918E-04,1.561821E-04,1.457230E-04,1.359644E-04,
1.268592E-04,1.183639E-04,1.104374E-04,1.030417E-04,9.614133E-05,8.970304E-05,8.369589E-05,
7.809103E-05,7.286151E-05,6.798219E-05,6.342962E-05,5.918193E-05,5.521870E-05,5.152086E-05,
4.807067E-05,4.485152E-05,4.184795E-05,3.904551E-05,3.643075E-05,3.399109E-05,3.171481E-05,
2.959097E-05,2.760935E-05,2.576043E-05,2.403533E-05,2.242576E-05,2.092397E-05,1.952276E-05,
1.821538E-05,1.699555E-05,1.585741E-05,1.479548E-05,1.380467E-05,1.288022E-05,1.201767E-05,
1.121288E-05,1.046199E-05,9.761378E-06,9.107688E-06,8.497774E-06,7.928703E-06,7.397742E-06,
6.902337E-06,6.440108E-06,6.008833E-06,5.606440E-06,5.230993E-06,4.880689E-06,4.553844E-06,
4.248887E-06,3.964352E-06,3.698871E-06,3.451168E-06,3.220054E-06,3.004417E-06,2.803220E-06,
2.615497E-06,2.440345E-06,2.276922E-06,2.124443E-06,1.982176E-06,1.849435E-06,1.725584E-06,
1.610027E-06,1.502208E-06,1.401610E-06,1.307748E-06,1.220172E-06,1.138461E-06,1.062222E-06,
9.910879E-07,9.247177E-07,8.627921E-07,8.050135E-07,7.511042E-07,7.008050E-07,6.538742E-07,
6.100862E-07,5.692305E-07,5.311108E-07,4.955439E-07,4.623588E-07,4.313961E-07,4.025068E-07,
3.755521E-07,3.504025E-07,3.269371E-07,3.050431E-07,2.846153E-07,2.655554E-07,2.477720E-07,
2.311794E-07,2.156980E-07,2.012534E-07,1.877760E-07,1.752012E-07,1.634685E-07,1.525215E-07,
1.423076E-07,1.327777E-07,1.238860E-07,1.155897E-07,1.078490E-07,1.006267E-07,9.388802E-08,
8.760062E-08,8.173427E-08,7.626077E-08,7.115381E-08,6.638886E-08,6.194299E-08,5.779486E-08,
5.392451E-08,5.031334E-08,4.694401E-08,4.380031E-08,4.086713E-08,3.813038E-08,3.557691E-08,
3.319443E-08,3.097150E-08,2.889743E-08,2.696225E-08,2.515667E-08,2.347201E-08,2.190016E-08,
2.043357E-08,1.906519E-08,1.778845E-08,1.659721E-08,1.548575E-08,1.444871E-08,1.348113E-08,
1.257834E-08,1.173600E-08,1.095008E-08,1.021678E-08,9.532596E-09,8.894227E-09,8.298607E-09,
7.742874E-09,7.224357E-09,6.740563E-09,6.289168E-09,5.868001E-09,5.475039E-09,5.108392E-09,
4.766298E-09,4.447113E-09,4.149303E-09,3.871437E-09,3.612178E-09,3.370282E-09,3.144584E-09,
2.934001E-09,2.737519E-09,2.554196E-09,2.383149E-09,2.223557E-09,2.074652E-09,1.935719E-09,
1.806089E-09,1.685141E-09],
[3.555456E+00,3.317358E+00,3.095204E+00,2.887928E+00,2.694532E+00,2.514087E+00,2.345726E+00,
5.744096E+00,5.359431E+00,5.000526E+00,4.665656E+00,4.353211E+00,4.061689E+00,3.789690E+00,
7.091362E+00,6.616475E+00,6.173389E+00,5.759976E+00,5.374248E+00,5.014350E+00,4.678554E+00,
7.920702E+00,7.390276E+00,6.895371E+00,6.433609E+00,6.002769E+00,5.600782E+00,5.225714E+00,
8.431220E+00,7.866606E+00,7.339803E+00,6.848279E+00,6.389670E+00,5.961773E+00,5.562531E+00,
8.745481E+00,8.159822E+00,7.613383E+00,7.103538E+00,6.627835E+00,6.183989E+00,5.769866E+00,
8.938931E+00,8.340318E+00,7.781792E+00,7.260668E+00,6.774443E+00,6.320779E+00,5.897495E+00,
5.502558E+00,5.134068E+00,4.790255E+00,4.469466E+00,4.170159E+00,3.890896E+00,3.630334E+00,
3.387221E+00,3.160389E+00,2.948748E+00,2.751279E+00,2.567034E+00,2.395127E+00,2.234733E+00,
2.085079E+00,1.945448E+00,1.815167E+00,1.693611E+00,1.580195E+00,1.474374E+00,1.375639E+00,
1.283517E+00,1.197564E+00,1.117366E+00,1.042540E+00,9.727239E-01,9.075835E-01,8.468054E-01,
7.900974E-01,7.371869E-01,6.878197E-01,6.417585E-01,5.987818E-01,5.586832E-01,5.212699E-01,
4.863620E-01,4.537918E-01,4.234027E-01,3.950487E-01,3.685934E-01,3.439098E-01,3.208792E-01,
2.993909E-01,2.793416E-01,2.606349E-01,2.431810E-01,2.268959E-01,2.117013E-01,1.975243E-01,
1.842967E-01,1.719549E-01,1.604396E-01,1.496955E-01,1.396708E-01,1.303175E-01,1.215905E-01,
1.134479E-01,1.058507E-01,9.876217E-02,9.214836E-02,8.597746E-02,8.021981E-02,7.484773E-02,
6.983540E-02,6.515873E-02,6.079525E-02,5.672397E-02,5.292534E-02,4.938108E-02,4.607418E-02,
4.298873E-02,4.010990E-02,3.742386E-02,3.491770E-02,3.257937E-02,3.039762E-02,2.836199E-02,
2.646267E-02,2.469054E-02,2.303709E-02,2.149437E-02,2.005495E-02,1.871193E-02,1.745885E-02,
1.628968E-02,1.519881E-02,1.418099E-02,1.323133E-02,1.234527E-02,1.151855E-02,1.074718E-02,
1.002748E-02,9.355966E-03,8.729425E-03,8.144841E-03,7.599406E-03,7.090496E-03,6.615667E-03,
6.172636E-03,5.759273E-03,5.373591E-03,5.013738E-03,4.677983E-03,4.364712E-03,4.072421E-03,
3.799703E-03,3.545248E-03,3.307833E-03,3.086318E-03,2.879636E-03,2.686796E-03,2.506869E-03,
2.338991E-03,2.182356E-03,2.036210E-03,1.899851E-03,1.772624E-03,1.653917E-03,1.543159E-03,
1.439818E-03,1.343398E-03,1.253435E-03,1.169496E-03,1.091178E-03,1.018105E-03,9.499257E-04,
8.863120E-04,8.269584E-04,7.715794E-04,7.199091E-04,6.716989E-04,6.267173E-04,5.847479E-04,
5.455891E-04,5.090526E-04,4.749629E-04,4.431560E-04,4.134792E-04,3.857897E-04,3.599545E-04,
3.358495E-04,3.133586E-04,2.923739E-04,2.727945E-04,2.545263E-04,2.374814E-04,2.215780E-04,
2.067396E-04,1.928949E-04,1.799773E-04,1.679247E-04,1.566793E-04,1.461870E-04,1.363973E-04,
1.272631E-04,1.187407E-04,1.107890E-04,1.033698E-04,9.644743E-05,8.998863E-05,8.396236E-05,
7.833966E-05,7.309348E-05,6.819863E-05,6.363157E-05,5.937036E-05,5.539450E-05,5.168490E-05,
4.822372E-05,4.499432E-05,4.198118E-05,3.916983E-05,3.654674E-05,3.409932E-05,3.181579E-05,
2.968518E-05,2.769725E-05,2.584245E-05,2.411186E-05,2.249716E-05,2.099059E-05,1.958491E-05,
1.827337E-05,1.704966E-05,1.590789E-05,1.484259E-05,1.384863E-05,1.292122E-05,1.205593E-05,
1.124858E-05,1.049530E-05,9.792457E-06,9.136686E-06,8.524829E-06,7.953947E-06,7.421295E-06,
6.924313E-06,6.460612E-06,6.027964E-06,5.624290E-06,5.247648E-06,4.896229E-06,4.568343E-06,
4.262415E-06,3.976973E-06,3.710647E-06,3.462156E-06,3.230306E-06,3.013982E-06,2.812145E-06,
2.623824E-06,2.448114E-06,2.284171E-06,2.131207E-06,1.988487E-06,1.855324E-06,1.731078E-06,
1.615153E-06,1.506991E-06,1.406072E-06,1.311912E-06,1.224057E-06,1.142086E-06,1.065604E-06,
9.942433E-07,9.276618E-07,8.655391E-07,8.075765E-07,7.534956E-07,7.030362E-07,6.559560E-07,
6.120286E-07,5.710428E-07,5.328018E-07,4.971217E-07,4.638309E-07,4.327695E-07,4.037883E-07,
3.767478E-07,3.515181E-07,3.279780E-07,3.060143E-07,2.855214E-07,2.664009E-07,2.485608E-07,
2.319155E-07,2.163848E-07,2.018941E-07,1.883739E-07,1.757591E-07,1.639890E-07,1.530071E-07,
1.427607E-07,1.332005E-07,1.242804E-07,1.159577E-07,1.081924E-07,1.009471E-07,9.418694E-08,
8.787953E-08,8.199450E-08,7.650357E-08,7.138036E-08,6.660023E-08,6.214021E-08,5.797886E-08,
5.409619E-08,5.047353E-08,4.709347E-08,4.393976E-08,4.099725E-08,3.825179E-08,3.569018E-08,
3.330011E-08,3.107010E-08,2.898943E-08,2.704810E-08,2.523677E-08,2.354674E-08,2.196988E-08,
2.049862E-08,1.912589E-08,1.784509E-08,1.665006E-08,1.553505E-08,1.449472E-08,1.352405E-08,
1.261838E-08,1.177337E-08,1.098494E-08,1.024931E-08,9.562946E-09,8.922544E-09,8.325028E-09,
7.767526E-09,7.247358E-09,6.762024E-09,6.309192E-09,5.886684E-09,5.492470E-09,5.124656E-09,
4.781473E-09,4.461272E-09,4.162514E-09,3.883763E-09,3.623679E-09,3.381012E-09,3.154596E-09,
2.943342E-09,2.746235E-09,2.562328E-09,2.390737E-09,2.230636E-09,2.081257E-09,1.941882E-09,
1.811840E-09,1.690506E-09]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.soil_foc = 0.015
# input variables that change per simulation
ted_empty.koc = pd.Series([1000., 1500., 2000.], dtype='float')
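# (added note, assumption) together with soil_foc = 0.015, these Koc values give
# soil/pore-water ratios of 15.0, 22.5, and 30.0 for the three simulations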
# internally calculated variables
pore_h2o_conc = pd.Series([[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[1.575188E-01,1.469702E-01,1.371280E-01,1.279450E-01,1.193769E-01,1.113826E-01,
1.039236E-01,2.544829E-01,2.374410E-01,2.215403E-01,2.067044E-01,1.928620E-01,
1.799466E-01,1.678961E-01,3.141714E-01,2.931323E-01,2.735021E-01,2.551865E-01,
2.380974E-01,2.221527E-01,2.072758E-01,3.509139E-01,3.274143E-01,3.054883E-01,
2.850307E-01,2.659430E-01,2.481336E-01,2.315169E-01,3.735316E-01,3.485173E-01,
3.251782E-01,3.034020E-01,2.830840E-01,2.641267E-01,2.464390E-01,3.874544E-01,
3.615078E-01,3.372987E-01,3.147108E-01,2.936356E-01,2.739717E-01,2.556246E-01,
3.960250E-01,3.695043E-01,3.447597E-01,3.216722E-01,3.001308E-01,2.800319E-01,
2.612790E-01,2.437820E-01,2.274566E-01,2.122245E-01,1.980125E-01,1.847522E-01,
1.723799E-01,1.608361E-01,1.500654E-01,1.400160E-01,1.306395E-01,1.218910E-01,
1.137283E-01,1.061123E-01,9.900624E-02,9.237609E-02,8.618994E-02,8.041805E-02,
7.503270E-02,7.000798E-02,6.531976E-02,6.094549E-02,5.686415E-02,5.305613E-02,
4.950312E-02,4.618804E-02,4.309497E-02,4.020903E-02,3.751635E-02,3.500399E-02,
3.265988E-02,3.047274E-02,2.843208E-02,2.652806E-02,2.475156E-02,2.309402E-02,
2.154748E-02,2.010451E-02,1.875817E-02,1.750200E-02,1.632994E-02,1.523637E-02,
1.421604E-02,1.326403E-02,1.237578E-02,1.154701E-02,1.077374E-02,1.005226E-02,
9.379087E-03,8.750998E-03,8.164970E-03,7.618186E-03,7.108019E-03,6.632016E-03,
6.187890E-03,5.773505E-03,5.386871E-03,5.026128E-03,4.689544E-03,4.375499E-03,
4.082485E-03,3.809093E-03,3.554009E-03,3.316008E-03,3.093945E-03,2.886753E-03,
2.693435E-03,2.513064E-03,2.344772E-03,2.187749E-03,2.041242E-03,1.904547E-03,
1.777005E-03,1.658004E-03,1.546972E-03,1.443376E-03,1.346718E-03,1.256532E-03,
1.172386E-03,1.093875E-03,1.020621E-03,9.522733E-04,8.885024E-04,8.290020E-04,
7.734862E-04,7.216882E-04,6.733589E-04,6.282660E-04,5.861929E-04,5.469374E-04,
5.103106E-04,4.761366E-04,4.442512E-04,4.145010E-04,3.867431E-04,3.608441E-04,
3.366794E-04,3.141330E-04,2.930965E-04,2.734687E-04,2.551553E-04,2.380683E-04,
2.221256E-04,2.072505E-04,1.933716E-04,1.804220E-04,1.683397E-04,1.570665E-04,
1.465482E-04,1.367343E-04,1.275777E-04,1.190342E-04,1.110628E-04,1.036253E-04,
9.668578E-05,9.021102E-05,8.416986E-05,7.853326E-05,7.327412E-05,6.836717E-05,
6.378883E-05,5.951708E-05,5.553140E-05,5.181263E-05,4.834289E-05,4.510551E-05,
4.208493E-05,3.926663E-05,3.663706E-05,3.418358E-05,3.189441E-05,2.975854E-05,
2.776570E-05,2.590631E-05,2.417144E-05,2.255276E-05,2.104246E-05,1.963331E-05,
1.831853E-05,1.709179E-05,1.594721E-05,1.487927E-05,1.388285E-05,1.295316E-05,
1.208572E-05,1.127638E-05,1.052123E-05,9.816657E-06,9.159265E-06,8.545896E-06,
7.973603E-06,7.439635E-06,6.941425E-06,6.476578E-06,6.042861E-06,5.638189E-06,
5.260616E-06,4.908328E-06,4.579632E-06,4.272948E-06,3.986802E-06,3.719817E-06,
3.470712E-06,3.238289E-06,3.021431E-06,2.819094E-06,2.630308E-06,2.454164E-06,
2.289816E-06,2.136474E-06,1.993401E-06,1.859909E-06,1.735356E-06,1.619145E-06,
1.510715E-06,1.409547E-06,1.315154E-06,1.227082E-06,1.144908E-06,1.068237E-06,
9.967004E-07,9.299543E-07,8.676781E-07,8.095723E-07,7.553576E-07,7.047736E-07,
6.575770E-07,6.135411E-07,5.724540E-07,5.341185E-07,4.983502E-07,4.649772E-07,
4.338390E-07,4.047861E-07,3.776788E-07,3.523868E-07,3.287885E-07,3.067705E-07,
2.862270E-07,2.670593E-07,2.491751E-07,2.324886E-07,2.169195E-07,2.023931E-07,
1.888394E-07,1.761934E-07,1.643943E-07,1.533853E-07,1.431135E-07,1.335296E-07,
1.245875E-07,1.162443E-07,1.084598E-07,1.011965E-07,9.441971E-08,8.809670E-08,
8.219713E-08,7.669263E-08,7.155676E-08,6.676481E-08,6.229377E-08,5.812215E-08,
5.422988E-08,5.059827E-08,4.720985E-08,4.404835E-08,4.109856E-08,3.834632E-08,
3.577838E-08,3.338241E-08,3.114689E-08,2.906107E-08,2.711494E-08,2.529913E-08,
2.360493E-08,2.202418E-08,2.054928E-08,1.917316E-08,1.788919E-08,1.669120E-08,
1.557344E-08,1.453054E-08,1.355747E-08,1.264957E-08,1.180246E-08,1.101209E-08,
1.027464E-08,9.586579E-09,8.944595E-09,8.345602E-09,7.786722E-09,7.265268E-09,
6.778735E-09,6.324783E-09,5.901232E-09,5.506044E-09,5.137321E-09,4.793290E-09,
4.472297E-09,4.172801E-09,3.893361E-09,3.632634E-09,3.389368E-09,3.162392E-09,
2.950616E-09,2.753022E-09,2.568660E-09,2.396645E-09,2.236149E-09,2.086400E-09,
1.946680E-09,1.816317E-09,1.694684E-09,1.581196E-09,1.475308E-09,1.376511E-09,
1.284330E-09,1.198322E-09,1.118074E-09,1.043200E-09,9.733402E-10,9.081585E-10,
8.473419E-10,7.905979E-10,7.376540E-10,6.882555E-10,6.421651E-10,5.991612E-10,
5.590372E-10,5.216001E-10,4.866701E-10,4.540793E-10,4.236709E-10,3.952990E-10,
3.688270E-10,3.441277E-10,3.210825E-10,2.995806E-10,2.795186E-10,2.608001E-10,
2.433351E-10,2.270396E-10,2.118355E-10,1.976495E-10,1.844135E-10,1.720639E-10,
1.605413E-10,1.497903E-10,1.397593E-10,1.304000E-10,1.216675E-10,1.135198E-10,
1.059177E-10,9.882474E-11,9.220674E-11,8.603193E-11,8.027063E-11,7.489515E-11],
[1.185152E-01,1.105786E-01,1.031735E-01,9.626426E-02,8.981773E-02,8.380291E-02,
7.819088E-02,1.914699E-01,1.786477E-01,1.666842E-01,1.555219E-01,1.451070E-01,
1.353896E-01,1.263230E-01,2.363787E-01,2.205492E-01,2.057796E-01,1.919992E-01,
1.791416E-01,1.671450E-01,1.559518E-01,2.640234E-01,2.463425E-01,2.298457E-01,
2.144536E-01,2.000923E-01,1.866927E-01,1.741905E-01,2.810407E-01,2.622202E-01,
2.446601E-01,2.282760E-01,2.129890E-01,1.987258E-01,1.854177E-01,2.915160E-01,
2.719941E-01,2.537794E-01,2.367846E-01,2.209278E-01,2.061330E-01,1.923289E-01,
2.979644E-01,2.780106E-01,2.593931E-01,2.420223E-01,2.258148E-01,2.106926E-01,
1.965832E-01,1.834186E-01,1.711356E-01,1.596752E-01,1.489822E-01,1.390053E-01,
1.296965E-01,1.210111E-01,1.129074E-01,1.053463E-01,9.829159E-02,9.170929E-02,
8.556780E-02,7.983758E-02,7.449109E-02,6.950265E-02,6.484826E-02,6.050557E-02,
5.645369E-02,5.267316E-02,4.914579E-02,4.585465E-02,4.278390E-02,3.991879E-02,
3.724555E-02,3.475132E-02,3.242413E-02,3.025278E-02,2.822685E-02,2.633658E-02,
2.457290E-02,2.292732E-02,2.139195E-02,1.995939E-02,1.862277E-02,1.737566E-02,
1.621207E-02,1.512639E-02,1.411342E-02,1.316829E-02,1.228645E-02,1.146366E-02,
1.069597E-02,9.979697E-03,9.311387E-03,8.687831E-03,8.106033E-03,7.563196E-03,
7.056711E-03,6.584145E-03,6.143224E-03,5.731831E-03,5.347987E-03,4.989849E-03,
4.655693E-03,4.343915E-03,4.053016E-03,3.781598E-03,3.528356E-03,3.292072E-03,
3.071612E-03,2.865915E-03,2.673994E-03,2.494924E-03,2.327847E-03,2.171958E-03,
2.026508E-03,1.890799E-03,1.764178E-03,1.646036E-03,1.535806E-03,1.432958E-03,
1.336997E-03,1.247462E-03,1.163923E-03,1.085979E-03,1.013254E-03,9.453995E-04,
8.820889E-04,8.230181E-04,7.679030E-04,7.164788E-04,6.684984E-04,6.237311E-04,
5.819617E-04,5.429894E-04,5.066271E-04,4.726998E-04,4.410445E-04,4.115090E-04,
3.839515E-04,3.582394E-04,3.342492E-04,3.118655E-04,2.909808E-04,2.714947E-04,
2.533135E-04,2.363499E-04,2.205222E-04,2.057545E-04,1.919758E-04,1.791197E-04,
1.671246E-04,1.559328E-04,1.454904E-04,1.357474E-04,1.266568E-04,1.181749E-04,
1.102611E-04,1.028773E-04,9.598788E-05,8.955986E-05,8.356230E-05,7.796638E-05,
7.274521E-05,6.787368E-05,6.332838E-05,5.908747E-05,5.513056E-05,5.143863E-05,
4.799394E-05,4.477993E-05,4.178115E-05,3.898319E-05,3.637260E-05,3.393684E-05,
3.166419E-05,2.954373E-05,2.756528E-05,2.571931E-05,2.399697E-05,2.238996E-05,
2.089058E-05,1.949160E-05,1.818630E-05,1.696842E-05,1.583210E-05,1.477187E-05,
1.378264E-05,1.285966E-05,1.199848E-05,1.119498E-05,1.044529E-05,9.745798E-06,
9.093151E-06,8.484210E-06,7.916048E-06,7.385934E-06,6.891320E-06,6.429829E-06,
5.999242E-06,5.597491E-06,5.222644E-06,4.872899E-06,4.546575E-06,4.242105E-06,
3.958024E-06,3.692967E-06,3.445660E-06,3.214914E-06,2.999621E-06,2.798745E-06,
2.611322E-06,2.436449E-06,2.273288E-06,2.121052E-06,1.979012E-06,1.846483E-06,
1.722830E-06,1.607457E-06,1.499811E-06,1.399373E-06,1.305661E-06,1.218225E-06,
1.136644E-06,1.060526E-06,9.895060E-07,9.232417E-07,8.614150E-07,8.037286E-07,
7.499053E-07,6.996864E-07,6.528305E-07,6.091124E-07,5.683219E-07,5.302631E-07,
4.947530E-07,4.616209E-07,4.307075E-07,4.018643E-07,3.749526E-07,3.498432E-07,
3.264152E-07,3.045562E-07,2.841610E-07,2.651316E-07,2.473765E-07,2.308104E-07,
2.153537E-07,2.009321E-07,1.874763E-07,1.749216E-07,1.632076E-07,1.522781E-07,
1.420805E-07,1.325658E-07,1.236882E-07,1.154052E-07,1.076769E-07,1.004661E-07,
9.373816E-08,8.746080E-08,8.160381E-08,7.613905E-08,7.104024E-08,6.628289E-08,
6.184412E-08,5.770261E-08,5.383844E-08,5.023304E-08,4.686908E-08,4.373040E-08,
4.080190E-08,3.806952E-08,3.552012E-08,3.314144E-08,3.092206E-08,2.885130E-08,
2.691922E-08,2.511652E-08,2.343454E-08,2.186520E-08,2.040095E-08,1.903476E-08,
1.776006E-08,1.657072E-08,1.546103E-08,1.442565E-08,1.345961E-08,1.255826E-08,
1.171727E-08,1.093260E-08,1.020048E-08,9.517381E-09,8.880030E-09,8.285361E-09,
7.730515E-09,7.212826E-09,6.729804E-09,6.279130E-09,5.858635E-09,5.466300E-09,
5.100238E-09,4.758690E-09,4.440015E-09,4.142681E-09,3.865258E-09,3.606413E-09,
3.364902E-09,3.139565E-09,2.929318E-09,2.733150E-09,2.550119E-09,2.379345E-09,
2.220008E-09,2.071340E-09,1.932629E-09,1.803206E-09,1.682451E-09,1.569782E-09,
1.464659E-09,1.366575E-09,1.275060E-09,1.189673E-09,1.110004E-09,1.035670E-09,
9.663144E-10,9.016032E-10,8.412256E-10,7.848912E-10,7.323294E-10,6.832875E-10,
6.375298E-10,5.948363E-10,5.550019E-10,5.178351E-10,4.831572E-10,4.508016E-10,
4.206128E-10,3.924456E-10,3.661647E-10,3.416437E-10,3.187649E-10,2.974181E-10,
2.775009E-10,2.589175E-10,2.415786E-10,2.254008E-10,2.103064E-10,1.962228E-10,
1.830823E-10,1.708219E-10,1.593824E-10,1.487091E-10,1.387505E-10,1.294588E-10,
1.207893E-10,1.127004E-10,1.051532E-10,9.811140E-11,9.154117E-11,8.541093E-11,
7.969122E-11,7.435454E-11,6.937524E-11,6.472938E-11,6.039465E-11,5.635020E-11]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_soil_timeseries(i, pore_h2o_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_inv_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil invertebrates (earthworms)
:param i; simulation number/index
:param pore_h2o_conc; daily values of pesticide concentration in soil pore water
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association; rather, it is one year from the day of the 1st pesticide application
# this represents Eq 2 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create an empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[2.347878E+02,2.190648E+02,2.043947E+02,1.907070E+02,1.779359E+02,1.660201E+02,
1.549022E+02,3.793167E+02,3.539150E+02,3.302144E+02,3.081009E+02,2.874683E+02,
2.682174E+02,2.502557E+02,4.682847E+02,4.369250E+02,4.076655E+02,3.803653E+02,
3.548934E+02,3.311273E+02,3.089527E+02,5.230509E+02,4.880237E+02,4.553422E+02,
4.248493E+02,3.963984E+02,3.698528E+02,3.450849E+02,5.567634E+02,5.194786E+02,
4.846907E+02,4.522324E+02,4.219478E+02,3.936912E+02,3.673269E+02,5.775159E+02,
5.388414E+02,5.027568E+02,4.690887E+02,4.376752E+02,4.083654E+02,3.810184E+02,
5.902906E+02,5.507606E+02,5.138778E+02,4.794649E+02,4.473566E+02,4.173985E+02,
3.894465E+02,3.633665E+02,3.390329E+02,3.163289E+02,2.951453E+02,2.753803E+02,
2.569389E+02,2.397325E+02,2.236783E+02,2.086992E+02,1.947233E+02,1.816832E+02,
1.695165E+02,1.581644E+02,1.475726E+02,1.376901E+02,1.284694E+02,1.198662E+02,
1.118392E+02,1.043496E+02,9.736164E+01,9.084162E+01,8.475823E+01,7.908222E+01,
7.378632E+01,6.884507E+01,6.423472E+01,5.993312E+01,5.591958E+01,5.217481E+01,
4.868082E+01,4.542081E+01,4.237911E+01,3.954111E+01,3.689316E+01,3.442254E+01,
3.211736E+01,2.996656E+01,2.795979E+01,2.608740E+01,2.434041E+01,2.271040E+01,
2.118956E+01,1.977056E+01,1.844658E+01,1.721127E+01,1.605868E+01,1.498328E+01,
1.397989E+01,1.304370E+01,1.217020E+01,1.135520E+01,1.059478E+01,9.885278E+00,
9.223290E+00,8.605634E+00,8.029341E+00,7.491640E+00,6.989947E+00,6.521851E+00,
6.085102E+00,5.677601E+00,5.297389E+00,4.942639E+00,4.611645E+00,4.302817E+00,
4.014670E+00,3.745820E+00,3.494973E+00,3.260926E+00,3.042551E+00,2.838801E+00,
2.648695E+00,2.471319E+00,2.305823E+00,2.151409E+00,2.007335E+00,1.872910E+00,
1.747487E+00,1.630463E+00,1.521276E+00,1.419400E+00,1.324347E+00,1.235660E+00,
1.152911E+00,1.075704E+00,1.003668E+00,9.364550E-01,8.737434E-01,8.152314E-01,
7.606378E-01,7.097001E-01,6.621737E-01,6.178299E-01,5.764556E-01,5.378521E-01,
5.018338E-01,4.682275E-01,4.368717E-01,4.076157E-01,3.803189E-01,3.548501E-01,
3.310868E-01,3.089149E-01,2.882278E-01,2.689261E-01,2.509169E-01,2.341137E-01,
2.184358E-01,2.038078E-01,1.901594E-01,1.774250E-01,1.655434E-01,1.544575E-01,
1.441139E-01,1.344630E-01,1.254584E-01,1.170569E-01,1.092179E-01,1.019039E-01,
9.507972E-02,8.871252E-02,8.277171E-02,7.722873E-02,7.205696E-02,6.723152E-02,
6.272922E-02,5.852844E-02,5.460896E-02,5.095196E-02,4.753986E-02,4.435626E-02,
4.138585E-02,3.861437E-02,3.602848E-02,3.361576E-02,3.136461E-02,2.926422E-02,
2.730448E-02,2.547598E-02,2.376993E-02,2.217813E-02,2.069293E-02,1.930718E-02,
1.801424E-02,1.680788E-02,1.568231E-02,1.463211E-02,1.365224E-02,1.273799E-02,
1.188497E-02,1.108906E-02,1.034646E-02,9.653592E-03,9.007119E-03,8.403940E-03,
7.841153E-03,7.316054E-03,6.826120E-03,6.368995E-03,5.942483E-03,5.544532E-03,
5.173232E-03,4.826796E-03,4.503560E-03,4.201970E-03,3.920576E-03,3.658027E-03,
3.413060E-03,3.184498E-03,2.971241E-03,2.772266E-03,2.586616E-03,2.413398E-03,
2.251780E-03,2.100985E-03,1.960288E-03,1.829014E-03,1.706530E-03,1.592249E-03,
1.485621E-03,1.386133E-03,1.293308E-03,1.206699E-03,1.125890E-03,1.050492E-03,
9.801441E-04,9.145068E-04,8.532650E-04,7.961244E-04,7.428103E-04,6.930666E-04,
6.466540E-04,6.033495E-04,5.629450E-04,5.252462E-04,4.900721E-04,4.572534E-04,
4.266325E-04,3.980622E-04,3.714052E-04,3.465333E-04,3.233270E-04,3.016747E-04,
2.814725E-04,2.626231E-04,2.450360E-04,2.286267E-04,2.133163E-04,1.990311E-04,
1.857026E-04,1.732666E-04,1.616635E-04,1.508374E-04,1.407362E-04,1.313116E-04,
1.225180E-04,1.143133E-04,1.066581E-04,9.951555E-05,9.285129E-05,8.663332E-05,
8.083174E-05,7.541868E-05,7.036812E-05,6.565578E-05,6.125901E-05,5.715667E-05,
5.332906E-05,4.975778E-05,4.642565E-05,4.331666E-05,4.041587E-05,3.770934E-05,
3.518406E-05,3.282789E-05,3.062950E-05,2.857834E-05,2.666453E-05,2.487889E-05,
2.321282E-05,2.165833E-05,2.020794E-05,1.885467E-05,1.759203E-05,1.641394E-05,
1.531475E-05,1.428917E-05,1.333227E-05,1.243944E-05,1.160641E-05,1.082916E-05,
1.010397E-05,9.427336E-06,8.796015E-06,8.206972E-06,7.657376E-06,7.144584E-06,
6.666133E-06,6.219722E-06,5.803206E-06,5.414582E-06,5.051984E-06,4.713668E-06,
4.398008E-06,4.103486E-06,3.828688E-06,3.572292E-06,3.333066E-06,3.109861E-06,
2.901603E-06,2.707291E-06,2.525992E-06,2.356834E-06,2.199004E-06,2.051743E-06,
1.914344E-06,1.786146E-06,1.666533E-06,1.554930E-06,1.450801E-06,1.353646E-06,
1.262996E-06,1.178417E-06,1.099502E-06,1.025872E-06,9.571720E-07,8.930730E-07,
8.332666E-07,7.774652E-07,7.254007E-07,6.768228E-07,6.314980E-07,5.892085E-07,
5.497509E-07,5.129358E-07,4.785860E-07,4.465365E-07,4.166333E-07,3.887326E-07,
3.627004E-07,3.384114E-07,3.157490E-07,2.946042E-07,2.748755E-07,2.564679E-07,
2.392930E-07,2.232683E-07,2.083167E-07,1.943663E-07,1.813502E-07,1.692057E-07,
1.578745E-07,1.473021E-07,1.374377E-07,1.282339E-07,1.196465E-07,1.116341E-07],
[2.347878E+01,2.190648E+01,2.043947E+01,1.907070E+01,1.779359E+01,1.660201E+01,
1.549022E+01,3.793167E+01,3.539150E+01,3.302144E+01,3.081009E+01,2.874683E+01,
2.682174E+01,2.502557E+01,4.682847E+01,4.369250E+01,4.076655E+01,3.803653E+01,
3.548934E+01,3.311273E+01,3.089527E+01,5.230509E+01,4.880237E+01,4.553422E+01,
4.248493E+01,3.963984E+01,3.698528E+01,3.450849E+01,5.567634E+01,5.194786E+01,
4.846907E+01,4.522324E+01,4.219478E+01,3.936912E+01,3.673269E+01,5.775159E+01,
5.388414E+01,5.027568E+01,4.690887E+01,4.376752E+01,4.083654E+01,3.810184E+01,
5.902906E+01,5.507606E+01,5.138778E+01,4.794649E+01,4.473566E+01,4.173985E+01,
3.894465E+01,3.633665E+01,3.390329E+01,3.163289E+01,2.951453E+01,2.753803E+01,
2.569389E+01,2.397325E+01,2.236783E+01,2.086992E+01,1.947233E+01,1.816832E+01,
1.695165E+01,1.581644E+01,1.475726E+01,1.376901E+01,1.284694E+01,1.198662E+01,
1.118392E+01,1.043496E+01,9.736164E+00,9.084162E+00,8.475823E+00,7.908222E+00,
7.378632E+00,6.884507E+00,6.423472E+00,5.993312E+00,5.591958E+00,5.217481E+00,
4.868082E+00,4.542081E+00,4.237911E+00,3.954111E+00,3.689316E+00,3.442254E+00,
3.211736E+00,2.996656E+00,2.795979E+00,2.608740E+00,2.434041E+00,2.271040E+00,
2.118956E+00,1.977056E+00,1.844658E+00,1.721127E+00,1.605868E+00,1.498328E+00,
1.397989E+00,1.304370E+00,1.217020E+00,1.135520E+00,1.059478E+00,9.885278E-01,
9.223290E-01,8.605634E-01,8.029341E-01,7.491640E-01,6.989947E-01,6.521851E-01,
6.085102E-01,5.677601E-01,5.297389E-01,4.942639E-01,4.611645E-01,4.302817E-01,
4.014670E-01,3.745820E-01,3.494973E-01,3.260926E-01,3.042551E-01,2.838801E-01,
2.648695E-01,2.471319E-01,2.305823E-01,2.151409E-01,2.007335E-01,1.872910E-01,
1.747487E-01,1.630463E-01,1.521276E-01,1.419400E-01,1.324347E-01,1.235660E-01,
1.152911E-01,1.075704E-01,1.003668E-01,9.364550E-02,8.737434E-02,8.152314E-02,
7.606378E-02,7.097001E-02,6.621737E-02,6.178299E-02,5.764556E-02,5.378521E-02,
5.018338E-02,4.682275E-02,4.368717E-02,4.076157E-02,3.803189E-02,3.548501E-02,
3.310868E-02,3.089149E-02,2.882278E-02,2.689261E-02,2.509169E-02,2.341137E-02,
2.184358E-02,2.038078E-02,1.901594E-02,1.774250E-02,1.655434E-02,1.544575E-02,
1.441139E-02,1.344630E-02,1.254584E-02,1.170569E-02,1.092179E-02,1.019039E-02,
9.507972E-03,8.871252E-03,8.277171E-03,7.722873E-03,7.205696E-03,6.723152E-03,
6.272922E-03,5.852844E-03,5.460896E-03,5.095196E-03,4.753986E-03,4.435626E-03,
4.138585E-03,3.861437E-03,3.602848E-03,3.361576E-03,3.136461E-03,2.926422E-03,
2.730448E-03,2.547598E-03,2.376993E-03,2.217813E-03,2.069293E-03,1.930718E-03,
1.801424E-03,1.680788E-03,1.568231E-03,1.463211E-03,1.365224E-03,1.273799E-03,
1.188497E-03,1.108906E-03,1.034646E-03,9.653592E-04,9.007119E-04,8.403940E-04,
7.841153E-04,7.316054E-04,6.826120E-04,6.368995E-04,5.942483E-04,5.544532E-04,
5.173232E-04,4.826796E-04,4.503560E-04,4.201970E-04,3.920576E-04,3.658027E-04,
3.413060E-04,3.184498E-04,2.971241E-04,2.772266E-04,2.586616E-04,2.413398E-04,
2.251780E-04,2.100985E-04,1.960288E-04,1.829014E-04,1.706530E-04,1.592249E-04,
1.485621E-04,1.386133E-04,1.293308E-04,1.206699E-04,1.125890E-04,1.050492E-04,
9.801441E-05,9.145068E-05,8.532650E-05,7.961244E-05,7.428103E-05,6.930666E-05,
6.466540E-05,6.033495E-05,5.629450E-05,5.252462E-05,4.900721E-05,4.572534E-05,
4.266325E-05,3.980622E-05,3.714052E-05,3.465333E-05,3.233270E-05,3.016747E-05,
2.814725E-05,2.626231E-05,2.450360E-05,2.286267E-05,2.133163E-05,1.990311E-05,
1.857026E-05,1.732666E-05,1.616635E-05,1.508374E-05,1.407362E-05,1.313116E-05,
1.225180E-05,1.143133E-05,1.066581E-05,9.951555E-06,9.285129E-06,8.663332E-06,
8.083174E-06,7.541868E-06,7.036812E-06,6.565578E-06,6.125901E-06,5.715667E-06,
5.332906E-06,4.975778E-06,4.642565E-06,4.331666E-06,4.041587E-06,3.770934E-06,
3.518406E-06,3.282789E-06,3.062950E-06,2.857834E-06,2.666453E-06,2.487889E-06,
2.321282E-06,2.165833E-06,2.020794E-06,1.885467E-06,1.759203E-06,1.641394E-06,
1.531475E-06,1.428917E-06,1.333227E-06,1.243944E-06,1.160641E-06,1.082916E-06,
1.010397E-06,9.427336E-07,8.796015E-07,8.206972E-07,7.657376E-07,7.144584E-07,
6.666133E-07,6.219722E-07,5.803206E-07,5.414582E-07,5.051984E-07,4.713668E-07,
4.398008E-07,4.103486E-07,3.828688E-07,3.572292E-07,3.333066E-07,3.109861E-07,
2.901603E-07,2.707291E-07,2.525992E-07,2.356834E-07,2.199004E-07,2.051743E-07,
1.914344E-07,1.786146E-07,1.666533E-07,1.554930E-07,1.450801E-07,1.353646E-07,
1.262996E-07,1.178417E-07,1.099502E-07,1.025872E-07,9.571720E-08,8.930730E-08,
8.332666E-08,7.774652E-08,7.254007E-08,6.768228E-08,6.314980E-08,5.892085E-08,
5.497509E-08,5.129358E-08,4.785860E-08,4.465365E-08,4.166333E-08,3.887326E-08,
3.627004E-08,3.384114E-08,3.157490E-08,2.946042E-08,2.748755E-08,2.564679E-08,
2.392930E-08,2.232683E-08,2.083167E-08,1.943663E-08,1.813502E-08,1.692057E-08,
1.578745E-08,1.473021E-08,1.374377E-08,1.282339E-08,1.196465E-08,1.116341E-08],
[6.664600E-01,6.218291E-01,5.801871E-01,5.413337E-01,5.050822E-01,4.712584E-01,
4.396996E-01,1.076714E+00,1.004610E+00,9.373342E-01,8.745637E-01,8.159968E-01,
7.613519E-01,7.103665E-01,1.329255E+00,1.240239E+00,1.157184E+00,1.079691E+00,
1.007387E+00,9.399255E-01,8.769815E-01,1.484713E+00,1.385286E+00,1.292517E+00,
1.205961E+00,1.125202E+00,1.049850E+00,9.795450E-01,1.580408E+00,1.474573E+00,
1.375825E+00,1.283690E+00,1.197725E+00,1.117517E+00,1.042680E+00,1.639315E+00,
1.529535E+00,1.427107E+00,1.331538E+00,1.242369E+00,1.159171E+00,1.081545E+00,
1.675577E+00,1.563368E+00,1.458674E+00,1.360991E+00,1.269850E+00,1.184812E+00,
1.105468E+00,1.031439E+00,9.623662E-01,8.979194E-01,8.377884E-01,7.816842E-01,
7.293372E-01,6.804956E-01,6.349249E-01,5.924059E-01,5.527342E-01,5.157193E-01,
4.811831E-01,4.489597E-01,4.188942E-01,3.908421E-01,3.646686E-01,3.402478E-01,
3.174624E-01,2.962029E-01,2.763671E-01,2.578596E-01,2.405915E-01,2.244798E-01,
2.094471E-01,1.954211E-01,1.823343E-01,1.701239E-01,1.587312E-01,1.481015E-01,
1.381836E-01,1.289298E-01,1.202958E-01,1.122399E-01,1.047235E-01,9.771053E-02,
9.116714E-02,8.506195E-02,7.936561E-02,7.405073E-02,6.909178E-02,6.446491E-02,
6.014788E-02,5.611996E-02,5.236177E-02,4.885526E-02,4.558357E-02,4.253098E-02,
3.968280E-02,3.702537E-02,3.454589E-02,3.223245E-02,3.007394E-02,2.805998E-02,
2.618089E-02,2.442763E-02,2.279179E-02,2.126549E-02,1.984140E-02,1.851268E-02,
1.727294E-02,1.611623E-02,1.503697E-02,1.402999E-02,1.309044E-02,1.221382E-02,
1.139589E-02,1.063274E-02,9.920701E-03,9.256341E-03,8.636472E-03,8.058113E-03,
7.518486E-03,7.014995E-03,6.545222E-03,6.106908E-03,5.697947E-03,5.316372E-03,
4.960351E-03,4.628171E-03,4.318236E-03,4.029057E-03,3.759243E-03,3.507498E-03,
3.272611E-03,3.053454E-03,2.848973E-03,2.658186E-03,2.480175E-03,2.314085E-03,
2.159118E-03,2.014528E-03,1.879621E-03,1.753749E-03,1.636305E-03,1.526727E-03,
1.424487E-03,1.329093E-03,1.240088E-03,1.157043E-03,1.079559E-03,1.007264E-03,
9.398107E-04,8.768744E-04,8.181527E-04,7.633635E-04,7.122433E-04,6.645465E-04,
6.200438E-04,5.785213E-04,5.397795E-04,5.036321E-04,4.699053E-04,4.384372E-04,
4.090764E-04,3.816817E-04,3.561217E-04,3.322733E-04,3.100219E-04,2.892607E-04,
2.698897E-04,2.518160E-04,2.349527E-04,2.192186E-04,2.045382E-04,1.908409E-04,
1.780608E-04,1.661366E-04,1.550110E-04,1.446303E-04,1.349449E-04,1.259080E-04,
1.174763E-04,1.096093E-04,1.022691E-04,9.542044E-05,8.903041E-05,8.306831E-05,
7.750548E-05,7.231517E-05,6.747244E-05,6.295401E-05,5.873817E-05,5.480465E-05,
5.113455E-05,4.771022E-05,4.451521E-05,4.153416E-05,3.875274E-05,3.615758E-05,
3.373622E-05,3.147701E-05,2.936908E-05,2.740232E-05,2.556727E-05,2.385511E-05,
2.225760E-05,2.076708E-05,1.937637E-05,1.807879E-05,1.686811E-05,1.573850E-05,
1.468454E-05,1.370116E-05,1.278364E-05,1.192755E-05,1.112880E-05,1.038354E-05,
9.688185E-06,9.039396E-06,8.434055E-06,7.869251E-06,7.342271E-06,6.850581E-06,
6.391818E-06,5.963777E-06,5.564401E-06,5.191770E-06,4.844092E-06,4.519698E-06,
4.217027E-06,3.934626E-06,3.671136E-06,3.425291E-06,3.195909E-06,2.981889E-06,
2.782200E-06,2.595885E-06,2.422046E-06,2.259849E-06,2.108514E-06,1.967313E-06,
1.835568E-06,1.712645E-06,1.597955E-06,1.490944E-06,1.391100E-06,1.297942E-06,
1.211023E-06,1.129924E-06,1.054257E-06,9.836564E-07,9.177839E-07,8.563226E-07,
7.989773E-07,7.454722E-07,6.955501E-07,6.489712E-07,6.055115E-07,5.649622E-07,
5.271284E-07,4.918282E-07,4.588919E-07,4.281613E-07,3.994886E-07,3.727361E-07,
3.477751E-07,3.244856E-07,3.027558E-07,2.824811E-07,2.635642E-07,2.459141E-07,
2.294460E-07,2.140807E-07,1.997443E-07,1.863680E-07,1.738875E-07,1.622428E-07,
1.513779E-07,1.412406E-07,1.317821E-07,1.229571E-07,1.147230E-07,1.070403E-07,
9.987216E-08,9.318402E-08,8.694376E-08,8.112140E-08,7.568894E-08,7.062028E-08,
6.589105E-08,6.147853E-08,5.736149E-08,5.352016E-08,4.993608E-08,4.659201E-08,
4.347188E-08,4.056070E-08,3.784447E-08,3.531014E-08,3.294553E-08,3.073926E-08,
2.868075E-08,2.676008E-08,2.496804E-08,2.329600E-08,2.173594E-08,2.028035E-08,
1.892224E-08,1.765507E-08,1.647276E-08,1.536963E-08,1.434037E-08,1.338004E-08,
1.248402E-08,1.164800E-08,1.086797E-08,1.014018E-08,9.461118E-09,8.827535E-09,
8.236382E-09,7.684816E-09,7.170187E-09,6.690021E-09,6.242010E-09,5.824001E-09,
5.433985E-09,5.070088E-09,4.730559E-09,4.413768E-09,4.118191E-09,3.842408E-09,
3.585093E-09,3.345010E-09,3.121005E-09,2.912001E-09,2.716993E-09,2.535044E-09,
2.365279E-09,2.206884E-09,2.059095E-09,1.921204E-09,1.792547E-09,1.672505E-09,
1.560502E-09,1.456000E-09,1.358496E-09,1.267522E-09,1.182640E-09,1.103442E-09,
1.029548E-09,9.606020E-10,8.962733E-10,8.362526E-10,7.802512E-10,7.280002E-10,
6.792482E-10,6.337609E-10,5.913199E-10,5.517209E-10,5.147738E-10,4.803010E-10,
4.481367E-10,4.181263E-10,3.901256E-10,3.640001E-10,3.396241E-10,3.168805E-10]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.lipid_earthworm = 0.01
ted_empty.density_earthworm = 1.0
# input variables that change per simulation
ted_empty.log_kow = pd.Series([5.0, 4.0, 2.75], dtype='float')
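# (added note, assumption) with lipid_earthworm = 0.01 and density_earthworm = 1.0, these
# log_kow values imply earthworm BCFs of 0.01 * 10**log_kow = 1000, 100, and ~5.62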
# internally calculated variables
pore_h2o_conc = pd.Series([[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[1.185152E-01,1.105786E-01,1.031735E-01,9.626426E-02,8.981773E-02,8.380291E-02,
7.819088E-02,1.914699E-01,1.786477E-01,1.666842E-01,1.555219E-01,1.451070E-01,
1.353896E-01,1.263230E-01,2.363787E-01,2.205492E-01,2.057796E-01,1.919992E-01,
1.791416E-01,1.671450E-01,1.559518E-01,2.640234E-01,2.463425E-01,2.298457E-01,
2.144536E-01,2.000923E-01,1.866927E-01,1.741905E-01,2.810407E-01,2.622202E-01,
2.446601E-01,2.282760E-01,2.129890E-01,1.987258E-01,1.854177E-01,2.915160E-01,
2.719941E-01,2.537794E-01,2.367846E-01,2.209278E-01,2.061330E-01,1.923289E-01,
2.979644E-01,2.780106E-01,2.593931E-01,2.420223E-01,2.258148E-01,2.106926E-01,
1.965832E-01,1.834186E-01,1.711356E-01,1.596752E-01,1.489822E-01,1.390053E-01,
1.296965E-01,1.210111E-01,1.129074E-01,1.053463E-01,9.829159E-02,9.170929E-02,
8.556780E-02,7.983758E-02,7.449109E-02,6.950265E-02,6.484826E-02,6.050557E-02,
5.645369E-02,5.267316E-02,4.914579E-02,4.585465E-02,4.278390E-02,3.991879E-02,
3.724555E-02,3.475132E-02,3.242413E-02,3.025278E-02,2.822685E-02,2.633658E-02,
2.457290E-02,2.292732E-02,2.139195E-02,1.995939E-02,1.862277E-02,1.737566E-02,
1.621207E-02,1.512639E-02,1.411342E-02,1.316829E-02,1.228645E-02,1.146366E-02,
1.069597E-02,9.979697E-03,9.311387E-03,8.687831E-03,8.106033E-03,7.563196E-03,
7.056711E-03,6.584145E-03,6.143224E-03,5.731831E-03,5.347987E-03,4.989849E-03,
4.655693E-03,4.343915E-03,4.053016E-03,3.781598E-03,3.528356E-03,3.292072E-03,
3.071612E-03,2.865915E-03,2.673994E-03,2.494924E-03,2.327847E-03,2.171958E-03,
2.026508E-03,1.890799E-03,1.764178E-03,1.646036E-03,1.535806E-03,1.432958E-03,
1.336997E-03,1.247462E-03,1.163923E-03,1.085979E-03,1.013254E-03,9.453995E-04,
8.820889E-04,8.230181E-04,7.679030E-04,7.164788E-04,6.684984E-04,6.237311E-04,
5.819617E-04,5.429894E-04,5.066271E-04,4.726998E-04,4.410445E-04,4.115090E-04,
3.839515E-04,3.582394E-04,3.342492E-04,3.118655E-04,2.909808E-04,2.714947E-04,
2.533135E-04,2.363499E-04,2.205222E-04,2.057545E-04,1.919758E-04,1.791197E-04,
1.671246E-04,1.559328E-04,1.454904E-04,1.357474E-04,1.266568E-04,1.181749E-04,
1.102611E-04,1.028773E-04,9.598788E-05,8.955986E-05,8.356230E-05,7.796638E-05,
7.274521E-05,6.787368E-05,6.332838E-05,5.908747E-05,5.513056E-05,5.143863E-05,
4.799394E-05,4.477993E-05,4.178115E-05,3.898319E-05,3.637260E-05,3.393684E-05,
3.166419E-05,2.954373E-05,2.756528E-05,2.571931E-05,2.399697E-05,2.238996E-05,
2.089058E-05,1.949160E-05,1.818630E-05,1.696842E-05,1.583210E-05,1.477187E-05,
1.378264E-05,1.285966E-05,1.199848E-05,1.119498E-05,1.044529E-05,9.745798E-06,
9.093151E-06,8.484210E-06,7.916048E-06,7.385934E-06,6.891320E-06,6.429829E-06,
5.999242E-06,5.597491E-06,5.222644E-06,4.872899E-06,4.546575E-06,4.242105E-06,
3.958024E-06,3.692967E-06,3.445660E-06,3.214914E-06,2.999621E-06,2.798745E-06,
2.611322E-06,2.436449E-06,2.273288E-06,2.121052E-06,1.979012E-06,1.846483E-06,
1.722830E-06,1.607457E-06,1.499811E-06,1.399373E-06,1.305661E-06,1.218225E-06,
1.136644E-06,1.060526E-06,9.895060E-07,9.232417E-07,8.614150E-07,8.037286E-07,
7.499053E-07,6.996864E-07,6.528305E-07,6.091124E-07,5.683219E-07,5.302631E-07,
4.947530E-07,4.616209E-07,4.307075E-07,4.018643E-07,3.749526E-07,3.498432E-07,
3.264152E-07,3.045562E-07,2.841610E-07,2.651316E-07,2.473765E-07,2.308104E-07,
2.153537E-07,2.009321E-07,1.874763E-07,1.749216E-07,1.632076E-07,1.522781E-07,
1.420805E-07,1.325658E-07,1.236882E-07,1.154052E-07,1.076769E-07,1.004661E-07,
9.373816E-08,8.746080E-08,8.160381E-08,7.613905E-08,7.104024E-08,6.628289E-08,
6.184412E-08,5.770261E-08,5.383844E-08,5.023304E-08,4.686908E-08,4.373040E-08,
4.080190E-08,3.806952E-08,3.552012E-08,3.314144E-08,3.092206E-08,2.885130E-08,
2.691922E-08,2.511652E-08,2.343454E-08,2.186520E-08,2.040095E-08,1.903476E-08,
1.776006E-08,1.657072E-08,1.546103E-08,1.442565E-08,1.345961E-08,1.255826E-08,
1.171727E-08,1.093260E-08,1.020048E-08,9.517381E-09,8.880030E-09,8.285361E-09,
7.730515E-09,7.212826E-09,6.729804E-09,6.279130E-09,5.858635E-09,5.466300E-09,
5.100238E-09,4.758690E-09,4.440015E-09,4.142681E-09,3.865258E-09,3.606413E-09,
3.364902E-09,3.139565E-09,2.929318E-09,2.733150E-09,2.550119E-09,2.379345E-09,
2.220008E-09,2.071340E-09,1.932629E-09,1.803206E-09,1.682451E-09,1.569782E-09,
1.464659E-09,1.366575E-09,1.275060E-09,1.189673E-09,1.110004E-09,1.035670E-09,
9.663144E-10,9.016032E-10,8.412256E-10,7.848912E-10,7.323294E-10,6.832875E-10,
6.375298E-10,5.948363E-10,5.550019E-10,5.178351E-10,4.831572E-10,4.508016E-10,
4.206128E-10,3.924456E-10,3.661647E-10,3.416437E-10,3.187649E-10,2.974181E-10,
2.775009E-10,2.589175E-10,2.415786E-10,2.254008E-10,2.103064E-10,1.962228E-10,
1.830823E-10,1.708219E-10,1.593824E-10,1.487091E-10,1.387505E-10,1.294588E-10,
1.207893E-10,1.127004E-10,1.051532E-10,9.811140E-11,9.154117E-11,8.541093E-11,
7.969122E-11,7.435454E-11,6.937524E-11,6.472938E-11,6.039465E-11,5.635020E-11]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_soil_inv_timeseries(i, pore_h2o_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_animal_dose_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
:param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param intake_food_conc; pesticide concentration in food item (daily mg a.i./kg)
:param frac_retained; fraction of ingested food retained by animal (mammals, birds, reptiles/amphibians)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association, rather it is one year from the day of the 1st pesticide application
# this represents Eqs 5&6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[2.860270E+02,3.090209E+02,3.058215E+02,3.001105E+02,2.942541E+02,2.884869E+02,2.828301E+02,
5.633110E+02,5.808675E+02,5.723374E+02,5.614002E+02,5.504201E+02,5.396295E+02,5.290480E+02,
8.047008E+02,8.175238E+02,8.043529E+02,7.888661E+02,7.734255E+02,7.582619E+02,7.433932E+02,
1.014843E+03,1.023545E+03,1.006334E+03,9.868866E+02,9.675630E+02,9.485925E+02,9.299915E+02,
1.197782E+03,1.202897E+03,1.182169E+03,1.159274E+03,1.136569E+03,1.114285E+03,1.092435E+03,
1.357040E+03,1.359032E+03,1.335242E+03,1.309345E+03,1.283698E+03,1.258528E+03,1.233850E+03,
1.495682E+03,1.494955E+03,1.468500E+03,1.439990E+03,1.411781E+03,1.384100E+03,1.356959E+03,
1.330350E+03,1.304262E+03,1.278687E+03,1.253612E+03,1.229030E+03,1.204929E+03,1.181301E+03,
1.158137E+03,1.135426E+03,1.113161E+03,1.091333E+03,1.069932E+03,1.048952E+03,1.028382E+03,
1.008217E+03,9.884460E+02,9.690632E+02,9.500604E+02,9.314303E+02,9.131655E+02,8.952589E+02,
8.777034E+02,8.604922E+02,8.436185E+02,8.270756E+02,8.108572E+02,7.949568E+02,7.793682E+02,
7.640852E+02,7.491020E+02,7.344125E+02,7.200112E+02,7.058922E+02,6.920501E+02,6.784794E+02,
6.651748E+02,6.521312E+02,6.393433E+02,6.268061E+02,6.145148E+02,6.024646E+02,5.906506E+02,
5.790683E+02,5.677131E+02,5.565806E+02,5.456664E+02,5.349662E+02,5.244759E+02,5.141912E+02,
5.041083E+02,4.942230E+02,4.845316E+02,4.750302E+02,4.657152E+02,4.565828E+02,4.476295E+02,
4.388517E+02,4.302461E+02,4.218092E+02,4.135378E+02,4.054286E+02,3.974784E+02,3.896841E+02,
3.820426E+02,3.745510E+02,3.672063E+02,3.600056E+02,3.529461E+02,3.460250E+02,3.392397E+02,
3.325874E+02,3.260656E+02,3.196716E+02,3.134031E+02,3.072574E+02,3.012323E+02,2.953253E+02,
2.895342E+02,2.838566E+02,2.782903E+02,2.728332E+02,2.674831E+02,2.622379E+02,2.570956E+02,
2.520541E+02,2.471115E+02,2.422658E+02,2.375151E+02,2.328576E+02,2.282914E+02,2.238147E+02,
2.194259E+02,2.151231E+02,2.109046E+02,2.067689E+02,2.027143E+02,1.987392E+02,1.948420E+02,
1.910213E+02,1.872755E+02,1.836031E+02,1.800028E+02,1.764730E+02,1.730125E+02,1.696198E+02,
1.662937E+02,1.630328E+02,1.598358E+02,1.567015E+02,1.536287E+02,1.506161E+02,1.476627E+02,
1.447671E+02,1.419283E+02,1.391452E+02,1.364166E+02,1.337416E+02,1.311190E+02,1.285478E+02,
1.260271E+02,1.235557E+02,1.211329E+02,1.187576E+02,1.164288E+02,1.141457E+02,1.119074E+02,
1.097129E+02,1.075615E+02,1.054523E+02,1.033845E+02,1.013571E+02,9.936960E+01,9.742102E+01,
9.551065E+01,9.363775E+01,9.180157E+01,9.000140E+01,8.823652E+01,8.650626E+01,8.480992E+01,
8.314685E+01,8.151639E+01,7.991791E+01,7.835077E+01,7.681436E+01,7.530807E+01,7.383133E+01,
7.238354E+01,7.096414E+01,6.957258E+01,6.820830E+01,6.687078E+01,6.555948E+01,6.427390E+01,
6.301353E+01,6.177787E+01,6.056645E+01,5.937878E+01,5.821440E+01,5.707285E+01,5.595368E+01,
5.485647E+01,5.378076E+01,5.272616E+01,5.169223E+01,5.067857E+01,4.968480E+01,4.871051E+01,
4.775533E+01,4.681887E+01,4.590078E+01,4.500070E+01,4.411826E+01,4.325313E+01,4.240496E+01,
4.157343E+01,4.075820E+01,3.995895E+01,3.917538E+01,3.840718E+01,3.765404E+01,3.691566E+01,
3.619177E+01,3.548207E+01,3.478629E+01,3.410415E+01,3.343539E+01,3.277974E+01,3.213695E+01,
3.150677E+01,3.088894E+01,3.028322E+01,2.968939E+01,2.910720E+01,2.853642E+01,2.797684E+01,
2.742823E+01,2.689038E+01,2.636308E+01,2.584611E+01,2.533929E+01,2.484240E+01,2.435526E+01,
2.387766E+01,2.340944E+01,2.295039E+01,2.250035E+01,2.205913E+01,2.162656E+01,2.120248E+01,
2.078671E+01,2.037910E+01,1.997948E+01,1.958769E+01,1.920359E+01,1.882702E+01,1.845783E+01,
1.809588E+01,1.774104E+01,1.739314E+01,1.705208E+01,1.671770E+01,1.638987E+01,1.606848E+01,
1.575338E+01,1.544447E+01,1.514161E+01,1.484469E+01,1.455360E+01,1.426821E+01,1.398842E+01,
1.371412E+01,1.344519E+01,1.318154E+01,1.292306E+01,1.266964E+01,1.242120E+01,1.217763E+01,
1.193883E+01,1.170472E+01,1.147520E+01,1.125017E+01,1.102957E+01,1.081328E+01,1.060124E+01,
1.039336E+01,1.018955E+01,9.989738E+00,9.793846E+00,9.601794E+00,9.413509E+00,9.228916E+00,
9.047942E+00,8.870518E+00,8.696572E+00,8.526038E+00,8.358848E+00,8.194936E+00,8.034238E+00,
7.876691E+00,7.722234E+00,7.570806E+00,7.422347E+00,7.276799E+00,7.134106E+00,6.994210E+00,
6.857058E+00,6.722595E+00,6.590769E+00,6.461528E+00,6.334822E+00,6.210600E+00,6.088814E+00,
5.969416E+00,5.852359E+00,5.737598E+00,5.625087E+00,5.514783E+00,5.406641E+00,5.300620E+00,
5.196678E+00,5.094775E+00,4.994869E+00,4.896923E+00,4.800897E+00,4.706755E+00,4.614458E+00,
4.523971E+00,4.435259E+00,4.348286E+00,4.263019E+00,4.179424E+00,4.097468E+00,4.017119E+00,
3.938346E+00,3.861117E+00,3.785403E+00,3.711174E+00,3.638400E+00,3.567053E+00,3.497105E+00,
3.428529E+00,3.361298E+00,3.295385E+00,3.230764E+00,3.167411E+00,3.105300E+00,3.044407E+00,
2.984708E+00,2.926180E+00,2.868799E+00,2.812544E+00,2.757391E+00,2.703321E+00,2.650310E+00,
2.598339E+00,2.547387E+00],
[4.583348E+01,4.951806E+01,4.900538E+01,4.809025E+01,4.715181E+01,4.622765E+01,4.532120E+01,
9.026597E+01,9.307926E+01,9.171236E+01,8.995977E+01,8.820030E+01,8.647120E+01,8.477560E+01,
1.289467E+02,1.310015E+02,1.288910E+02,1.264093E+02,1.239351E+02,1.215053E+02,1.191227E+02,
1.626202E+02,1.640147E+02,1.612568E+02,1.581405E+02,1.550440E+02,1.520042E+02,1.490235E+02,
1.919347E+02,1.927544E+02,1.894329E+02,1.857641E+02,1.821259E+02,1.785550E+02,1.750537E+02,
2.174545E+02,2.177737E+02,2.139616E+02,2.098118E+02,2.057021E+02,2.016689E+02,1.977143E+02,
2.396707E+02,2.395543E+02,2.353151E+02,2.307466E+02,2.262263E+02,2.217906E+02,2.174415E+02,
2.131776E+02,2.089973E+02,2.048990E+02,2.008811E+02,1.969419E+02,1.930800E+02,1.892938E+02,
1.855819E+02,1.819427E+02,1.783750E+02,1.748771E+02,1.714479E+02,1.680859E+02,1.647898E+02,
1.615584E+02,1.583904E+02,1.552844E+02,1.522394E+02,1.492541E+02,1.463273E+02,1.434579E+02,
1.406448E+02,1.378868E+02,1.351829E+02,1.325321E+02,1.299332E+02,1.273853E+02,1.248873E+02,
1.224384E+02,1.200374E+02,1.176836E+02,1.153759E+02,1.131134E+02,1.108953E+02,1.087208E+02,
1.065888E+02,1.044987E+02,1.024495E+02,1.004405E+02,9.847096E+01,9.654000E+01,9.464691E+01,
9.279094E+01,9.097137E+01,8.918748E+01,8.743857E+01,8.572395E+01,8.404295E+01,8.239492E+01,
8.077921E+01,7.919518E+01,7.764221E+01,7.611969E+01,7.462703E+01,7.316364E+01,7.172895E+01,
7.032239E+01,6.894341E+01,6.759147E+01,6.626604E+01,6.496660E+01,6.369265E+01,6.244367E+01,
6.121919E+01,6.001872E+01,5.884179E+01,5.768794E+01,5.655671E+01,5.544767E+01,5.436038E+01,
5.329440E+01,5.224933E+01,5.122475E+01,5.022027E+01,4.923548E+01,4.827000E+01,4.732346E+01,
4.639547E+01,4.548569E+01,4.459374E+01,4.371928E+01,4.286197E+01,4.202148E+01,4.119746E+01,
4.038960E+01,3.959759E+01,3.882110E+01,3.805985E+01,3.731352E+01,3.658182E+01,3.586447E+01,
3.516119E+01,3.447170E+01,3.379573E+01,3.313302E+01,3.248330E+01,3.184632E+01,3.122184E+01,
3.060960E+01,3.000936E+01,2.942090E+01,2.884397E+01,2.827836E+01,2.772384E+01,2.718019E+01,
2.664720E+01,2.612467E+01,2.561238E+01,2.511013E+01,2.461774E+01,2.413500E+01,2.366173E+01,
2.319774E+01,2.274284E+01,2.229687E+01,2.185964E+01,2.143099E+01,2.101074E+01,2.059873E+01,
2.019480E+01,1.979879E+01,1.941055E+01,1.902992E+01,1.865676E+01,1.829091E+01,1.793224E+01,
1.758060E+01,1.723585E+01,1.689787E+01,1.656651E+01,1.624165E+01,1.592316E+01,1.561092E+01,
1.530480E+01,1.500468E+01,1.471045E+01,1.442198E+01,1.413918E+01,1.386192E+01,1.359009E+01,
1.332360E+01,1.306233E+01,1.280619E+01,1.255507E+01,1.230887E+01,1.206750E+01,1.183086E+01,
1.159887E+01,1.137142E+01,1.114843E+01,1.092982E+01,1.071549E+01,1.050537E+01,1.029937E+01,
1.009740E+01,9.899397E+00,9.705276E+00,9.514962E+00,9.328379E+00,9.145455E+00,8.966118E+00,
8.790298E+00,8.617926E+00,8.448934E+00,8.283255E+00,8.120826E+00,7.961581E+00,7.805459E+00,
7.652399E+00,7.502340E+00,7.355224E+00,7.210992E+00,7.069589E+00,6.930959E+00,6.795047E+00,
6.661800E+00,6.531166E+00,6.403094E+00,6.277533E+00,6.154435E+00,6.033750E+00,5.915432E+00,
5.799434E+00,5.685711E+00,5.574217E+00,5.464910E+00,5.357747E+00,5.252685E+00,5.149683E+00,
5.048701E+00,4.949699E+00,4.852638E+00,4.757481E+00,4.664189E+00,4.572728E+00,4.483059E+00,
4.395149E+00,4.308963E+00,4.224467E+00,4.141628E+00,4.060413E+00,3.980791E+00,3.902730E+00,
3.826200E+00,3.751170E+00,3.677612E+00,3.605496E+00,3.534795E+00,3.465479E+00,3.397524E+00,
3.330900E+00,3.265583E+00,3.201547E+00,3.138767E+00,3.077217E+00,3.016875E+00,2.957716E+00,
2.899717E+00,2.842855E+00,2.787109E+00,2.732455E+00,2.678873E+00,2.626342E+00,2.574841E+00,
2.524350E+00,2.474849E+00,2.426319E+00,2.378740E+00,2.332095E+00,2.286364E+00,2.241530E+00,
2.197575E+00,2.154481E+00,2.112233E+00,2.070814E+00,2.030206E+00,1.990395E+00,1.951365E+00,
1.913100E+00,1.875585E+00,1.838806E+00,1.802748E+00,1.767397E+00,1.732740E+00,1.698762E+00,
1.665450E+00,1.632792E+00,1.600774E+00,1.569383E+00,1.538609E+00,1.508438E+00,1.478858E+00,
1.449859E+00,1.421428E+00,1.393554E+00,1.366228E+00,1.339437E+00,1.313171E+00,1.287421E+00,
1.262175E+00,1.237425E+00,1.213160E+00,1.189370E+00,1.166047E+00,1.143182E+00,1.120765E+00,
1.098787E+00,1.077241E+00,1.056117E+00,1.035407E+00,1.015103E+00,9.951976E-01,9.756824E-01,
9.565499E-01,9.377925E-01,9.194030E-01,9.013741E-01,8.836987E-01,8.663699E-01,8.493809E-01,
8.327250E-01,8.163958E-01,8.003868E-01,7.846917E-01,7.693044E-01,7.542188E-01,7.394290E-01,
7.249293E-01,7.107138E-01,6.967772E-01,6.831138E-01,6.697183E-01,6.565856E-01,6.437103E-01,
6.310876E-01,6.187123E-01,6.065798E-01,5.946851E-01,5.830237E-01,5.715909E-01,5.603824E-01,
5.493936E-01,5.386204E-01,5.280583E-01,5.177034E-01,5.075516E-01,4.975988E-01,4.878412E-01,
4.782749E-01,4.688963E-01,4.597015E-01,4.506870E-01,4.418493E-01,4.331849E-01,4.246904E-01,
4.163625E-01,4.081979E-01],
[1.338207E+02,1.378876E+02,1.355183E+02,1.328776E+02,1.302728E+02,1.277182E+02,1.252138E+02,
2.565791E+02,2.582388E+02,2.535095E+02,2.485550E+02,2.436818E+02,2.389034E+02,2.342187E+02,
3.634465E+02,3.630106E+02,3.562267E+02,3.492581E+02,3.424102E+02,3.356958E+02,3.291130E+02,
4.564800E+02,4.542198E+02,4.456473E+02,4.369252E+02,4.283582E+02,4.199584E+02,4.117233E+02,
5.374704E+02,5.336219E+02,5.234925E+02,5.132438E+02,5.031803E+02,4.933133E+02,4.836397E+02,
6.079765E+02,6.027455E+02,5.912606E+02,5.796831E+02,5.683167E+02,5.571724E+02,5.462466E+02,
6.693557E+02,6.629211E+02,6.502562E+02,6.375218E+02,6.250212E+02,6.127650E+02,6.007490E+02,
5.889687E+02,5.774194E+02,5.660965E+02,5.549957E+02,5.441126E+02,5.334429E+02,5.229824E+02,
5.127270E+02,5.026728E+02,4.928157E+02,4.831519E+02,4.736775E+02,4.643890E+02,4.552826E+02,
4.463548E+02,4.376021E+02,4.290210E+02,4.206081E+02,4.123602E+02,4.042741E+02,3.963465E+02,
3.885744E+02,3.809547E+02,3.734844E+02,3.661606E+02,3.589804E+02,3.519411E+02,3.450397E+02,
3.382737E+02,3.316404E+02,3.251371E+02,3.187613E+02,3.125106E+02,3.063825E+02,3.003745E+02,
2.944844E+02,2.887097E+02,2.830483E+02,2.774979E+02,2.720563E+02,2.667214E+02,2.614912E+02,
2.563635E+02,2.513364E+02,2.464078E+02,2.415759E+02,2.368388E+02,2.321945E+02,2.276413E+02,
2.231774E+02,2.188010E+02,2.145105E+02,2.103041E+02,2.061801E+02,2.021371E+02,1.981733E+02,
1.942872E+02,1.904774E+02,1.867422E+02,1.830803E+02,1.794902E+02,1.759705E+02,1.725199E+02,
1.691368E+02,1.658202E+02,1.625685E+02,1.593807E+02,1.562553E+02,1.531912E+02,1.501873E+02,
1.472422E+02,1.443548E+02,1.415241E+02,1.387489E+02,1.360282E+02,1.333607E+02,1.307456E+02,
1.281818E+02,1.256682E+02,1.232039E+02,1.207880E+02,1.184194E+02,1.160973E+02,1.138207E+02,
1.115887E+02,1.094005E+02,1.072552E+02,1.051520E+02,1.030901E+02,1.010685E+02,9.908664E+01,
9.714361E+01,9.523868E+01,9.337111E+01,9.154016E+01,8.974511E+01,8.798527E+01,8.625993E+01,
8.456842E+01,8.291009E+01,8.128427E+01,7.969034E+01,7.812766E+01,7.659562E+01,7.509363E+01,
7.362109E+01,7.217742E+01,7.076207E+01,6.937447E+01,6.801408E+01,6.668036E+01,6.537280E+01,
6.409088E+01,6.283410E+01,6.160196E+01,6.039398E+01,5.920969E+01,5.804863E+01,5.691033E+01,
5.579435E+01,5.470026E+01,5.362762E+01,5.257601E+01,5.154503E+01,5.053426E+01,4.954332E+01,
4.857180E+01,4.761934E+01,4.668555E+01,4.577008E+01,4.487256E+01,4.399263E+01,4.312996E+01,
4.228421E+01,4.145504E+01,4.064214E+01,3.984517E+01,3.906383E+01,3.829781E+01,3.754681E+01,
3.681054E+01,3.608871E+01,3.538103E+01,3.468723E+01,3.400704E+01,3.334018E+01,3.268640E+01,
3.204544E+01,3.141705E+01,3.080098E+01,3.019699E+01,2.960485E+01,2.902431E+01,2.845516E+01,
2.789718E+01,2.735013E+01,2.681381E+01,2.628801E+01,2.577252E+01,2.526713E+01,2.477166E+01,
2.428590E+01,2.380967E+01,2.334278E+01,2.288504E+01,2.243628E+01,2.199632E+01,2.156498E+01,
2.114211E+01,2.072752E+01,2.032107E+01,1.992258E+01,1.953191E+01,1.914891E+01,1.877341E+01,
1.840527E+01,1.804436E+01,1.769052E+01,1.734362E+01,1.700352E+01,1.667009E+01,1.634320E+01,
1.602272E+01,1.570852E+01,1.540049E+01,1.509850E+01,1.480242E+01,1.451216E+01,1.422758E+01,
1.394859E+01,1.367506E+01,1.340690E+01,1.314400E+01,1.288626E+01,1.263357E+01,1.238583E+01,
1.214295E+01,1.190484E+01,1.167139E+01,1.144252E+01,1.121814E+01,1.099816E+01,1.078249E+01,
1.057105E+01,1.036376E+01,1.016053E+01,9.961292E+00,9.765957E+00,9.574453E+00,9.386704E+00,
9.202636E+00,9.022178E+00,8.845259E+00,8.671808E+00,8.501760E+00,8.335045E+00,8.171600E+00,
8.011360E+00,7.854262E+00,7.700245E+00,7.549248E+00,7.401212E+00,7.256078E+00,7.113791E+00,
6.974294E+00,6.837532E+00,6.703452E+00,6.572002E+00,6.443129E+00,6.316783E+00,6.192915E+00,
6.071476E+00,5.952418E+00,5.835694E+00,5.721260E+00,5.609069E+00,5.499079E+00,5.391245E+00,
5.285526E+00,5.181880E+00,5.080267E+00,4.980646E+00,4.882979E+00,4.787226E+00,4.693352E+00,
4.601318E+00,4.511089E+00,4.422629E+00,4.335904E+00,4.250880E+00,4.167523E+00,4.085800E+00,
4.005680E+00,3.927131E+00,3.850122E+00,3.774624E+00,3.700606E+00,3.628039E+00,3.556896E+00,
3.487147E+00,3.418766E+00,3.351726E+00,3.286001E+00,3.221564E+00,3.158392E+00,3.096457E+00,
3.035738E+00,2.976209E+00,2.917847E+00,2.860630E+00,2.804535E+00,2.749540E+00,2.695623E+00,
2.642763E+00,2.590940E+00,2.540133E+00,2.490323E+00,2.441489E+00,2.393613E+00,2.346676E+00,
2.300659E+00,2.255544E+00,2.211315E+00,2.167952E+00,2.125440E+00,2.083761E+00,2.042900E+00,
2.002840E+00,1.963566E+00,1.925061E+00,1.887312E+00,1.850303E+00,1.814020E+00,1.778448E+00,
1.743573E+00,1.709383E+00,1.675863E+00,1.643000E+00,1.610782E+00,1.579196E+00,1.548229E+00,
1.517869E+00,1.488104E+00,1.458924E+00,1.430315E+00,1.402267E+00,1.374770E+00,1.347811E+00,
1.321382E+00,1.295470E+00,1.270067E+00,1.245162E+00,1.220745E+00,1.196807E+00,1.173338E+00,
1.150330E+00,1.127772E+00]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variables
a1 = pd.Series([.621, .621, .648], dtype='float')
b1 = pd.Series([.564, .564, .651], dtype='float')
# internally specified variables from external database
body_wgt = pd.Series([15., 1000., 20.], dtype='float')
frac_h2o = pd.Series([0.8, 0.8, 0.8], dtype='float')
# input variables that change per simulation
ted_empty.frac_retained_mamm = pd.Series([0.1, 0.1, 0.05], dtype='float')
# internally calculated variables
intake_food_conc = pd.Series([[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,
2.717171E+02,2.663889E+02,5.611652E+02,5.501611E+02,5.393727E+02,
5.287960E+02,5.184266E+02,5.082606E+02,4.982939E+02,7.885227E+02,
7.730602E+02,7.579010E+02,7.430390E+02,7.284684E+02,7.141836E+02,
7.001789E+02,9.864488E+02,9.671052E+02,9.481408E+02,9.295484E+02,
9.113205E+02,8.934501E+02,8.759300E+02,1.158754E+03,1.136031E+03,
1.113754E+03,1.091914E+03,1.070502E+03,1.049511E+03,1.028930E+03,
1.308754E+03,1.283090E+03,1.257929E+03,1.233262E+03,1.209078E+03,
1.185369E+03,1.162125E+03,1.439336E+03,1.411112E+03,1.383441E+03,
1.356312E+03,1.329716E+03,1.303641E+03,1.278077E+03,1.253015E+03,
1.228444E+03,1.204355E+03,1.180738E+03,1.157585E+03,1.134885E+03,
1.112631E+03,1.090813E+03,1.069423E+03,1.048452E+03,1.027892E+03,
1.007736E+03,9.879750E+02,9.686014E+02,9.496077E+02,9.309865E+02,
9.127304E+02,8.948323E+02,8.772852E+02,8.600822E+02,8.432165E+02,
8.266816E+02,8.104708E+02,7.945780E+02,7.789968E+02,7.637211E+02,
7.487450E+02,7.340626E+02,7.196681E+02,7.055558E+02,6.917203E+02,
6.781561E+02,6.648579E+02,6.518204E+02,6.390386E+02,6.265075E+02,
6.142220E+02,6.021775E+02,5.903692E+02,5.787924E+02,5.674426E+02,
5.563154E+02,5.454064E+02,5.347113E+02,5.242260E+02,5.139462E+02,
5.038680E+02,4.939875E+02,4.843007E+02,4.748039E+02,4.654933E+02,
4.563652E+02,4.474162E+02,4.386426E+02,4.300411E+02,4.216083E+02,
4.133408E+02,4.052354E+02,3.972890E+02,3.894984E+02,3.818606E+02,
3.743725E+02,3.670313E+02,3.598340E+02,3.527779E+02,3.458602E+02,
3.390781E+02,3.324289E+02,3.259102E+02,3.195193E+02,3.132537E+02,
3.071110E+02,3.010888E+02,2.951846E+02,2.893962E+02,2.837213E+02,
2.781577E+02,2.727032E+02,2.673557E+02,2.621130E+02,2.569731E+02,
2.519340E+02,2.469938E+02,2.421504E+02,2.374019E+02,2.327466E+02,
2.281826E+02,2.237081E+02,2.193213E+02,2.150205E+02,2.108041E+02,
2.066704E+02,2.026177E+02,1.986445E+02,1.947492E+02,1.909303E+02,
1.871863E+02,1.835157E+02,1.799170E+02,1.763890E+02,1.729301E+02,
1.695390E+02,1.662145E+02,1.629551E+02,1.597597E+02,1.566269E+02,
1.535555E+02,1.505444E+02,1.475923E+02,1.446981E+02,1.418607E+02,
1.390789E+02,1.363516E+02,1.336778E+02,1.310565E+02,1.284866E+02,
1.259670E+02,1.234969E+02,1.210752E+02,1.187010E+02,1.163733E+02,
1.140913E+02,1.118540E+02,1.096607E+02,1.075103E+02,1.054021E+02,
1.033352E+02,1.013089E+02,9.932225E+01,9.737460E+01,9.546514E+01,
9.359313E+01,9.175783E+01,8.995851E+01,8.819448E+01,8.646504E+01,
8.476951E+01,8.310723E+01,8.147755E+01,7.987983E+01,7.831343E+01,
7.677775E+01,7.527219E+01,7.379615E+01,7.234905E+01,7.093033E+01,
6.953943E+01,6.817580E+01,6.683892E+01,6.552825E+01,6.424328E+01,
6.298351E+01,6.174844E+01,6.053759E+01,5.935048E+01,5.818666E+01,
5.704565E+01,5.592702E+01,5.483033E+01,5.375514E+01,5.270103E+01,
5.166760E+01,5.065443E+01,4.966112E+01,4.868730E+01,4.773257E+01,
4.679657E+01,4.587891E+01,4.497926E+01,4.409724E+01,4.323252E+01,
4.238476E+01,4.155362E+01,4.073878E+01,3.993991E+01,3.915672E+01,
3.838888E+01,3.763609E+01,3.689807E+01,3.617452E+01,3.546516E+01,
3.476971E+01,3.408790E+01,3.341946E+01,3.276412E+01,3.212164E+01,
3.149175E+01,3.087422E+01,3.026879E+01,2.967524E+01,2.909333E+01,
2.852283E+01,2.796351E+01,2.741516E+01,2.687757E+01,2.635052E+01,
2.583380E+01,2.532721E+01,2.483056E+01,2.434365E+01,2.386629E+01,
2.339828E+01,2.293946E+01,2.248963E+01,2.204862E+01,2.161626E+01,
2.119238E+01,2.077681E+01,2.036939E+01,1.996996E+01,1.957836E+01,
1.919444E+01,1.881805E+01,1.844904E+01,1.808726E+01,1.773258E+01,
1.738486E+01,1.704395E+01,1.670973E+01,1.638206E+01,1.606082E+01,
1.574588E+01,1.543711E+01,1.513440E+01,1.483762E+01,1.454666E+01,
1.426141E+01,1.398176E+01,1.370758E+01,1.343878E+01,1.317526E+01,
1.291690E+01,1.266361E+01,1.241528E+01,1.217183E+01,1.193314E+01,
1.169914E+01,1.146973E+01,1.124481E+01,1.102431E+01,1.080813E+01,
1.059619E+01,1.038840E+01,1.018469E+01,9.984978E+00,9.789179E+00,
9.597219E+00,9.409024E+00,9.224518E+00,9.043631E+00,8.866291E+00,
8.692429E+00,8.521975E+00,8.354865E+00,8.191031E+00,8.030410E+00,
7.872938E+00,7.718555E+00,7.567199E+00,7.418810E+00,7.273332E+00,
7.130706E+00,6.990878E+00,6.853791E+00,6.719392E+00,6.587629E+00,
6.458450E+00,6.331803E+00,6.207641E+00,6.085913E+00,5.966571E+00,
5.849571E+00,5.734864E+00,5.622407E+00,5.512155E+00,5.404065E+00,
5.298095E+00,5.194202E+00,5.092347E+00,4.992489E+00,4.894590E+00,
4.798610E+00,4.704512E+00,4.612259E+00,4.521816E+00,4.433146E+00,
4.346214E+00,4.260988E+00,4.177432E+00,4.095515E+00,4.015205E+00,
3.936469E+00,3.859277E+00,3.783599E+00,3.709405E+00,3.636666E+00,
3.565353E+00,3.495439E+00,3.426895E+00,3.359696E+00,3.293814E+00,
3.229225E+00,3.165902E+00,3.103820E+00,3.042956E+00,2.983286E+00,
2.924785E+00,2.867432E+00,2.811203E+00,2.756077E+00,2.702032E+00,
2.649047E+00,2.597101E+00,2.546174E+00,2.496245E+00,2.447295E+00,
2.399305E+00],
[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,
2.717171E+02,2.663889E+02,5.611652E+02,5.501611E+02,5.393727E+02,
5.287960E+02,5.184266E+02,5.082606E+02,4.982939E+02,7.885227E+02,
7.730602E+02,7.579010E+02,7.430390E+02,7.284684E+02,7.141836E+02,
7.001789E+02,9.864488E+02,9.671052E+02,9.481408E+02,9.295484E+02,
9.113205E+02,8.934501E+02,8.759300E+02,1.158754E+03,1.136031E+03,
1.113754E+03,1.091914E+03,1.070502E+03,1.049511E+03,1.028930E+03,
1.308754E+03,1.283090E+03,1.257929E+03,1.233262E+03,1.209078E+03,
1.185369E+03,1.162125E+03,1.439336E+03,1.411112E+03,1.383441E+03,
1.356312E+03,1.329716E+03,1.303641E+03,1.278077E+03,1.253015E+03,
1.228444E+03,1.204355E+03,1.180738E+03,1.157585E+03,1.134885E+03,
1.112631E+03,1.090813E+03,1.069423E+03,1.048452E+03,1.027892E+03,
1.007736E+03,9.879750E+02,9.686014E+02,9.496077E+02,9.309865E+02,
9.127304E+02,8.948323E+02,8.772852E+02,8.600822E+02,8.432165E+02,
8.266816E+02,8.104708E+02,7.945780E+02,7.789968E+02,7.637211E+02,
7.487450E+02,7.340626E+02,7.196681E+02,7.055558E+02,6.917203E+02,
6.781561E+02,6.648579E+02,6.518204E+02,6.390386E+02,6.265075E+02,
6.142220E+02,6.021775E+02,5.903692E+02,5.787924E+02,5.674426E+02,
5.563154E+02,5.454064E+02,5.347113E+02,5.242260E+02,5.139462E+02,
5.038680E+02,4.939875E+02,4.843007E+02,4.748039E+02,4.654933E+02,
4.563652E+02,4.474162E+02,4.386426E+02,4.300411E+02,4.216083E+02,
4.133408E+02,4.052354E+02,3.972890E+02,3.894984E+02,3.818606E+02,
3.743725E+02,3.670313E+02,3.598340E+02,3.527779E+02,3.458602E+02,
3.390781E+02,3.324289E+02,3.259102E+02,3.195193E+02,3.132537E+02,
3.071110E+02,3.010888E+02,2.951846E+02,2.893962E+02,2.837213E+02,
2.781577E+02,2.727032E+02,2.673557E+02,2.621130E+02,2.569731E+02,
2.519340E+02,2.469938E+02,2.421504E+02,2.374019E+02,2.327466E+02,
2.281826E+02,2.237081E+02,2.193213E+02,2.150205E+02,2.108041E+02,
2.066704E+02,2.026177E+02,1.986445E+02,1.947492E+02,1.909303E+02,
1.871863E+02,1.835157E+02,1.799170E+02,1.763890E+02,1.729301E+02,
1.695390E+02,1.662145E+02,1.629551E+02,1.597597E+02,1.566269E+02,
1.535555E+02,1.505444E+02,1.475923E+02,1.446981E+02,1.418607E+02,
1.390789E+02,1.363516E+02,1.336778E+02,1.310565E+02,1.284866E+02,
1.259670E+02,1.234969E+02,1.210752E+02,1.187010E+02,1.163733E+02,
1.140913E+02,1.118540E+02,1.096607E+02,1.075103E+02,1.054021E+02,
1.033352E+02,1.013089E+02,9.932225E+01,9.737460E+01,9.546514E+01,
9.359313E+01,9.175783E+01,8.995851E+01,8.819448E+01,8.646504E+01,
8.476951E+01,8.310723E+01,8.147755E+01,7.987983E+01,7.831343E+01,
7.677775E+01,7.527219E+01,7.379615E+01,7.234905E+01,7.093033E+01,
6.953943E+01,6.817580E+01,6.683892E+01,6.552825E+01,6.424328E+01,
6.298351E+01,6.174844E+01,6.053759E+01,5.935048E+01,5.818666E+01,
5.704565E+01,5.592702E+01,5.483033E+01,5.375514E+01,5.270103E+01,
5.166760E+01,5.065443E+01,4.966112E+01,4.868730E+01,4.773257E+01,
4.679657E+01,4.587891E+01,4.497926E+01,4.409724E+01,4.323252E+01,
4.238476E+01,4.155362E+01,4.073878E+01,3.993991E+01,3.915672E+01,
3.838888E+01,3.763609E+01,3.689807E+01,3.617452E+01,3.546516E+01,
3.476971E+01,3.408790E+01,3.341946E+01,3.276412E+01,3.212164E+01,
3.149175E+01,3.087422E+01,3.026879E+01,2.967524E+01,2.909333E+01,
2.852283E+01,2.796351E+01,2.741516E+01,2.687757E+01,2.635052E+01,
2.583380E+01,2.532721E+01,2.483056E+01,2.434365E+01,2.386629E+01,
2.339828E+01,2.293946E+01,2.248963E+01,2.204862E+01,2.161626E+01,
2.119238E+01,2.077681E+01,2.036939E+01,1.996996E+01,1.957836E+01,
1.919444E+01,1.881805E+01,1.844904E+01,1.808726E+01,1.773258E+01,
1.738486E+01,1.704395E+01,1.670973E+01,1.638206E+01,1.606082E+01,
1.574588E+01,1.543711E+01,1.513440E+01,1.483762E+01,1.454666E+01,
1.426141E+01,1.398176E+01,1.370758E+01,1.343878E+01,1.317526E+01,
1.291690E+01,1.266361E+01,1.241528E+01,1.217183E+01,1.193314E+01,
1.169914E+01,1.146973E+01,1.124481E+01,1.102431E+01,1.080813E+01,
1.059619E+01,1.038840E+01,1.018469E+01,9.984978E+00,9.789179E+00,
9.597219E+00,9.409024E+00,9.224518E+00,9.043631E+00,8.866291E+00,
8.692429E+00,8.521975E+00,8.354865E+00,8.191031E+00,8.030410E+00,
7.872938E+00,7.718555E+00,7.567199E+00,7.418810E+00,7.273332E+00,
7.130706E+00,6.990878E+00,6.853791E+00,6.719392E+00,6.587629E+00,
6.458450E+00,6.331803E+00,6.207641E+00,6.085913E+00,5.966571E+00,
5.849571E+00,5.734864E+00,5.622407E+00,5.512155E+00,5.404065E+00,
5.298095E+00,5.194202E+00,5.092347E+00,4.992489E+00,4.894590E+00,
4.798610E+00,4.704512E+00,4.612259E+00,4.521816E+00,4.433146E+00,
4.346214E+00,4.260988E+00,4.177432E+00,4.095515E+00,4.015205E+00,
3.936469E+00,3.859277E+00,3.783599E+00,3.709405E+00,3.636666E+00,
3.565353E+00,3.495439E+00,3.426895E+00,3.359696E+00,3.293814E+00,
3.229225E+00,3.165902E+00,3.103820E+00,3.042956E+00,2.983286E+00,
2.924785E+00,2.867432E+00,2.811203E+00,2.756077E+00,2.702032E+00,
2.649047E+00,2.597101E+00,2.546174E+00,2.496245E+00,2.447295E+00,
2.399305E+00],
[1.175000E+02,1.151959E+02,1.129370E+02,1.107224E+02,
1.085512E+02,1.064225E+02,1.043356E+02,2.197897E+02,2.154797E+02,
2.112543E+02,2.071118E+02,2.030504E+02,1.990687E+02,1.951651E+02,
3.088380E+02,3.027819E+02,2.968445E+02,2.910236E+02,2.853168E+02,
2.797219E+02,2.742367E+02,3.863591E+02,3.787829E+02,3.713552E+02,
3.640731E+02,3.569339E+02,3.499346E+02,3.430726E+02,4.538452E+02,
4.449455E+02,4.362204E+02,4.276664E+02,4.192801E+02,4.110583E+02,
4.029977E+02,5.125952E+02,5.025435E+02,4.926889E+02,4.830276E+02,
4.735557E+02,4.642696E+02,4.551655E+02,5.637400E+02,5.526854E+02,
5.418476E+02,5.312223E+02,5.208053E+02,5.105927E+02,5.005803E+02,
4.907642E+02,4.811406E+02,4.717057E+02,4.624559E+02,4.533874E+02,
4.444967E+02,4.357804E+02,4.272350E+02,4.188572E+02,4.106437E+02,
4.025912E+02,3.946966E+02,3.869569E+02,3.793689E+02,3.719297E+02,
3.646364E+02,3.574861E+02,3.504760E+02,3.436034E+02,3.368655E+02,
3.302598E+02,3.237836E+02,3.174344E+02,3.112097E+02,3.051071E+02,
2.991241E+02,2.932585E+02,2.875079E+02,2.818700E+02,2.763427E+02,
2.709238E+02,2.656111E+02,2.604027E+02,2.552963E+02,2.502901E+02,
2.453821E+02,2.405703E+02,2.358529E+02,2.312279E+02,2.266937E+02,
2.222484E+02,2.178902E+02,2.136175E+02,2.094286E+02,2.053218E+02,
2.012956E+02,1.973483E+02,1.934784E+02,1.896844E+02,1.859648E+02,
1.823182E+02,1.787430E+02,1.752380E+02,1.718017E+02,1.684328E+02,
1.651299E+02,1.618918E+02,1.587172E+02,1.556049E+02,1.525535E+02,
1.495621E+02,1.466292E+02,1.437539E+02,1.409350E+02,1.381714E+02,
1.354619E+02,1.328056E+02,1.302013E+02,1.276482E+02,1.251451E+02,
1.226910E+02,1.202851E+02,1.179264E+02,1.156140E+02,1.133468E+02,
1.111242E+02,1.089451E+02,1.068088E+02,1.047143E+02,1.026609E+02,
1.006478E+02,9.867416E+01,9.673922E+01,9.484222E+01,9.298242E+01,
9.115910E+01,8.937152E+01,8.761900E+01,8.590085E+01,8.421638E+01,
8.256495E+01,8.094590E+01,7.935860E+01,7.780243E+01,7.627677E+01,
7.478103E+01,7.331462E+01,7.187696E+01,7.046750E+01,6.908568E+01,
6.773095E+01,6.640279E+01,6.510067E+01,6.382408E+01,6.257253E+01,
6.134552E+01,6.014257E+01,5.896321E+01,5.780698E+01,5.667342E+01,
5.556209E+01,5.447255E+01,5.340438E+01,5.235715E+01,5.133046E+01,
5.032390E+01,4.933708E+01,4.836961E+01,4.742111E+01,4.649121E+01,
4.557955E+01,4.468576E+01,4.380950E+01,4.295042E+01,4.210819E+01,
4.128248E+01,4.047295E+01,3.967930E+01,3.890121E+01,3.813839E+01,
3.739051E+01,3.665731E+01,3.593848E+01,3.523375E+01,3.454284E+01,
3.386547E+01,3.320139E+01,3.255033E+01,3.191204E+01,3.128627E+01,
3.067276E+01,3.007129E+01,2.948161E+01,2.890349E+01,2.833671E+01,
2.778105E+01,2.723628E+01,2.670219E+01,2.617858E+01,2.566523E+01,
2.516195E+01,2.466854E+01,2.418480E+01,2.371056E+01,2.324561E+01,
2.278977E+01,2.234288E+01,2.190475E+01,2.147521E+01,2.105410E+01,
2.064124E+01,2.023648E+01,1.983965E+01,1.945061E+01,1.906919E+01,
1.869526E+01,1.832865E+01,1.796924E+01,1.761688E+01,1.727142E+01,
1.693274E+01,1.660070E+01,1.627517E+01,1.595602E+01,1.564313E+01,
1.533638E+01,1.503564E+01,1.474080E+01,1.445175E+01,1.416836E+01,
1.389052E+01,1.361814E+01,1.335109E+01,1.308929E+01,1.283261E+01,
1.258098E+01,1.233427E+01,1.209240E+01,1.185528E+01,1.162280E+01,
1.139489E+01,1.117144E+01,1.095238E+01,1.073761E+01,1.052705E+01,
1.032062E+01,1.011824E+01,9.919825E+00,9.725304E+00,9.534596E+00,
9.347629E+00,9.164327E+00,8.984620E+00,8.808438E+00,8.635709E+00,
8.466368E+00,8.300348E+00,8.137583E+00,7.978010E+00,7.821566E+00,
7.668190E+00,7.517822E+00,7.370402E+00,7.225873E+00,7.084178E+00,
6.945261E+00,6.809069E+00,6.675547E+00,6.544644E+00,6.416307E+00,
6.290488E+00,6.167135E+00,6.046201E+00,5.927639E+00,5.811402E+00,
5.697443E+00,5.585720E+00,5.476188E+00,5.368803E+00,5.263524E+00,
5.160309E+00,5.059119E+00,4.959913E+00,4.862652E+00,4.767298E+00,
4.673814E+00,4.582164E+00,4.492310E+00,4.404219E+00,4.317855E+00,
4.233184E+00,4.150174E+00,4.068792E+00,3.989005E+00,3.910783E+00,
3.834095E+00,3.758911E+00,3.685201E+00,3.612936E+00,3.542089E+00,
3.472631E+00,3.404535E+00,3.337774E+00,3.272322E+00,3.208154E+00,
3.145244E+00,3.083567E+00,3.023101E+00,2.963819E+00,2.905701E+00,
2.848722E+00,2.792860E+00,2.738094E+00,2.684401E+00,2.631762E+00,
2.580155E+00,2.529559E+00,2.479956E+00,2.431326E+00,2.383649E+00,
2.336907E+00,2.291082E+00,2.246155E+00,2.202109E+00,2.158927E+00,
2.116592E+00,2.075087E+00,2.034396E+00,1.994503E+00,1.955392E+00,
1.917048E+00,1.879455E+00,1.842600E+00,1.806468E+00,1.771044E+00,
1.736315E+00,1.702267E+00,1.668887E+00,1.636161E+00,1.604077E+00,
1.572622E+00,1.541784E+00,1.511550E+00,1.481910E+00,1.452850E+00,
1.424361E+00,1.396430E+00,1.369047E+00,1.342201E+00,1.315881E+00,
1.290077E+00,1.264780E+00,1.239978E+00,1.215663E+00,1.191825E+00,
1.168454E+00,1.145541E+00,1.123078E+00,1.101055E+00,1.079464E+00,
1.058296E+00,1.037544E+00,1.017198E+00,9.972513E-01,9.776958E-01,
9.585238E-01,9.397277E-01]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_animal_dose_timeseries(a1[i], b1[i], body_wgt[i], frac_h2o[i], intake_food_conc[i], ted_empty.frac_retained_mamm[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_canopy_air_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in air within the plant canopy
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:param water_type; type of water (pore water or surface puddles)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association, rather it is one year from the day of the 1st pesticide application
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.697542E-06,2.575726E-06,2.459410E-06,5.045889E-06,4.818025E-06,4.600451E-06,
7.090244E-06,6.770060E-06,6.464335E-06,6.172416E-06,5.893680E-06,5.627531E-06,
5.373400E-06,5.130746E-06,4.899050E-06,4.677817E-06,4.466574E-06,4.264871E-06,
4.072276E-06,3.888378E-06,3.712786E-06,3.545122E-06,3.385030E-06,3.232168E-06,
3.086208E-06,2.946840E-06,2.813765E-06,2.686700E-06,2.565373E-06,2.449525E-06,
2.338908E-06,2.233287E-06,2.132435E-06,2.036138E-06,1.944189E-06,1.856393E-06,
1.772561E-06,1.692515E-06,1.616084E-06,1.543104E-06,1.473420E-06,1.406883E-06,
1.343350E-06,1.282687E-06,1.224762E-06,1.169454E-06,1.116643E-06,1.066218E-06,
1.018069E-06,9.720946E-07,9.281964E-07,8.862805E-07,8.462575E-07,8.080419E-07,
7.715520E-07,7.367100E-07,7.034413E-07,6.716750E-07,6.413433E-07,6.123812E-07,
5.847271E-07,5.583217E-07,5.331088E-07,5.090345E-07,4.860473E-07,4.640982E-07,
4.431403E-07,4.231288E-07,4.040209E-07,3.857760E-07,3.683550E-07,3.517207E-07,
3.358375E-07,3.206716E-07,3.061906E-07,2.923635E-07,2.791609E-07,2.665544E-07,
2.545172E-07,2.430237E-07,2.320491E-07,2.215701E-07,2.115644E-07,2.020105E-07,
1.928880E-07,1.841775E-07,1.758603E-07,1.679188E-07,1.603358E-07,1.530953E-07,
1.461818E-07,1.395804E-07,1.332772E-07,1.272586E-07,1.215118E-07,1.160245E-07,
1.107851E-07,1.057822E-07,1.010052E-07,9.644400E-08,9.208875E-08,8.793017E-08,
8.395938E-08,8.016791E-08,7.654765E-08,7.309089E-08,6.979022E-08,6.663860E-08,
6.362931E-08,6.075591E-08,5.801227E-08,5.539253E-08,5.289110E-08,5.050262E-08,
4.822200E-08,4.604437E-08,4.396508E-08,4.197969E-08,4.008395E-08,3.827383E-08,
3.654544E-08,3.489511E-08,3.331930E-08,3.181466E-08,3.037796E-08,2.900614E-08,
2.769627E-08,2.644555E-08,2.525131E-08,2.411100E-08,2.302219E-08,2.198254E-08,
2.098985E-08,2.004198E-08,1.913691E-08,1.827272E-08,1.744755E-08,1.665965E-08,
1.590733E-08,1.518898E-08,1.450307E-08,1.384813E-08,1.322277E-08,1.262565E-08,
1.205550E-08,1.151109E-08,1.099127E-08,1.049492E-08,1.002099E-08,9.568457E-09,
9.136361E-09,8.723777E-09,8.329826E-09,7.953664E-09,7.594489E-09,7.251534E-09,
6.924067E-09,6.611387E-09,6.312827E-09,6.027750E-09,5.755547E-09,5.495635E-09,
5.247461E-09,5.010494E-09,4.784228E-09,4.568180E-09,4.361889E-09,4.164913E-09,
3.976832E-09,3.797245E-09,3.625767E-09,3.462033E-09,3.305693E-09,3.156414E-09,
3.013875E-09,2.877773E-09,2.747818E-09,2.623731E-09,2.505247E-09,2.392114E-09,
2.284090E-09,2.180944E-09,2.082456E-09,1.988416E-09,1.898622E-09,1.812884E-09,
1.731017E-09,1.652847E-09,1.578207E-09,1.506938E-09,1.438887E-09,1.373909E-09,
1.311865E-09,1.252624E-09,1.196057E-09,1.142045E-09,1.090472E-09,1.041228E-09,
9.942080E-10,9.493112E-10,9.064418E-10,8.655083E-10,8.264234E-10,7.891034E-10,
7.534688E-10,7.194433E-10,6.869544E-10,6.559327E-10,6.263118E-10,5.980286E-10,
5.710225E-10,5.452361E-10,5.206141E-10,4.971040E-10,4.746556E-10,4.532209E-10,
4.327542E-10,4.132117E-10,3.945517E-10,3.767344E-10,3.597217E-10,3.434772E-10,
3.279663E-10,3.131559E-10,2.990143E-10,2.855113E-10,2.726180E-10,2.603070E-10,
2.485520E-10,2.373278E-10,2.266104E-10,2.163771E-10,2.066058E-10,1.972759E-10,
1.883672E-10,1.798608E-10,1.717386E-10,1.639832E-10,1.565779E-10,1.495071E-10,
1.427556E-10,1.363090E-10,1.301535E-10,1.242760E-10,1.186639E-10,1.133052E-10,
1.081885E-10,1.033029E-10,9.863793E-11,9.418360E-11,8.993042E-11,8.586930E-11,
8.199158E-11,7.828897E-11,7.475357E-11,7.137782E-11,6.815451E-11,6.507676E-11,
6.213800E-11,5.933195E-11,5.665261E-11,5.409427E-11,5.165146E-11,4.931896E-11,
4.709180E-11,4.496521E-11,4.293465E-11,4.099579E-11,3.914449E-11,3.737678E-11,
3.568891E-11,3.407726E-11,3.253838E-11,3.106900E-11,2.966597E-11,2.832631E-11,
2.704714E-11,2.582573E-11,2.465948E-11,2.354590E-11,2.248260E-11,2.146733E-11,
2.049790E-11,1.957224E-11,1.868839E-11,1.784445E-11,1.703863E-11,1.626919E-11,
1.553450E-11,1.483299E-11,1.416315E-11,1.352357E-11,1.291287E-11,1.232974E-11,
1.177295E-11,1.124130E-11,1.073366E-11,1.024895E-11,9.786122E-12,9.344196E-12,
8.922227E-12,8.519314E-12,8.134595E-12,7.767250E-12,7.416493E-12,7.081576E-12,
6.761784E-12,6.456433E-12,6.164870E-12,5.886475E-12,5.620651E-12,5.366831E-12,
5.124474E-12,4.893061E-12,4.672098E-12,4.461114E-12,4.259657E-12,4.067298E-12,
3.883625E-12,3.708247E-12,3.540788E-12,3.380892E-12,3.228216E-12,3.082435E-12,
2.943237E-12,2.810325E-12,2.683416E-12,2.562237E-12,2.446530E-12,2.336049E-12,
2.230557E-12,2.129828E-12,2.033649E-12,1.941812E-12,1.854123E-12,1.770394E-12,
1.690446E-12,1.614108E-12,1.541218E-12,1.471619E-12,1.405163E-12,1.341708E-12,
1.281118E-12,1.223265E-12,1.168025E-12,1.115278E-12,1.064914E-12,1.016824E-12,
9.709062E-13,9.270617E-13,8.851971E-13,8.452230E-13,8.070541E-13,7.706088E-13,
7.358093E-13,7.025814E-13,6.708539E-13,6.405592E-13,6.116326E-13,5.840123E-13,
5.576392E-13,5.324571E-13,5.084122E-13,4.854531E-13,4.635308E-13,4.425985E-13],
[1.747062E-05,1.699289E-05,1.652822E-05,1.607625E-05,1.563665E-05,1.520906E-05,
1.479317E-05,3.185927E-05,3.098808E-05,3.014071E-05,2.931651E-05,2.851485E-05,
2.773511E-05,2.697669E-05,4.370963E-05,4.251439E-05,4.135183E-05,4.022106E-05,
3.912122E-05,3.805144E-05,3.701093E-05,5.346948E-05,5.200736E-05,5.058521E-05,
4.920196E-05,4.785653E-05,4.654789E-05,4.527503E-05,6.150761E-05,5.982568E-05,
5.818974E-05,5.659854E-05,5.505085E-05,5.354548E-05,5.208128E-05,5.065711E-05,
4.927189E-05,4.792455E-05,4.661405E-05,4.533939E-05,4.409958E-05,4.289367E-05,
4.172074E-05,4.057989E-05,3.947023E-05,3.839091E-05,3.734111E-05,3.632002E-05,
3.532684E-05,3.436083E-05,3.342123E-05,3.250733E-05,3.161841E-05,3.075380E-05,
2.991284E-05,2.909487E-05,2.829927E-05,2.752543E-05,2.677274E-05,2.604064E-05,
2.532856E-05,2.463595E-05,2.396227E-05,2.330703E-05,2.266969E-05,2.204979E-05,
2.144684E-05,2.086037E-05,2.028994E-05,1.973511E-05,1.919546E-05,1.867056E-05,
1.816001E-05,1.766342E-05,1.718041E-05,1.671062E-05,1.625366E-05,1.580921E-05,
1.537690E-05,1.495642E-05,1.454744E-05,1.414964E-05,1.376271E-05,1.338637E-05,
1.302032E-05,1.266428E-05,1.231797E-05,1.198114E-05,1.165351E-05,1.133485E-05,
1.102489E-05,1.072342E-05,1.043019E-05,1.014497E-05,9.867557E-06,9.597728E-06,
9.335278E-06,9.080004E-06,8.831711E-06,8.590207E-06,8.355308E-06,8.126831E-06,
7.904603E-06,7.688451E-06,7.478210E-06,7.273718E-06,7.074818E-06,6.881356E-06,
6.693185E-06,6.510160E-06,6.332139E-06,6.158987E-06,5.990569E-06,5.826756E-06,
5.667423E-06,5.512447E-06,5.361709E-06,5.215093E-06,5.072486E-06,4.933779E-06,
4.798864E-06,4.667639E-06,4.540002E-06,4.415856E-06,4.295104E-06,4.177654E-06,
4.063416E-06,3.952301E-06,3.844226E-06,3.739105E-06,3.636859E-06,3.537409E-06,
3.440678E-06,3.346593E-06,3.255080E-06,3.166070E-06,3.079493E-06,2.995284E-06,
2.913378E-06,2.833712E-06,2.756224E-06,2.680855E-06,2.607546E-06,2.536243E-06,
2.466889E-06,2.399432E-06,2.333819E-06,2.270001E-06,2.207928E-06,2.147552E-06,
2.088827E-06,2.031708E-06,1.976151E-06,1.922113E-06,1.869552E-06,1.818429E-06,
1.768704E-06,1.720339E-06,1.673296E-06,1.627540E-06,1.583035E-06,1.539747E-06,
1.497642E-06,1.456689E-06,1.416856E-06,1.378112E-06,1.340427E-06,1.303773E-06,
1.268121E-06,1.233445E-06,1.199716E-06,1.166910E-06,1.135001E-06,1.103964E-06,
1.073776E-06,1.044413E-06,1.015854E-06,9.880754E-07,9.610564E-07,9.347762E-07,
9.092147E-07,8.843522E-07,8.601696E-07,8.366482E-07,8.137700E-07,7.915174E-07,
7.698733E-07,7.488211E-07,7.283445E-07,7.084279E-07,6.890559E-07,6.702136E-07,
6.518866E-07,6.340607E-07,6.167223E-07,5.998580E-07,5.834549E-07,5.675003E-07,
5.519819E-07,5.368880E-07,5.222067E-07,5.079270E-07,4.940377E-07,4.805282E-07,
4.673881E-07,4.546074E-07,4.421761E-07,4.300848E-07,4.183241E-07,4.068850E-07,
3.957587E-07,3.849367E-07,3.744105E-07,3.641723E-07,3.542140E-07,3.445280E-07,
3.351068E-07,3.259433E-07,3.170304E-07,3.083612E-07,2.999290E-07,2.917274E-07,
2.837501E-07,2.759910E-07,2.684440E-07,2.611034E-07,2.539635E-07,2.470188E-07,
2.402641E-07,2.336941E-07,2.273037E-07,2.210881E-07,2.150424E-07,2.091620E-07,
2.034425E-07,1.978794E-07,1.924683E-07,1.872053E-07,1.820861E-07,1.771070E-07,
1.722640E-07,1.675534E-07,1.629717E-07,1.585152E-07,1.541806E-07,1.499645E-07,
1.458637E-07,1.418751E-07,1.379955E-07,1.342220E-07,1.305517E-07,1.269817E-07,
1.235094E-07,1.201320E-07,1.168470E-07,1.136518E-07,1.105440E-07,1.075212E-07,
1.045810E-07,1.017212E-07,9.893968E-08,9.623416E-08,9.360264E-08,9.104307E-08,
8.855349E-08,8.613199E-08,8.377671E-08,8.148583E-08,7.925759E-08,7.709029E-08,
7.498225E-08,7.293186E-08,7.093753E-08,6.899774E-08,6.711100E-08,6.527584E-08,
6.349087E-08,6.175471E-08,6.006602E-08,5.842352E-08,5.682592E-08,5.527201E-08,
5.376060E-08,5.229051E-08,5.086062E-08,4.946984E-08,4.811708E-08,4.680132E-08,
4.552153E-08,4.427674E-08,4.306599E-08,4.188835E-08,4.074291E-08,3.962880E-08,
3.854515E-08,3.749113E-08,3.646593E-08,3.546877E-08,3.449887E-08,3.355550E-08,
3.263792E-08,3.174544E-08,3.087735E-08,3.003301E-08,2.921176E-08,2.841296E-08,
2.763601E-08,2.688030E-08,2.614526E-08,2.543031E-08,2.473492E-08,2.405854E-08,
2.340066E-08,2.276077E-08,2.213837E-08,2.153300E-08,2.094418E-08,2.037146E-08,
1.981440E-08,1.927257E-08,1.874556E-08,1.823296E-08,1.773438E-08,1.724944E-08,
1.677775E-08,1.631896E-08,1.587272E-08,1.543868E-08,1.501651E-08,1.460588E-08,
1.420648E-08,1.381800E-08,1.344015E-08,1.307263E-08,1.271516E-08,1.236746E-08,
1.202927E-08,1.170033E-08,1.138038E-08,1.106919E-08,1.076650E-08,1.047209E-08,
1.018573E-08,9.907199E-09,9.636286E-09,9.372782E-09,9.116482E-09,8.867192E-09,
8.624718E-09,8.388874E-09,8.159480E-09,7.936359E-09,7.719339E-09,7.508253E-09,
7.302939E-09,7.103240E-09,6.909002E-09,6.720075E-09,6.536314E-09,6.357578E-09,
6.183730E-09,6.014635E-09,5.850165E-09,5.690192E-09,5.534593E-09,5.383249E-09],
[1.133578E-07,1.111350E-07,1.089557E-07,1.068191E-07,1.047245E-07,1.026709E-07,
1.006576E-07,9.868374E-08,9.674861E-08,9.485143E-08,9.299145E-08,9.116795E-08,
8.938020E-08,8.762751E-08,8.590918E-08,8.422456E-08,8.257297E-08,8.095376E-08,
7.936631E-08,7.780998E-08,7.628418E-08,7.478829E-08,7.332174E-08,7.188394E-08,
7.047434E-08,6.909238E-08,6.773752E-08,6.640923E-08,6.510699E-08,6.383028E-08,
6.257861E-08,6.135148E-08,6.014841E-08,5.896894E-08,5.781259E-08,5.667892E-08,
5.556749E-08,5.447784E-08,5.340956E-08,5.236223E-08,5.133544E-08,5.032879E-08,
4.934187E-08,4.837431E-08,4.742571E-08,4.649573E-08,4.558397E-08,4.469010E-08,
4.381375E-08,4.295459E-08,4.211228E-08,4.128648E-08,4.047688E-08,3.968315E-08,
3.890499E-08,3.814209E-08,3.739414E-08,3.666087E-08,3.594197E-08,3.523717E-08,
3.454619E-08,3.386876E-08,3.320462E-08,3.255349E-08,3.191514E-08,3.128930E-08,
3.067574E-08,3.007421E-08,2.948447E-08,2.890630E-08,2.833946E-08,2.778374E-08,
2.723892E-08,2.670478E-08,2.618112E-08,2.566772E-08,2.516439E-08,2.467093E-08,
2.418715E-08,2.371286E-08,2.324786E-08,2.279199E-08,2.234505E-08,2.190688E-08,
2.147730E-08,2.105614E-08,2.064324E-08,2.023844E-08,1.984158E-08,1.945250E-08,
1.907104E-08,1.869707E-08,1.833043E-08,1.797099E-08,1.761859E-08,1.727310E-08,
1.693438E-08,1.660231E-08,1.627675E-08,1.595757E-08,1.564465E-08,1.533787E-08,
1.503710E-08,1.474223E-08,1.445315E-08,1.416973E-08,1.389187E-08,1.361946E-08,
1.335239E-08,1.309056E-08,1.283386E-08,1.258220E-08,1.233547E-08,1.209358E-08,
1.185643E-08,1.162393E-08,1.139599E-08,1.117252E-08,1.095344E-08,1.073865E-08,
1.052807E-08,1.032162E-08,1.011922E-08,9.920788E-09,9.726248E-09,9.535522E-09,
9.348536E-09,9.165217E-09,8.985493E-09,8.809293E-09,8.636548E-09,8.467190E-09,
8.301154E-09,8.138373E-09,7.978785E-09,7.822326E-09,7.668935E-09,7.518552E-09,
7.371117E-09,7.226574E-09,7.084866E-09,6.945936E-09,6.809730E-09,6.676195E-09,
6.545279E-09,6.416930E-09,6.291098E-09,6.167734E-09,6.046788E-09,5.928214E-09,
5.811966E-09,5.697997E-09,5.586262E-09,5.476719E-09,5.369324E-09,5.264035E-09,
5.160810E-09,5.059610E-09,4.960394E-09,4.863124E-09,4.767761E-09,4.674268E-09,
4.582609E-09,4.492746E-09,4.404646E-09,4.318274E-09,4.233595E-09,4.150577E-09,
4.069187E-09,3.989392E-09,3.911163E-09,3.834467E-09,3.759276E-09,3.685559E-09,
3.613287E-09,3.542433E-09,3.472968E-09,3.404865E-09,3.338098E-09,3.272640E-09,
3.208465E-09,3.145549E-09,3.083867E-09,3.023394E-09,2.964107E-09,2.905983E-09,
2.848998E-09,2.793131E-09,2.738360E-09,2.684662E-09,2.632017E-09,2.580405E-09,
2.529805E-09,2.480197E-09,2.431562E-09,2.383880E-09,2.337134E-09,2.291304E-09,
2.246373E-09,2.202323E-09,2.159137E-09,2.116798E-09,2.075288E-09,2.034593E-09,
1.994696E-09,1.955581E-09,1.917234E-09,1.879638E-09,1.842779E-09,1.806644E-09,
1.771216E-09,1.736484E-09,1.702433E-09,1.669049E-09,1.636320E-09,1.604233E-09,
1.572775E-09,1.541933E-09,1.511697E-09,1.482054E-09,1.452991E-09,1.424499E-09,
1.396566E-09,1.369180E-09,1.342331E-09,1.316009E-09,1.290203E-09,1.264903E-09,
1.240099E-09,1.215781E-09,1.191940E-09,1.168567E-09,1.145652E-09,1.123187E-09,
1.101162E-09,1.079568E-09,1.058399E-09,1.037644E-09,1.017297E-09,9.973481E-10,
9.777907E-10,9.586168E-10,9.398189E-10,9.213897E-10,9.033218E-10,8.856082E-10,
8.682420E-10,8.512163E-10,8.345244E-10,8.181599E-10,8.021163E-10,7.863873E-10,
7.709667E-10,7.558485E-10,7.410268E-10,7.264957E-10,7.122496E-10,6.982828E-10,
6.845899E-10,6.711655E-10,6.580044E-10,6.451013E-10,6.324513E-10,6.200493E-10,
6.078905E-10,5.959701E-10,5.842835E-10,5.728261E-10,5.615933E-10,5.505808E-10,
5.397842E-10,5.291994E-10,5.188221E-10,5.086483E-10,4.986741E-10,4.888954E-10,
4.793084E-10,4.699095E-10,4.606948E-10,4.516609E-10,4.428041E-10,4.341210E-10,
4.256081E-10,4.172622E-10,4.090800E-10,4.010581E-10,3.931936E-10,3.854834E-10,
3.779243E-10,3.705134E-10,3.632479E-10,3.561248E-10,3.491414E-10,3.422949E-10,
3.355828E-10,3.290022E-10,3.225506E-10,3.162256E-10,3.100246E-10,3.039452E-10,
2.979851E-10,2.921418E-10,2.864130E-10,2.807966E-10,2.752904E-10,2.698921E-10,
2.645997E-10,2.594111E-10,2.543242E-10,2.493370E-10,2.444477E-10,2.396542E-10,
2.349547E-10,2.303474E-10,2.258304E-10,2.214020E-10,2.170605E-10,2.128041E-10,
2.086311E-10,2.045400E-10,2.005291E-10,1.965968E-10,1.927417E-10,1.889621E-10,
1.852567E-10,1.816239E-10,1.780624E-10,1.745707E-10,1.711475E-10,1.677914E-10,
1.645011E-10,1.612753E-10,1.581128E-10,1.550123E-10,1.519726E-10,1.489925E-10,
1.460709E-10,1.432065E-10,1.403983E-10,1.376452E-10,1.349461E-10,1.322999E-10,
1.297055E-10,1.271621E-10,1.246685E-10,1.222238E-10,1.198271E-10,1.174774E-10,
1.151737E-10,1.129152E-10,1.107010E-10,1.085302E-10,1.064020E-10,1.043156E-10,
1.022700E-10,1.002645E-10,9.829841E-11,9.637084E-11,9.448107E-11,9.262835E-11,
9.081196E-11,8.903120E-11,8.728535E-11,8.557374E-11,8.389569E-11,8.225054E-11]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. # m
ted_empty.hectare_area = 10000. # m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 # kg/L
# internally calculated variable (hlc in atm-m3/mol are 2.0e-7, 1.0e-5, 3.5e-6)
ted_empty.log_unitless_hlc = pd.Series([-5.087265, -3.388295, -3.844227], dtype='float')
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2.75, 4., 6.], dtype='float')
ted_empty.foliar_diss_hlife = | pd.Series([15., 25., 35.]) | pandas.Series |
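# Editor's note: a minimal sketch (not part of the original test) showing how the hard-coded
# log_unitless_hlc values above can be reproduced from the hlc values quoted in the comment.
# The gas constant R and temperature T below are assumptions (standard conditions); they are
# not stated in the original source, but they reproduce the series to within ~0.001.
import numpy as np
import pandas as pd

hlc = pd.Series([2.0e-7, 1.0e-5, 3.5e-6])   # atm-m3/mol, from the comment above
R = 8.205746e-5                             # atm-m3/(mol*K), assumed
T = 298.15                                  # K, assumed
log_unitless_hlc = np.log10(hlc / (R * T))  # approx. [-5.0875, -3.3885, -3.8445]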
import numpy as np
from scipy.stats import norm, lognorm
import pandas as pd
class prospect_confidence(object):
"""
    :param verbose: If verbose is True, prints debugging information.
        If verbose is False, the class does not generate any output.
"""
# constructor
def __init__(self, verbose = False):
"""
Constructor method
"""
self.verbose = verbose
def calculate_cumulative_conf(self,
areaP90: float=1.,
areaP10: float=10.,
pdP90: float=10.,
                                  pdP10: float=24.):
"""Calculate cumulative confidence level for expected development size in MW
Args:
areaP90 (float): pessimistic area in sqkm
areaP10 (float): optimistic area in sqkm
pdP90 (float): pessimistic power density in MWe/sqkm
pdP10 (float): optimistic power density in MWe/sqkm
Returns:
prob_df (pandas Dataframe): cumulative confidence curve in Reservoir Size
"""
assert isinstance(areaP90, float), "areaP90 variable data type expected to be float"
assert isinstance(areaP10, float), "areaP10 variable data type expected to be float"
assert isinstance(pdP90, float), "pdP90 variable data type expected to be float"
assert isinstance(pdP10, float), "pdP10 variable data type expected to be float"
if self.verbose:
print("areaP90: " , areaP90 )
print("areaP10: " , areaP10 )
print("pdP90: " , pdP90 )
print("pdP10: " , pdP10 )
# calculate area > 250 °C
area_mu = ((np.log(areaP90)+np.log(areaP10))/2)
area_sigma = (np.log(areaP10)-np.log(areaP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))
# calculate powerdensity mean and standard dev
powerdens_mu = ((np.log(pdP90)+np.log(pdP10))/2)
powerdens_sigma = (np.log(pdP10)-np.log(pdP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))
capacity_mu = area_mu + powerdens_mu
capacity_sigma = ((area_sigma**2)+(powerdens_sigma**2))**0.5
eds = [lognorm.ppf(x/100, capacity_sigma, loc=0, scale=np.exp(capacity_mu)) for x in range(0,100)]
indx = list(np.arange(0,100)[::-1])
edsepc_tups = list(zip(indx,eds))
prob_df = | pd.DataFrame(edsepc_tups, columns = ['Cumulative confidence (%)', 'expected development size (MW)']) | pandas.DataFrame |
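# Editor's note: a hedged usage sketch (not part of the original module), assuming
# calculate_cumulative_conf goes on to return prob_df as its docstring states.
pc = prospect_confidence(verbose=False)
prob_df = pc.calculate_cumulative_conf(areaP90=1., areaP10=10., pdP90=10., pdP10=24.)
# prob_df pairs a cumulative confidence (100% down to 1%) with the lognormal quantile of the
# expected development size in MW; the P50 equals exp(capacity_mu) = sqrt(areaP90*areaP10*pdP90*pdP10).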
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
import datetime
from .dataset import Dataset
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError
class Pdac(Dataset):
def __init__(self, version="latest", no_internet=False):
"""Load all of the dataframes as values in the self._data dict variable, with names as keys, and format them properly.
Parameters:
        version (str, optional): The version number to load, or the string "latest" to just load the latest version of the data. Default is "latest".
no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
"""
# Set some needed variables, and pass them to the parent Dataset class __init__ function
# This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
valid_versions = ["1.0"]
data_files = {
"1.0": [
"clinical_table_140.tsv.gz",
"microRNA_TPM_log2_Normal.cct.gz",
"microRNA_TPM_log2_Tumor.cct.gz",
"meta_table_140.tsv.gz",
"mRNA_RSEM_UQ_log2_Normal.cct.gz",
"mRNA_RSEM_UQ_log2_Tumor.cct.gz",
"PDAC_mutation.maf.gz",
"phosphoproteomics_site_level_MD_abundance_normal.cct.gz",
"phosphoproteomics_site_level_MD_abundance_tumor.cct.gz",
"proteomics_gene_level_MD_abundance_normal.cct.gz",
"proteomics_gene_level_MD_abundance_tumor.cct.gz",
"RNA_fusion_unfiltered_normal.tsv.gz",
"RNA_fusion_unfiltered_tumor.tsv.gz",
"SCNA_log2_gene_level.cct.gz"],
}
# Call the parent class __init__ function
super().__init__(cancer_type="pdac", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet)
# Load the data into dataframes in the self._data dict
loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below
mark_normal = lambda s: s + ".N"
remove_type_tag = lambda s: s[:-2] # remove _T and similar tags from end of string
if file_name == "clinical_table_140.tsv.gz": # Note that we use the "file_name" variable to identify files. That way we don't have to use the whole path.
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.rename_axis("Patient_ID", axis="index")
df = df.sort_index()
df.columns.name = "Name"
df["Sample_Tumor_Normal"] = "Tumor"
self._data["clinical"] = df
elif file_name == "meta_table_140.tsv.gz":
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["derived_molecular"] = df
elif file_name == "microRNA_TPM_log2_Normal.cct.gz":
df_normal = pd.read_csv(file_path, sep='\t', index_col=0)
df_normal = df_normal.sort_index()
df_normal = df_normal.transpose()
df_normal = df_normal.rename(index=mark_normal)
# merge tumor and normal if tumor data has already been read
if "miRNA" in self._data:
df_tumor = self._data["miRNA"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["miRNA"] = df_combined
else:
self._data["miRNA"] = df_normal
elif file_name == "microRNA_TPM_log2_Tumor.cct.gz":
df_tumor = pd.read_csv(file_path, sep='\t', index_col=0)
df_tumor = df_tumor.sort_index()
df_tumor = df_tumor.transpose()
# merge tumor and normal if normal data has already been read
if "miRNA" in self._data:
df_normal = self._data["miRNA"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["miRNA"] = df_combined
else:
self._data["miRNA"] = df_tumor
elif file_name == "mRNA_RSEM_UQ_log2_Normal.cct.gz":
# create df for normal data
df_normal = pd.read_csv(file_path, sep='\t', index_col=0)
df_normal = df_normal.sort_index()
df_normal = df_normal.transpose()
df_normal = df_normal.rename(index=mark_normal)
# merge tumor and normal if tumor data has already been read
if "transcriptomics" in self._data:
df_tumor = self._data["transcriptomics"]
df_combined = | pd.concat([df_normal, df_tumor]) | pandas.concat |
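# Editor's note: a toy sketch (not from the loader above) of the tumor/normal merge pattern it
# uses - normal samples get a ".N" suffix and are stacked beneath the tumor rows. The patient
# IDs and gene column below are hypothetical.
import pandas as pd

mark_normal = lambda s: s + ".N"
df_tumor = pd.DataFrame({"geneA": [1.0, 2.0]}, index=["C3L-0001", "C3L-0002"])
df_normal = pd.DataFrame({"geneA": [0.5]}, index=["C3L-0001"]).rename(index=mark_normal)
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"   # rows: C3L-0001.N, C3L-0001, C3L-0002
df_combined.columns.name = "Name"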
import pandas as pd
import numpy as np
df = pd.read_csv("https://github.com/chris1610/pbpython/blob/master/data/sales_data_types.csv?raw=True",
dtype={'Customer Number':'int'},
converters={'2016': lambda x: float(x.replace('$','').replace(',','')),
'2017': lambda x: float(x.replace('$','').replace(',','')),
'Percent Growth': lambda x: float(x.replace('%', '')) / 100,
'Jan Units': lambda x: pd.to_numeric(x , errors='coerce'),
'Active': lambda x: np.where( x == 'Y', True, False)
})
df['Start_Date'] = | pd.to_datetime(df[['Month','Day','Year']]) | pandas.to_datetime |
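# Editor's note: a minimal offline sketch of the same read_csv dtype/converters pattern, using
# an in-memory CSV instead of the GitHub URL so it runs without network access. The column
# values below are illustrative, not taken from the original file.
import io
import numpy as np
import pandas as pd

csv_text = ("Customer Number,2016,2017,Percent Growth,Jan Units,Active,Month,Day,Year\n"
            "10002,\"$125,000.00\",\"$162,500.00\",30.00%,500,Y,1,10,2015\n")
df_demo = pd.read_csv(io.StringIO(csv_text),
                      dtype={'Customer Number': 'int'},
                      converters={'2016': lambda x: float(x.replace('$', '').replace(',', '')),
                                  '2017': lambda x: float(x.replace('$', '').replace(',', '')),
                                  'Percent Growth': lambda x: float(x.replace('%', '')) / 100,
                                  'Jan Units': lambda x: pd.to_numeric(x, errors='coerce'),
                                  'Active': lambda x: np.where(x == 'Y', True, False)})
df_demo['Start_Date'] = pd.to_datetime(df_demo[['Month', 'Day', 'Year']])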
# %load ../../src/models/model_utils.py
# %%writefile ../../src/models/model_utils.py
"""
Author: <NAME>
Created in the scope of my PhD
"""
import pandas as pd
import numpy as np
import sklearn as sk
import math
import itertools
from scipy import stats
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression, Ridge, Lasso, HuberRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVC, SVR
from sklearn.preprocessing import PolynomialFeatures
def CreateRankedLabels(a):
pw = list(itertools.combinations(a,2))
labels = [1 if item[0]>item[1] else -1 for item in pw]
return labels
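# Worked example (editor's note, values are illustrative):
#   CreateRankedLabels([3.0, 1.0, 2.0])
#   -> pairs (3.0, 1.0), (3.0, 2.0), (1.0, 2.0) via itertools.combinations
#   -> labels [1, 1, -1], i.e. +1 when the first element of the pair is the larger one.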
def GetParameterSet(parLabel, parRange):
"""Retrieve a set of parameter values used for training of a model in sklearn.
Parameters
-----------
parLabel : 1-dimensional numpy array (str)
numpy array holding a set of parameter labels. Valid labels include:
[alpha, gamma, C, coef0, epsilon, max_depth, min_samples, max_features]
parRange : 1-dimensional numpy array (int)
numpy array with the amount of parameters returned for every parameter label.
parLabel and parRange must be of the same dimension.
Returns
--------
parSet : Dictionary
Dictionary containing a set of parameters for every label
"""
if parLabel[0] in ['max_depth','min_samples_split', 'max_features']:
parameters = [np.zeros(parRange[u],dtype=np.int) for u in range(len(parRange))]
else:
parameters = [np.zeros(parRange[u]) for u in range(len(parRange))]
for i in range(len(parLabel)):
if parLabel[i] == "alpha":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "gamma":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "C":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "coef0":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "epsilon":
parameters[i][:] = [0+2/parRange[i]*u for u in range(parRange[i])]
elif parLabel[i] == "max_depth":
parameters[i][:] = [int(u+1) for u in range(parRange[i])]
elif parLabel[i] == 'min_samples_split':
parameters[i][:] = [int(u+2) for u in range(parRange[i])]
elif parLabel[i] == 'max_features':
parameters[i][:] = [int(u+2) for u in range(parRange[i])]
else:
return print("Not a valid parameter")
parSet = {parLabel[u]:parameters[u] for u in range(len(parLabel))}
return parSet
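# Worked example (editor's note, values are illustrative):
#   GetParameterSet(['alpha', 'C'], [3, 3])
#   -> {'alpha': array([0.01, 0.1, 1.]), 'C': array([0.01, 0.1, 1.])}
#   since each grid value is 10**(u - round(n/2)) for u in range(n).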
def EvaluateParameterSet(X_train, X_test, y_train, y_test, parModel, parSet):
"""Evaluate the scores of a set of parameters for a given model.
Parameters
-----------
X_train:
Training dataset features
X_test:
Test dataset features
    y_train:
        Training dataset labels
    y_test:
        Test dataset labels
parModel: Dictionary
parSet : Dictionary
Dictionary holding the parameter label and values over which the model has to be
evaluated. This can be created through the function GetParameterSet.
Accepted keys are:
[alpha, gamma, C, coef0, epsilon, max_depth, min_samples, max_features]
Returns
--------
scores: 1-dimensional numpy array: int
Fitted scores of the model with each of the parametersSets
optimalPar: int
Optimal parameter value for a given parameter label
"""
    scores = np.array([])
    # The parameter label to sweep over is taken from the (single) key of parSet.
    parLabel = list(parSet.keys())[0]
    for i in range(len(parSet[parLabel])):
        parSetIt = {parLabel: parSet[parLabel][i]}
        model = SelectModel(**parModel, **parSetIt)
        model.fit(X_train, y_train)
        scores = np.append(scores, model.score(X_test, y_test))
    optimalPar = parSet[parLabel][np.argmax(scores)]
    return scores, optimalPar
def EvaluateScore(X_train, X_test, y_train, y_test, parModel, scoring='default', pw=False):
"""Evaluates the score of a model given for a given test and training data
Parameters
-----------
X_train, X_test: DataFrame
Test and training data of the features
y_train, y_test: 1-dimensional numpy array
Test and training data of the labels
parModel: dictionary
Parameters indicating the model and some of its features
Returns
--------
score: int
Score of the test data on the model
y_pred: 1-dimensional array
An array giving the predicted labels for a given test set
"""
model = SelectModel(**parModel)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
if scoring == 'default':
score = model.score(X_test,y_test)
elif scoring == 'kt':
if pw is True:
score = KendallTau(y_pred, y_test)
if pw is False:
y_pred_pw = CreateRankedLabels(y_pred)
y_test_pw = CreateRankedLabels(y_test)
score = KendallTau(y_pred_pw, y_test_pw)
elif scoring == 'spearman':
score = stats.spearmanr(y_test, y_pred)[0]
else:
raise("Scoring type not defined. Possible options are: 'default', 'kt', and 'spearman'")
return score, y_pred
def KendallTau(y_pred, y_true):
a = np.array(y_pred)
b = np.array(y_true)
n = len(y_pred)
score = (np.sum(a==b)-np.sum(a!=b))/n
return score
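# Worked example (editor's note, values are illustrative):
#   KendallTau([1, -1, 1], [1, 1, 1]) -> (2 - 1) / 3 = 0.33
#   i.e. (number of matching pairwise labels - number of mismatches) / number of pairs.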
def LearningCurveInSample(dfDataset, featureBox, y ,parModel, scoring='default', k=5, pw=False, step=1):
"""Calculates the learning curve of a dataset for a given model
Parameters
-----------
    dfDataset: Dataframe
        Dataframe holding the sequence identifiers (column 'ID', or 'ID_1'/'ID_2' for pairwise data)
    featureBox: Dataframe
        Feature matrix for the full dataset
    y: 1-dimensional numpy array
        Labels for the full dataset
    parModel: Dictionary
        Parameters indicating the model and some of its features
    k: int
        Number of cross-validation folds
    pw: Boolean
        Whether the data are pairwise
    step: int
        Number of identifiers added to the training set at each step
    Returns
    --------
    scores: 2-dimensional numpy array (k x number of training-set sizes)
        Learning-curve scores for each cross-validation fold
"""
X = featureBox.values
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = pd.Series(temp[:-(len(temp)%k)])
else:
dfId = dfDataset['ID'][:-(len(dfDataset)%k)]
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.array([])
it=0
for i in range(k):
boolTest = np.logical_and(indexId>=i*lenId/k,indexId<(i+1)*lenId/k)
test = Id[boolTest]
train = Id[np.invert(boolTest)]
if pw is True:
indexTest = (dfDataset['ID_1'].isin(test) | dfDataset['ID_2'].isin(test)).values
else:
indexTest = dfDataset['ID'].isin(test).values
dfDatasetTrain = dfDataset[np.invert(indexTest)]
X_train, y_train = featureBox[np.invert(indexTest)], y[np.invert(indexTest)]
X_test, y_test = featureBox[indexTest], y[indexTest]
for j in range((len(train)-5)//step):
print("\rProgress {:2.1%}".format(it/k+(j/len(train)/k*step)), end='')
trainInner = train[:(j*step)+5]
if pw is True:
indexTrainInner = (dfDatasetTrain['ID_1'].isin(trainInner) & dfDatasetTrain['ID_2'].isin(trainInner)).values
else:
indexTrainInner = (dfDatasetTrain['ID'].isin(trainInner)).values
X_trainInner, y_trainInner = X_train[indexTrainInner], y_train[indexTrainInner]
score, y_pred = EvaluateScore(X_trainInner, X_test, y_trainInner, y_test, {**parModel}, scoring, pw)
scores = np.append(scores,score)
it+=1
scores = scores.reshape((k,-1))
return scores
def LearningCurveInSampleEnriched(dfDataset, featureBox, enrichBox, y, y_enrich ,parModel,
scoring='default', k=5, pw=True, step=1):
"""Calculates the learning curve of an enriched dataset for a given model
Parameters
-----------
    dfDataset: Dataframe
        Dataframe holding the sequence identifiers (column 'ID', or 'ID_1'/'ID_2' for pairwise data)
    featureBox: Dataframe
        Feature matrix for the full dataset
    enrichBox: Dataframe
        Feature matrix of the enrichment data added to every training set
    y: 1-dimensional numpy array
        Labels for the full dataset
    y_enrich: 1-dimensional numpy array
        Labels of the enrichment data
    parModel: Dictionary
        Parameters indicating the model and some of its features
    k: int
        Number of cross-validation folds
    pw: Boolean
        Whether the data are pairwise
    step: int
        Number of identifiers added to the training set at each step
    Returns
    --------
    scores: 2-dimensional numpy array (k x number of training-set sizes)
        Learning-curve scores for each cross-validation fold
"""
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = pd.Series(temp[:-(len(temp)%k)])
else:
dfId = dfDataset['ID'][:-(len(dfDataset)%k)]
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.array([])
it=0
for i in range(k):
boolTest = np.logical_and(indexId>=i*lenId/k,indexId<(i+1)*lenId/k)
test = Id[boolTest]
train = Id[np.invert(boolTest)]
if pw is True:
indexTest = (dfDataset['ID_1'].isin(test) | dfDataset['ID_2'].isin(test)).values
else:
indexTest = dfDataset['ID'].isin(test).values
dfDatasetTrain = dfDataset[np.invert(indexTest)]
X_train = featureBox[np.invert(indexTest)]
y_train = y[np.invert(indexTest)]
X_test, y_test = featureBox[indexTest], y[indexTest]
for j in range((len(train))//step):
print("\rProgress {:2.1%}".format(it/k+(j/len(train)/k*step)), end='')
trainInner = train[:(j*step)]
if pw is True:
indexTrainInner = (dfDatasetTrain['ID_1'].isin(trainInner) & dfDatasetTrain['ID_2'].isin(trainInner)).values
else:
indexTrainInner = (dfDatasetTrain['ID'].isin(trainInner)).values
X_trainInner = np.vstack((enrichBox,X_train[indexTrainInner]))
y_trainInner = np.append(y_enrich, y_train[indexTrainInner])
score, y_pred = EvaluateScore(X_trainInner, X_test, y_trainInner, y_test, {**parModel}, scoring, pw)
scores = np.append(scores,score)
it+=1
scores = scores.reshape((k,-1))
return scores
def LearningCurveOutOfSample(dfDataset, featureBox, y , dataList, parModel, scoring='default', pw=False, step=1):
"""Calculates the learning curve of a dataset for a given model
Parameters
-----------
    dfDataset: Dataframe
        Dataframe holding the sequence identifiers (column 'ID', or 'ID_1'/'ID_2' for pairwise data)
    featureBox: Dataframe
        Feature matrix for the training dataset
    y: 1-dimensional numpy array
        Labels for the training dataset
    dataList: list
        Held-out datasets; each entry holds the feature Dataframe at index 1 and the labels at index 2
    parModel: Dictionary
        Parameters indicating the model and some of its features
    pw: Boolean
        Whether the data are pairwise
    step: int
        Number of identifiers added to the training set at each step
    Returns
    --------
    scores: 2-dimensional numpy array (len(dataList) x number of training-set sizes)
        Learning-curve scores for each held-out dataset
"""
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = pd.Series(temp)
else:
dfId = dfDataset['ID']
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.zeros(shape=(len(dataList),(lenId-5)//step))
for i in range((lenId-5)//step):
print("\rProgress {:2.1%}".format(i/lenId*step), end='')
train = Id[:((i*step)+5)]
if pw is True:
indexTrain = (dfDataset['ID_1'].isin(train) & dfDataset['ID_2'].isin(train)).values
else:
indexTrain = dfDataset['ID'].isin(train).values
X_train, y_train = featureBox[indexTrain], y[indexTrain]
for j in range(len(dataList)):
score, y_pred = EvaluateScore(X_train, dataList[j][1].values, y_train, dataList[j][2],
{**parModel}, scoring, pw)
scores[j,i] = score
return scores
def LearningCurveOutOfSampleEnriched(dfDataset, featureBox, enrichBox, y, y_enrich, dataOutList,
parModel, scoring='default', pw=True, step=1):
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = | pd.Series(temp) | pandas.Series |
import os
import gc
import sys
print(sys.path)
import pickle
import warnings
import numpy as np
import pandas as pd
import datetime as dt
from diamond import helpers as helper
from diamond import utilities as util
from copy import deepcopy
from sklearn.preprocessing import StandardScaler
CONFIG = util.load_config()
class diamond(object):
"""
    Class for handling relationships between normalized tables pulled from the API.
    Standardizes adding starting pitchers and lineups (expected and/or actual),
    adding pitcher rolling stats, and adding batter rolling stats.
"""
def __init__(self, seasonKey, min_date_gte=None, max_date_lte=None, upcoming_start_gte=None):
self.seasonKey = seasonKey
self.league = 'mlb'
self.min_date_gte = min_date_gte
self.max_date_lte = max_date_lte
self.upcoming_start_gte = upcoming_start_gte
# Pitching Stats attributes
self.pitching_roll_windows = [1, 3, 5, 10]
self.pitching_stats = ['fip', 'bb_per9', 'hr_fb_ratio', 'k_per9', 'gbpct']
self.pitching_roll_stats = [
'{}_roll{}'.format(s, w) for s in self.pitching_stats for
w in self.pitching_roll_windows
]
# Batting Stats Attributes
self.batting_roll_windows = [1, 3, 5, 10]
self.batting_stats = ['obp', 'slg', 'woba', 'iso']
self.batting_roll_stats = [
'{}_roll{}'.format(s, w) for s in self.batting_stats for
w in self.batting_roll_windows
]
self.batting_static_stats = ['atBats']
# Check args
assert not (
seasonKey and
            (min_date_gte is not None) and
            (max_date_lte is not None)
)
# Determine time period
if self.seasonKey:
self.min_date_gte = CONFIG.get(self.league)\
.get('seasons')\
.get(self.seasonKey)\
.get('seasonStart')
self.max_date_lte = CONFIG.get(self.league)\
.get('seasons')\
.get(self.seasonKey)\
.get('seasonEnd')
# Read in from daily game
path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized').format(
f='daily_games'
)
paths = [
path+fname for fname in os.listdir(path) if (
(fname[:8] >= self.min_date_gte)
&
(fname[:8] <= self.max_date_lte)
)
]
self.summary = pd.concat(
objs=[pd.read_parquet(p) for p in paths],
axis=0
)
self.summary.drop_duplicates(subset=['gameId'], inplace=True)
self.summary.loc[:, 'gameStartDate'] = \
pd.to_datetime(self.summary['startTime'].str[:10])
def add_starting_pitchers(self, dispositions=['home', 'away']):
"""
ADDS DIMENSIONS TO SUMMARY
"""
helper.progress("Adding Starting Pitchers Attribute")
# Paths
atbats_path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized').format(
f='game_atbats'
)
atbats_paths = [atbats_path+d+"/" for d in os.listdir(atbats_path) if (
(d >= self.min_date_gte)
&
(d <= self.max_date_lte)
)]
atbats_paths_full = []
for abp in atbats_paths:
atbats_paths_full.extend([abp+fname for fname in os.listdir(abp)])
# Get atbats
df_ab = pd.concat(
objs=[pd.read_parquet(p) for p in atbats_paths_full],
axis=0
)
df_ab.loc[:, 'gameStartTime'] = df_ab['gameStartTime'].str[:10]
df_ab.loc[:, 'gameStartTime'] = pd.to_datetime(df_ab['gameStartTime'])
# Save upcoming to use lineup approach with later
if self.upcoming_start_gte:
df_upc = df_ab.loc[df_ab['gameStartTime'] >= self.upcoming_start_gte, :]
df_ab = df_ab.loc[df_ab['gameStartTime'] < self.upcoming_start_gte, :]
else:
df_upc = df_ab.loc[df_ab['gameStartTime'] >= dt.datetime.now(), :]
df_ab = df_ab.loc[df_ab['gameStartTime'] < dt.datetime.now(), :]
# -------------------------
# -------------------------
# Filter to games in the past and use atbats to get starter (in case lineup wrong)
# Get Home Starters
df_top1 = df_ab.loc[(
(df_ab['inning']==1) &
(df_ab['inningHalf']=='TOP') &
(df_ab['outCount']==0)
), :]
df_home_starters = df_top1.loc[:, ['gameId', 'pitcherId']]\
.drop_duplicates(subset=['gameId'])
df_home_starters.rename(
columns={'pitcherId': 'homeStartingPitcherId'},
inplace=True
)
# Get Away Starters
df_bot1 = df_ab.loc[(
(df_ab['inning']==1) &
(df_ab['inningHalf']=='BOTTOM') &
(df_ab['outCount']==0)
), :]
df_away_starters = df_bot1.loc[:, ['gameId', 'pitcherId']]\
.drop_duplicates(subset=['gameId'])
df_away_starters.rename(
columns={'pitcherId': 'awayStartingPitcherId'},
inplace=True
)
# Assemble starters
df_hist_starters = pd.merge(
df_home_starters,
df_away_starters,
how='outer',
on=['gameId'],
validate='1:1'
)
# -------------------------
# -------------------------
# Filter to games in the current/future and use
# lineups to get starter (in case lineup wrong)
if not hasattr(self, 'lineups'):
self.add_lineups()
df_lup_home = self.lineups.loc[
self.lineups['batterDisposition'].str.lower() == 'home', :]
df_lup_away = self.lineups.loc[
self.lineups['batterDisposition'].str.lower() == 'away', :]
# Filter down
df_lup_home = df_lup_home.loc[(
(df_lup_home['playerPositionGeneral'] == 'P')
&
(df_lup_home['gameId'].isin(list(df_upc.gameId)))
), :]
df_lup_away = df_lup_away.loc[(
(df_lup_away['playerPositionGeneral'] == 'P')
&
(df_lup_away['gameId'].isin(list(df_upc.gameId)))
), :]
# Isolate
df_lup_home.rename(columns={'playerId': 'homeStartingPitcherId'}, inplace=True)
df_lup_home = df_lup_home.loc[:,
['gameId', 'homeStartingPitcherId']]\
.drop_duplicates(subset=['gameId'], inplace=False)
df_lup_away.rename(columns={'playerId': 'awayStartingPitcherId'}, inplace=True)
df_lup_away = df_lup_away.loc[:,
['gameId', 'awayStartingPitcherId']]\
.drop_duplicates(subset=['gameId'], inplace=False)
# Combine to one game per row
df_upc_starters = pd.merge(
df_lup_home,
df_lup_away,
how='left',
on=['gameId'],
validate='1:1'
)
# Concat hist and upc vertically to merge back to summary attrib
df_starters = pd.concat(
objs=[df_hist_starters, df_upc_starters],
axis=0
)
# Merge to summary attribute
self.summary = pd.merge(
self.summary,
df_starters,
how='left',
on=['gameId'],
validate='1:1'
)
def add_bullpen_summary(self, dispositions=['home', 'away']):
"""
ADDS ATTRIBUTE "bullpens_summary"
"""
helper.progress("Adding Bullpen Summary Attribute")
# Get atbats, filter to where not equal to starters
if not all(
s in self.summary.columns for s in \
['{}StartingPitcherId'.format(d) for d in dispositions]
):
self.add_starting_pitchers()
# Get atbats
# Paths
atbats_path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized').format(
f='game_atbats'
)
atbats_paths = [atbats_path+d+"/" for d in os.listdir(atbats_path) if (
(d >= self.min_date_gte)
&
(d <= self.max_date_lte)
)]
atbats_paths_full = []
for abp in atbats_paths:
atbats_paths_full.extend([abp+fname for fname in os.listdir(abp)])
# Get atbats and sort by inning / outCount
df_ab = pd.concat(
objs=[pd.read_parquet(p) for p in atbats_paths_full],
axis=0
)
df_ab = df_ab.loc[:, ['gameId', 'gameStartTime', 'pitcherId', 'homeTeamId', 'awayTeamId',
'inning', 'inningHalf', 'outCount']]
# Select home, sort, dd, remove starter, and rerank
bullpen_summary = []
sides = {'TOP': 'home', 'BOTTOM': 'away'}
for half_, disp in sides.items():
# Set up starter map for later mask
startingPitcherMap = self.summary.set_index('gameId')\
['{}StartingPitcherId'.format(disp)].to_dict()
df_ab_h = df_ab.loc[df_ab['inningHalf']==half_, :]
# Sort
df_ab_h = df_ab_h.sort_values(
by=['gameId', 'gameStartTime', 'inning', 'outCount'],
ascending=True,
inplace=False
)
# Drop labels
df_ab_h = df_ab_h.drop(labels=['inning', 'outCount'], axis=1, inplace=False)
# Remove pitcher who was already identified as starter
# (self.summary['homeStartingPitcherId'].iloc[0]?
df_ab_h.loc[:, '{}StartingPitcherId'.format(disp)] = \
df_ab_h['gameId'].map(startingPitcherMap)
df_ab_h = df_ab_h.loc[
df_ab_h['pitcherId'] != df_ab_h['{}StartingPitcherId'.format(disp)], :]
# Handle ordering
df_ab_h['pitcherAppearOrder'] = df_ab_h\
.groupby(by=['gameId'])['pitcherId'].rank(method='first')
df_ab_h = df_ab_h.groupby(
by=['gameId', 'gameStartTime', '{}TeamId'.format(disp), 'pitcherId'],
as_index=False).agg({'pitcherAppearOrder': 'min'})
df_ab_h['pitcherAppearOrder'] = df_ab_h\
.groupby(by=['gameId'])['pitcherId'].rank(method='first')
df_ab_h['pitcherAppearOrderMax'] = df_ab_h\
.groupby('gameId')['pitcherAppearOrder'].transform('max')
            # Label middle pitchers with the reliever role and the last pitcher with the closer role
msk = (df_ab_h['pitcherAppearOrder']==df_ab_h['pitcherAppearOrderMax'])
df_ab_h.loc[msk, 'pitcherRoleType'] = 'closer'
df_ab_h.loc[~msk, 'pitcherRoleType'] = 'reliever'
# Subset (TODO add first inning appeared)
df_ab_h = df_ab_h.loc[:, ['gameId', 'gameStartTime', 'pitcherId', 'pitcherRoleType',
'{}TeamId'.format(disp), 'pitcherAppearOrder']]
df_ab_h.rename(columns={'{}TeamId'.format(disp): 'teamId'}, inplace=True)
df_ab_h['bullpenDisposition'] = disp
bullpen_summary.append(df_ab_h)
bullpen_summary = pd.concat(objs=bullpen_summary, axis=0)
self.bullpen_reliever_summary = bullpen_summary.loc[
bullpen_summary['pitcherRoleType'] == 'reliever', :]
self.bullpen_closer_summary = bullpen_summary.loc[
bullpen_summary['pitcherRoleType'] == 'closer', :]
def add_pitcher_rolling_stats(
self,
dispositions=['home', 'away'],
pitcher_roll_types=['starter', 'reliever', 'closer'],
shift_back=True
):
"""
"""
helper.progress("Adding Pitcher Rolling Stats to pitching-related attributes")
# Path
ptch_roll_path = CONFIG.get(self.league)\
.get('paths')\
.get('rolling_stats').format('pitching')+"player/"
# Read in
ptch_roll = pd.concat(
objs=[pd.read_parquet(ptch_roll_path+fname) for fname in
os.listdir(ptch_roll_path) if
((fname.replace(".parquet", "") >= self.min_date_gte)
&
(fname.replace(".parquet", "") <= self.max_date_lte))],
axis=0
)
# Create rolling metrics
cols = ['gameId', 'gameStartDate', 'playerId'] +\
self.pitching_roll_stats
# Subset
ptch_roll = ptch_roll.loc[:,
['gameId', 'gameStartDate', 'playerId'] +
self.pitching_roll_stats
]
# Sort
ptch_roll.sort_values(by=['gameStartDate'], ascending=True, inplace=True)
# Shift back if interested in rolling stats leading up to game
if shift_back:
for col in self.pitching_roll_stats:
msk = (ptch_roll['playerId'].shift(1)==ptch_roll['playerId'])
ptch_roll.loc[msk, col] = ptch_roll[col].shift(1)
# Handle Infs
for col in self.pitching_roll_stats:
ptch_roll = ptch_roll.loc[~ptch_roll[col].isin([np.inf, -np.inf]), :]
# Check if starter / all designation
if 'starter' in pitcher_roll_types:
print(" Adding stats for starters")
# Check that summary attribute has starting pitchers
if not any('StartingPitcherId' in col for col in
self.summary.columns):
self.add_starting_pitchers(dispositions=dispositions)
# Merge back to starters (one at a time)
pitcher_cols = ['{}StartingPitcherId'.format(d) for
d in dispositions]
# Prep self.starting_pitcher_stats
p = []
for pc in pitcher_cols:
df = self.summary.loc[:, ['gameId', pc]]
df = df.loc[df[pc].notnull(), :]
df.rename(columns={pc: 'pitcherId'}, inplace=True)
df.loc[:, 'pitcherDisposition'] = pc[:4].lower()
p.append(df)
# concatenate to form attribute
self.starting_pitcher_summary = \
pd.concat(objs=p, axis=0)
self.starting_pitcher_summary = pd.merge(
self.starting_pitcher_summary,
ptch_roll,
how='left',
left_on=['gameId', 'pitcherId'],
right_on=['gameId', 'playerId'],
validate='1:1'
)
self.starting_pitcher_summary.drop(
labels=['playerId'],
axis=1,
inplace=True
)
# Check if reliever / all designation
if 'reliever' in pitcher_roll_types:
print(" Adding stats for relievers")
# Check attribute (try / except cheaper but less readable)
if not hasattr(self, 'bullpen_reliever_summary'):
self.add_bullpen_summary(dispositions=dispositions)
# Merge back to relievers in bullpen summary
msk = (self.bullpen_reliever_summary['pitcherRoleType'].str.lower() == 'reliever')
bullpen = self.bullpen_reliever_summary.loc[msk, :]
if bullpen.shape[0] == 0:
warnings.warn(" No relief pitchers found in bullpen_summary attribute")
if not all(d in dispositions for d in ['home', 'away']):
assert len(dispositions) == 1 and dispositions[0] in ['home', 'away']
bullpen_reconstruct = []
for disp in dispositions:
bullpen_disp = bullpen.loc[bullpen['bullpenDisposition'] == disp, :]
bullpen_disp = bullpen_disp.loc[:, ['gameId', 'pitcherId']]
bullpen_disp = pd.merge(
bullpen_disp,
ptch_roll,
how='left',
left_on=['gameId', 'pitcherId'],
right_on=['gameId', 'playerId'],
validate='1:1'
)
bullpen_disp.drop(labels=['playerId'], axis=1, inplace=True)
bullpen_reconstruct.append(bullpen_disp)
bullpen_reconstruct = pd.concat(objs=bullpen_reconstruct, axis=0)
# Add back to summary / detail
self.bullpen_reliever_summary = pd.merge(
self.bullpen_reliever_summary,
bullpen_reconstruct,
how='left',
on=['gameId', 'pitcherId'],
validate='1:1'
)
# Set
# TODO Standard Deviation might not be best here
aggDict = {stat: ['mean', 'max', 'min'] for stat in [
x for x in self.bullpen_reliever_summary.columns if
any(y in x for y in self.pitching_stats)
]}
df = self.bullpen_reliever_summary.groupby(
by=['gameId', 'gameStartTime', 'teamId', 'bullpenDisposition'],
as_index=False
).agg(aggDict)
df.columns = [
x[0] if x[1] == '' else x[0]+"~"+x[1] for x in
df.columns
]
self.bullpen_reliever_summary = df
# TODO FIX CLOSER MERGE _x _y
if 'closer' in pitcher_roll_types:
print(" Adding stats for closers")
# Check if closer / all designation
if not hasattr(self, 'bullpen_closer_summary'):
self.add_bullpen_summary(dispositions=dispositions)
# Merge back to closers in bullpen summary
msk = (self.bullpen_closer_summary['pitcherRoleType'].str.lower() == 'closer')
bullpen = self.bullpen_closer_summary.loc[msk, :]
if bullpen.shape[0] == 0:
warnings.warn(" No closing pitchers found in bullpen_summary attribute")
if not all(d in dispositions for d in ['home', 'away']):
assert len(dispositions) == 1 and dispositions[0] in ['home', 'away']
bullpen_reconstruct = []
for disp in dispositions:
bullpen_disp = bullpen.loc[bullpen['bullpenDisposition'] == disp, :]
bullpen_disp = bullpen_disp.loc[:, ['gameId', 'pitcherId']]
bullpen_disp = pd.merge(
bullpen_disp,
ptch_roll,
how='left',
left_on=['gameId', 'pitcherId'],
right_on=['gameId', 'playerId'],
validate='1:1'
)
bullpen_disp.drop(labels=['playerId'], axis=1, inplace=True)
bullpen_reconstruct.append(bullpen_disp)
bullpen_reconstruct = pd.concat(objs=bullpen_reconstruct, axis=0)
# Add back to summary / detail
self.bullpen_closer_summary = pd.merge(
self.bullpen_closer_summary,
bullpen_reconstruct,
how='left',
on=['gameId', 'pitcherId'],
validate='1:1'
)
# Set
# TODO Standard Deviation might not be best here
aggDict = {stat: ['mean', 'max', 'min'] for stat in [
x for x in self.bullpen_closer_summary.columns if
any(y in x for y in self.pitching_stats)
]}
df = self.bullpen_closer_summary.groupby(
by=['gameId', 'gameStartTime', 'teamId', 'bullpenDisposition'],
as_index=False
).agg(aggDict)
df.columns = [
x[0] if x[1] == '' else x[0]+"~"+x[1] for x in
df.columns
]
self.bullpen_closer_summary = df
def add_lineups(self, status='auto'):
"""
status: 'auto' - expected/actual
"""
helper.progress("Adding Lineups Attribute")
# Add lineups
# add expected for upcoming game
# add actual for completed games
lineups_path = CONFIG.get(self.league)\
.get('paths')\
.get('normalized')\
.format(f='game_lineup')
df_lineup = pd.concat(
objs=[ | pd.read_parquet(lineups_path+fname) | pandas.read_parquet |
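# Editor's note: a hedged usage sketch of the diamond class above (not part of the original
# module). The seasonKey value and the presence of the normalized/rolling-stats parquet files
# referenced by CONFIG are assumptions, so the calls are left commented out.
# d = diamond(seasonKey='s2019')        # hypothetical season key defined in CONFIG
# d.add_lineups()                       # expected/actual lineups per game
# d.add_starting_pitchers()             # past games from at-bats, upcoming games from lineups
# d.add_bullpen_summary()               # per-game reliever and closer appearances
# d.add_pitcher_rolling_stats()         # attaches rolling pitching stats to those summaries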
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # accumulate the sum of squared deviations from the mean
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7%
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
            (meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
                dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
            # When the cycles are negative or zero we missed cleaning up a row.
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": pandas.StringDtype(),
"nandWritesPerStream27": pandas.StringDtype(),
"nandWritesPerStream28": pandas.StringDtype(),
"nandWritesPerStream29": pandas.StringDtype(),
"nandWritesPerStream30": pandas.StringDtype(),
"nandWritesPerStream31": pandas.StringDtype(),
"nandWritesPerStream32": pandas.StringDtype(),
"hostSoftReadSuccess": pandas.StringDtype(),
"xorInvokedCount": pandas.StringDtype(),
"comresets": pandas.StringDtype(),
"syncEscapes": pandas.StringDtype(),
"rErrHost": pandas.StringDtype(),
"rErrDevice": pandas.StringDtype(),
"iCrcs": pandas.StringDtype(),
"linkSpeedDrops": pandas.StringDtype(),
"mrrXtrapageEvents": pandas.StringDtype(),
"mrrToppageEvents": pandas.StringDtype(),
"hostXorSuccessCount": pandas.StringDtype(),
"hostXorFailCount": pandas.StringDtype(),
"nandWritesWithPreReadPerStream1": pandas.StringDtype(),
"nandWritesWithPreReadPerStream2": pandas.StringDtype(),
"nandWritesWithPreReadPerStream3": pandas.StringDtype(),
"nandWritesWithPreReadPerStream4": pandas.StringDtype(),
"nandWritesWithPreReadPerStream5": pandas.StringDtype(),
"nandWritesWithPreReadPerStream6": pandas.StringDtype(),
"nandWritesWithPreReadPerStream7": pandas.StringDtype(),
"nandWritesWithPreReadPerStream8": pandas.StringDtype(),
"nandWritesWithPreReadPerStream9": pandas.StringDtype(),
"nandWritesWithPreReadPerStream10": pandas.StringDtype(),
"nandWritesWithPreReadPerStream11": pandas.StringDtype(),
"nandWritesWithPreReadPerStream12": pandas.StringDtype(),
"nandWritesWithPreReadPerStream13": pandas.StringDtype(),
"nandWritesWithPreReadPerStream14": pandas.StringDtype(),
"nandWritesWithPreReadPerStream15": pandas.StringDtype(),
"nandWritesWithPreReadPerStream16": pandas.StringDtype(),
"nandWritesWithPreReadPerStream17": pandas.StringDtype(),
"nandWritesWithPreReadPerStream18": pandas.StringDtype(),
"nandWritesWithPreReadPerStream19": pandas.StringDtype(),
"nandWritesWithPreReadPerStream20": pandas.StringDtype(),
"nandWritesWithPreReadPerStream21": pandas.StringDtype(),
"nandWritesWithPreReadPerStream22": pandas.StringDtype(),
"nandWritesWithPreReadPerStream23": pandas.StringDtype(),
"nandWritesWithPreReadPerStream24": pandas.StringDtype(),
"nandWritesWithPreReadPerStream25": pandas.StringDtype(),
"nandWritesWithPreReadPerStream26": pandas.StringDtype(),
"nandWritesWithPreReadPerStream27": pandas.StringDtype(),
"nandWritesWithPreReadPerStream28": pandas.StringDtype(),
"nandWritesWithPreReadPerStream29": pandas.StringDtype(),
"nandWritesWithPreReadPerStream30": pandas.StringDtype(),
"nandWritesWithPreReadPerStream31": pandas.StringDtype(),
"nandWritesWithPreReadPerStream32": pandas.StringDtype(),
"dramCorrectables8to1": pandas.StringDtype(),
"driveRecoveryCount": pandas.StringDtype(),
"mprLiteReads": pandas.StringDtype(),
"eccErrOnMprLiteReads": pandas.StringDtype(),
"readForwardingXpPreReadCount": pandas.StringDtype(),
"readForwardingUpPreReadCount": pandas.StringDtype(),
"readForwardingLpPreReadCount": pandas.StringDtype(),
"pweDefectCompensationCredit": pandas.StringDtype(),
"planarXorRebuildFailure": pandas.StringDtype(),
"itgXorRebuildFailure": pandas.StringDtype(),
"planarXorRebuildSuccess": pandas.StringDtype(),
"itgXorRebuildSuccess": pandas.StringDtype(),
"xorLoggingSkippedSIcBand": pandas.StringDtype(),
"xorLoggingSkippedDieOffline": pandas.StringDtype(),
"xorLoggingSkippedDieAbsent": pandas.StringDtype(),
"xorLoggingSkippedBandErased": pandas.StringDtype(),
"xorLoggingSkippedNoEntry": pandas.StringDtype(),
"xorAuditSuccess": pandas.StringDtype(),
"maxSuspendCount": pandas.StringDtype(),
"suspendLimitPerPrgm": pandas.StringDtype(),
"psrCountStats": pandas.StringDtype(),
"readNandBuffCount": pandas.StringDtype(),
"readNandBufferRspErrorCount": pandas.StringDtype(),
"ddpNandWrites": pandas.StringDtype(),
"totalDeallocatedSectorsInCore": pandas.StringDtype(),
"prefetchHostReads": pandas.StringDtype(),
"hostReadtoDSMDCount": pandas.StringDtype(),
"hostWritetoDSMDCount": pandas.StringDtype(),
"snapReads4k": pandas.StringDtype(),
"snapReads8k": pandas.StringDtype(),
"snapReads16k": pandas.StringDtype(),
"xorLoggingTriggered": pandas.StringDtype(),
"xorLoggingAborted": pandas.StringDtype(),
"xorLoggingSkippedHistory": pandas.StringDtype(),
"deckDisturbRelocationUD": pandas.StringDtype(),
"deckDisturbRelocationMD": pandas.StringDtype(),
"deckDisturbRelocationLD": pandas.StringDtype(),
"bbdProactiveReadRetry": pandas.StringDtype(),
"statsRestoreRequired": pandas.StringDtype(),
"statsAESCount": pandas.StringDtype(),
"statsHESCount": pandas.StringDtype(),
"psrCountStats1": pandas.StringDtype(),
"psrCountStats2": pandas.StringDtype(),
"psrCountStats3": pandas.StringDtype(),
"psrCountStats4": pandas.StringDtype(),
"psrCountStats5": pandas.StringDtype(),
"psrCountStats6": pandas.StringDtype(),
"psrCountStats7": pandas.StringDtype(),
"psrCountStats8": pandas.StringDtype(),
"psrCountStats9": pandas.StringDtype(),
"psrCountStats10": pandas.StringDtype(),
"psrCountStats11": pandas.StringDtype(),
"psrCountStats12": pandas.StringDtype(),
"psrCountStats13": pandas.StringDtype(),
"psrCountStats14": pandas.StringDtype(),
"psrCountStats15": pandas.StringDtype(),
"psrCountStats16": pandas.StringDtype(),
"psrCountStats17": pandas.StringDtype(),
"psrCountStats18": pandas.StringDtype(),
"psrCountStats19": pandas.StringDtype(),
"psrCountStats20": pandas.StringDtype(),
"psrCountStats21": pandas.StringDtype(),
"psrCountStats22": pandas.StringDtype(),
"psrCountStats23": pandas.StringDtype(),
"psrCountStats24": pandas.StringDtype(),
"psrCountStats25": pandas.StringDtype(),
"psrCountStats26": pandas.StringDtype(),
"psrCountStats27": pandas.StringDtype(),
"psrCountStats28": pandas.StringDtype(),
"psrCountStats29": pandas.StringDtype(),
"psrCountStats30": pandas.StringDtype(),
"psrCountStats31": pandas.StringDtype(),
"psrCountStats32": pandas.StringDtype(),
"psrCountStats33": pandas.StringDtype(),
"psrCountStats34": pandas.StringDtype(),
"psrCountStats35": pandas.StringDtype(),
"psrCountStats36": pandas.StringDtype(),
"psrCountStats37": pandas.StringDtype(),
"psrCountStats38": pandas.StringDtype(),
"psrCountStats39": pandas.StringDtype(),
"psrCountStats40": pandas.StringDtype(),
"psrCountStats41": pandas.StringDtype(),
"psrCountStats42": pandas.StringDtype(),
"psrCountStats43": pandas.StringDtype(),
"psrCountStats44": pandas.StringDtype(),
"psrCountStats45": pandas.StringDtype(),
"psrCountStats46": pandas.StringDtype(),
"psrCountStatsHigh1": pandas.StringDtype(),
"psrCountStatsHigh2": pandas.StringDtype(),
"psrCountStatsHigh3": pandas.StringDtype(),
"psrCountStatsHigh4": pandas.StringDtype(),
"psrCountStatsHigh5": pandas.StringDtype(),
"psrCountStatsHigh6": pandas.StringDtype(),
"psrCountStatsHigh7": pandas.StringDtype(),
"psrCountStatsHigh8": pandas.StringDtype(),
"psrCountStatsHigh9": pandas.StringDtype(),
"psrCountStatsHigh10": pandas.StringDtype(),
"psrCountStatsHigh11": pandas.StringDtype(),
"psrCountStatsHigh12": pandas.StringDtype(),
"psrCountStatsHigh13": pandas.StringDtype(),
"psrCountStatsHigh14": pandas.StringDtype(),
"vssSwitchCount": pandas.StringDtype(),
"openBandReadCount": pandas.StringDtype(),
"closedBandReadCount": pandas.StringDtype(),
"minEraseSLC": pandas.StringDtype(),
"maxEraseSLC": pandas.StringDtype(),
"avgEraseSLC": pandas.StringDtype(),
"totalErasesSLC": pandas.StringDtype(),
"unexpectedPsrStateCount": pandas.StringDtype(),
"lowPrioritySqReadCmds": | pandas.StringDtype() | pandas.StringDtype |
"""
MLTrace: A machine learning progress tracker
====================================================
This module provides some basic functionality to track the process of machine learning model development.
It sets up a SQLite db-file and stores selected models, graphs, and data (for convenience) and recovers them
as requested.
``mltrace`` uses `peewee <http://docs.peewee-orm.com/en/latest/>`_ and `pandas <https://pandas.pydata.org/>`_ for
data manipulation.
It also has built-in capabilities to generate some typical plots and graphs in machine learning.
"""
try:
from peewee import *
except ModuleNotFoundError:
Model = type("Model", (object,), dict(Simple=lambda: 0.0))
SqliteDatabase = lambda x: None
from datetime import datetime
MLTRACK_DB = SqliteDatabase(None)
class np2df(object):
"""
A class to convert numpy ndarray to a pandas DataFrame. It produces a callable object which returns a
`pandas.DataFrame`
:param data: `numpy.ndarray` data
:param clmns: a list of titles for pandas DataFrame column names.
        If None, column names of the form `C{num}` are generated, where `num` is the column index of the ndarray.
"""
def __init__(self, data, clmns=None):
self.data = data
self.N = len(data[0])
if clmns is None:
self.Columns = ["C%d" % (_) for _ in range(self.N)]
else:
self.Columns = clmns
def __call__(self, *args, **kwargs):
from pandas import DataFrame
dct = {}
for idx in range(self.N):
dct[self.Columns[idx]] = list(self.data[:, idx])
return DataFrame(dct)
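# Illustrative sketch (assumed values): converting a small numpy array to a DataFrame
# with custom column names using the np2df helper defined above.
#
#   import numpy as np
#   arr = np.array([[1.0, 2.0], [3.0, 4.0]])
#   df = np2df(arr, clmns=["x", "y"])()   # calling the object returns a pandas.DataFrame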
class Task(Model):
"""
    The class to generate the `task` table in the SQLite db-file.
This table keeps basic information about the task on hand, e.g., the task name, a brief description,
target column, and columns to be ignored.
"""
try:
task_id = IntegerField(primary_key=True, unique=True, null=False, default=1)
name = CharField(null=True)
description = TextField(null=True)
target = CharField(null=True)
ignore = CharField(null=True)
init_date = DateTimeField(default=datetime.now, null=True)
last_mod_date = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class MLModel(Model):
"""
    The class to generate the `mlmodel` table in the SQLite db-file.
It stores the scikit-learn scheme of the model/pipeline, its parameters, etc.
"""
try:
model_id = IntegerField(primary_key=True, unique=True, null=False)
task_id = ForeignKeyField(Task)
name = CharField(null=True)
model_str = TextField(null=True)
model_type = CharField(null=True)
parameters = BareField(null=True)
date_modified = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Metrics(Model):
"""
    The class to generate the `metrics` table in the SQLite db-file.
This table stores the calculated metrics of each stored model.
"""
try:
metrics_id = IntegerField(primary_key=True, unique=True, null=False)
model_id = ForeignKeyField(MLModel)
accuracy = FloatField(null=True)
auc = FloatField(null=True)
precision = FloatField(null=True)
recall = FloatField(null=True)
f1 = FloatField(null=True)
mcc = FloatField(null=True)
logloss = FloatField(null=True)
variance = FloatField(null=True)
max_error = FloatField(null=True)
mse = FloatField(null=True)
mae = FloatField(null=True)
r2 = FloatField(null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Saved(Model):
"""
    The class to generate the `saved` table in the SQLite db-file.
It keeps the pickled version of a stored model that can be later recovered.
"""
try:
pickle_id = IntegerField(primary_key=True, unique=True, null=False)
model_id = ForeignKeyField(MLModel)
pickle = BareField(null=True)
init_date = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Plots(Model):
"""
    The class to generate the `plots` table in the SQLite db-file.
This table stores `matplotlib` plots associated to each model.
"""
try:
plot_id = IntegerField(primary_key=True, unique=True, null=False)
model_id = ForeignKeyField(MLModel)
title = CharField(null=True)
plot = BareField(null=True)
init_date = DateTimeField(default=datetime.now, null=True)
except:
pass
class Meta:
database = MLTRACK_DB
class Data(Model):
"""
    The class to generate the `data` table in the SQLite db-file.
This table stores the whole given data for convenience.
"""
class Meta:
database = MLTRACK_DB
class Weights(Model):
"""
    The class to generate the `weights` table in the SQLite db-file.
Stores some sensitivity measures, correlations, etc.
"""
class Meta:
database = MLTRACK_DB
class mltrack(object):
"""
    This class instantiates an object that tracks ML activities and stores them upon request.
:param task: 'str' the task name
    :param task_id: the id of an existing task, used if the name is not provided.
:param db_name: a file name for the SQLite database
:param cv: the default cross validation method, must be a valid cv based on `sklearn.model_selection`;
default: `ShuffleSplit(n_splits=3, test_size=.25)`
"""
def __init__(self, task, task_id=None, db_name="mltrack.db", cv=None):
self.db_name = db_name
tables = [Task, MLModel, Metrics, Saved, Plots, Data, Weights]
for tbl in tables:
tbl._meta.database.init(self.db_name)
MLTRACK_DB.create_tables(tables)
res = Task.select().where((Task.name == task) | (Task.task_id == task_id))
if len(res) > 0:
self.task = res[0].name
self.task_id = res[0].task_id
self.target = res[0].target
else:
new_task = Task.create(name=task, description="Initiated automatically")
self.task_id = new_task.task_id
import sqlite3
self.conn = sqlite3.connect(self.db_name)
if cv is None:
from sklearn.model_selection import ShuffleSplit
self.cv = ShuffleSplit(n_splits=3, test_size=0.25)
else:
self.cv = cv
self.X, self.y = None, None
self.Updated, self.Loaded, self.Recovered = [], [], []
def UpdateTask(self, data):
"""
Updates the current task info.
        :param data: a dictionary that may include some of the following as its keys:
+ 'name': the corresponding value will replace the current task name
+ 'description': the corresponding value will replace the current description
+ 'ignore': the corresponding value will replace the current ignored columns
:return: None
"""
task = Task.select().where(Task.task_id == self.task_id).get()
if "name" in data:
task.name = data["name"]
if "description" in data:
task.description = data["description"]
if "ignore" in data:
task.ignore = ",".join(data["ignore"])
task.last_mod_date = datetime.now()
task.save()
def UpdateModel(self, mdl, name):
"""
Updates an already logged model which has `mltrack_id` set.
:param mdl: a scikit-learn compatible estimator/pipeline
:param name: an arbitrary string to name the model
:return: None
"""
from pickle import dumps
if "mltrack_id" not in mdl.__dict__:
return
else:
mltrack_id = mdl.mltrack_id
model = MLModel.select().where(MLModel.model_id == mltrack_id).get()
model.name = name
model.model_str = str(mdl)
model.parameters = dumps(mdl.get_params())
model.date_modified = datetime.now()
model.save()
if mltrack_id not in self.Updated:
self.Updated.append(mltrack_id)
def LogModel(self, mdl, name=None):
"""
        Logs a machine learning model.
:param mdl: a scikit-learn compatible estimator/pipeline
:param name: an arbitrary string to name the model
:return: modified instance of `mdl` which carries a new attribute `mltrack_id` as its id.
"""
from pickle import dumps
if name is not None:
mdl.mltrack_name = name
else:
            mdl.mltrack_name = str(mdl).split("(")[0]
if "mltrack_id" not in mdl.__dict__:
MLModel.create(
task_id=self.task_id,
name=mdl.mltrack_name,
model_str=str(mdl),
model_type=str(type(mdl)).split("'")[1],
parameters=dumps(mdl.get_params()),
)
mdl.mltrack_id = (
MLModel.select(MLModel.model_id).order_by(MLModel.model_id.desc()).get()
)
else:
res = MLModel.select().where(MLModel.model_id == mdl.mltrack_id)[0]
res.name = mdl.mltrack_name
res.model_str = str(mdl)
res.parameters = dumps(mdl.get_params())
res.date_modified = datetime.now()
res.save()
# TBM
Tskres = Task.select().where(Task.task_id == self.task_id)[0]
Tskres.last_mod_date = datetime.now()
Tskres.save()
return mdl
def RegisterData(self, source_df, target):
"""
Registers a pandas DataFrame into the SQLite database.
Upon a call, it also sets `self.X` and `self.y` which are numpy arrays.
:param source_df: the pandas DataFrame to be stored
:param target: the name of the target column to be predicted
:return: None
"""
# TBM
res = Task.select().where(Task.task_id == self.task_id)[0]
res.target = target
res.last_mod_date = datetime.now()
res.save()
self.target = target
clmns = list(source_df.columns)
if target not in clmns:
raise BaseException("`%s` is not a part of data source." % target)
source_df.to_sql("data", self.conn, if_exists="replace", index=False)
clmns.remove(target)
self.X = source_df[clmns].values
self.y = source_df[target].values
def get_data(self):
"""
Retrieves data in numpy format
:return: numpy arrays X, y
"""
from pandas import read_sql
df = read_sql("SELECT * FROM data", self.conn)
clmns = list(df.columns)
clmns.remove(self.target)
self.X = df[clmns].values
self.y = df[self.target].values
return self.X, self.y
def get_dataframe(self):
"""
Retrieves data in pandas DataFrame format
:return: pandas DataFrame containing all data
"""
from pandas import read_sql
df = read_sql("SELECT * FROM data", self.conn)
return df
def LogMetrics(self, mdl, cv=None):
"""
        Logs metrics of an already logged model using a cross-validation method
:param mdl: the model to be measured
:param cv: cross validation method
:return: a dictionary of all measures with their corresponding values for the model
"""
if cv is not None:
self.cv = cv
if self.X is None:
self.get_data()
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
mdl_type = mdl._estimator_type
#######################################################
prds = []
prbs = []
for train_idx, test_idx in self.cv.split(self.X, self.y):
X_train, y_train = self.X[train_idx], self.y[train_idx]
X_test, y_test = self.X[test_idx], self.y[test_idx]
mdl.fit(X_train, y_train)
prds.append((mdl.predict(X_test), y_test))
try:
prbs.append(mdl.predict_proba(X_test)[:, 1])
except AttributeError:
try:
prbs.append(mdl.decision_function(X_test))
except AttributeError:
pass
#######################################################
acc = None
f_1 = None
prs = None
rcl = None
aur = None
mcc = None
lgl = None
vrn = None
mxe = None
mse = None
mae = None
r2 = None
        n_ = float(len(prds))  # one entry per cross-validation split; len(prbs) is empty for regressors
if mdl_type == "classifier":
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
roc_curve,
auc,
log_loss,
matthews_corrcoef,
)
acc = sum([accuracy_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
f_1 = sum([f1_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
prs = sum([precision_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
rcl = sum([recall_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
mcc = sum([matthews_corrcoef(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
lgl = sum([log_loss(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
aur = 0.0
for i in range(int(n_)):
fpr, tpr, _ = roc_curve(prds[i][1], prbs[i])
aur += auc(fpr, tpr)
aur /= n_
elif mdl_type == "regressor":
from sklearn.metrics import (
explained_variance_score,
median_absolute_error,
mean_squared_error,
mean_absolute_error,
r2_score,
)
vrn = (
sum([explained_variance_score(y_tst, y_prd) for y_prd, y_tst in prds])
/ n_
)
mxe = (
sum([median_absolute_error(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
)
mse = sum([mean_squared_error(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
mae = sum([mean_absolute_error(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
r2 = sum([r2_score(y_tst, y_prd) for y_prd, y_tst in prds]) / n_
Metrics.create(
model_id=mdl_id,
accuracy=acc,
auc=aur,
precision=prs,
f1=f_1,
recall=rcl,
mcc=mcc,
logloss=lgl,
variance=vrn,
max_error=mxe,
mse=mse,
mae=mae,
r2=r2,
)
# TBM
res = Task.select().where(Task.task_id == self.task_id)[0]
res.last_mod_date = datetime.now()
res.save()
return dict(
accuracy=acc,
auc=aur,
precision=prs,
f1=f_1,
recall=rcl,
mcc=mcc,
logloss=lgl,
variance=vrn,
max_error=mxe,
mse=mse,
mae=mae,
r2=r2,
)
def LoadModel(self, mid):
"""
Loads a model corresponding to an id
:param mid: the model id
:return: an unfitted model
"""
from importlib import import_module
from pickle import loads
res = MLModel.select().where(MLModel.model_id == mid)
if len(res) == 0:
raise BaseException("No model with id '%d' were found" % (mid))
detail = res[0].model_type.split(".")
module_str = ".".join(detail[:-1])
clss = detail[-1]
module = import_module(module_str)
params = loads(res[0].parameters)
mdl = module.__getattribute__(clss)()
mdl.set_params(**params)
mdl.mltrack_id = mid
if mid not in self.Loaded:
self.Loaded.append(mid)
return mdl
@staticmethod
def getBest(metric):
"""
Finds the model with the best metric.
:param metric: the metric to find the best stored model for
        :return: the model with the best `metric`
"""
res = (
Metrics.select()
.order_by(Metrics.__dict__[metric].__dict__["field"].desc())
.dicts()
)
return res[0]
def allTasks(self):
"""
Lists all tasks as a pandas DataFrame
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql("SELECT * FROM task", self.conn)
def allModels(self):
"""
Lists all logged models as a pandas DataFrame
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql(
"SELECT model_id, task_id, name, model_str, model_type, date_modified FROM mlmodel WHERE task_id=%d"
% (self.task_id),
self.conn,
)
def allPreserved(self):
"""
Lists all pickled models as a pandas DataFrame
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql("SELECT pickle_id, model_id, init_date FROM saved", self.conn)
def PreserveModel(self, mdl):
"""
Pickles and preserves an already logged model
:param mdl: a logged model
:return: None
"""
        try:
            import joblib
        except ImportError:  # older scikit-learn releases bundled joblib under sklearn.externals
            from sklearn.externals import joblib
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
file = open("track_ml_tmp_mdl.joblib", "wb")
joblib.dump(mdl, file)
file.close()
file = open("track_ml_tmp_mdl.joblib", "rb")
str_cntnt = file.read()
Saved.create(model_id=mdl_id, pickle=str_cntnt)
file.close()
import os
os.remove("track_ml_tmp_mdl.joblib")
def RecoverModel(self, mdl_id):
"""
Recovers a pickled model
:param mdl_id: a valid `mltrack_id`
:return: a fitted model
"""
        try:
            import joblib
        except ImportError:  # older scikit-learn releases bundled joblib under sklearn.externals
            from sklearn.externals import joblib
res = (
Saved.select()
.where(Saved.model_id == mdl_id)
.order_by(Saved.init_date.desc())
.dicts()
)
file = open("track_ml_tmp_mdl.joblib", "wb")
file.write(res[0]["pickle"])
file.close()
file = open("track_ml_tmp_mdl.joblib", "rb")
mdl = joblib.load(file)
file.close()
import os
os.remove("track_ml_tmp_mdl.joblib")
if mdl_id not in self.Recovered:
self.Recovered.append(mdl_id)
return mdl
def allPlots(self, mdl_id):
"""
Lists all stored plots for a model with `mdl_id` as a pandas DataFrame
:param mdl_id: a valid `mltrack_id`
:return: a pandas DataFrame
"""
from pandas import read_sql
return read_sql(
"SELECT plot_id, model_id, title, init_date FROM plots WHERE model_id=%d"
% (mdl_id),
self.conn,
)
@staticmethod
def LoadPlot(pid):
"""
Loads a `matplotlib` plot
:param pid: the id of the plot
:return: a `matplotlib` figure
"""
from pickle import loads
# ax = plt.subplot(111)
res = Plots.select().where(Plots.plot_id == pid).dicts()
fig = loads(res[0]["plot"])
return fig
def plot_learning_curve(
self, mdl, title, ylim=None, cv=None, n_jobs=1, train_sizes=None, **kwargs
):
"""
Generate a simple plot of the test and training learning curve.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param title: string;
Title for the chart.
        :param measure: string, a performance measure passed via ``kwargs``; must be one of the following:
            `accuracy`, `f1`, `precision`, `recall`, `roc_auc`
        :param ylim: tuple, shape (ymin, ymax), optional;
            Defines minimum and maximum y-values plotted.
:param cv: int, cross-validation generator or an iterable, optional;
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
            :class:`StratifiedKFold` is used. If the mdl is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
:param n_jobs: integer, optional;
Number of jobs to run in parallel (default 1).
:return: a `matplotlib` plot
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import learning_curve
if cv is not None:
self.cv = cv
if self.X is None:
self.get_data()
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
meas = kwargs.get("measure", "accuracy")
if meas not in ["accuracy", "f1", "precision", "recall", "roc_auc"]:
meas = "accuracy"
if train_sizes is None:
train_sizes = np.linspace(0.1, 1.0, 5)
plt.subplot(111)
fig = plt.figure()
plt.title(title)
if ylim is None:
ylim = (-0.05, 1.05)
plt.ylim(*ylim)
plt.xlabel("Training size")
plt.ylabel("Score (%s)" % (meas))
train_sizes, train_scores, test_scores = learning_curve(
mdl,
self.X,
self.y,
cv=self.cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
scoring=meas,
)
xlbls = np.array(
[str(round(_ * 100, 1)) + " %" for _ in train_sizes / len(self.y)]
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(
xlbls,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1,
color="r",
)
plt.fill_between(
xlbls,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color="g",
)
plt.plot(xlbls, train_scores_mean, "o-", color="r", label="Training score")
plt.plot(
xlbls, test_scores_mean, "o-", color="g", label="Cross-validation score"
)
plt.legend(loc="best")
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title=meas, plot=pckl)
return plt
def split_train(self, mdl):
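        """
        Splits the registered data into train/test parts (75% train), logs the model if it has
        not been logged yet, and fits it on the training part when it is not already fitted.
        :param mdl: a scikit-learn compatible estimator/pipeline
        :return: tuple (mdl, mdl_id, X_train, X_test, y_train, y_test)
        """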
from sklearn.model_selection import train_test_split
if "mltrack_id" not in mdl.__dict__:
mdl = self.LogModel(mdl)
mdl_id = mdl.mltrack_id
if self.X is None:
self.get_data()
X_train, X_test, y_train, y_test = train_test_split(
self.X, self.y, train_size=0.75
)
from sklearn.exceptions import NotFittedError
x_ = X_test[0]
try:
mdl.predict([x_])
except NotFittedError as _:
mdl.fit(X_train, y_train)
return mdl, mdl_id, X_train, X_test, y_train, y_test
def plot_calibration_curve(self, mdl, name, fig_index=1, bins=10):
"""
Plots calibration curves.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param name: string;
Title for the chart.
:param bins: number of bins to partition samples
:return: a `matplotlib` plot
"""
import matplotlib.pyplot as plt
from sklearn.calibration import calibration_curve
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
if hasattr(mdl, "predict_proba"):
prob_pos = mdl.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = mdl.decision_function(X_test)
prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = calibration_curve(
y_test, prob_pos, n_bins=bins
)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s" % (name))
ax2.hist(prob_pos, range=(0, 1), bins=bins, label=name, histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title("Calibration plots (reliability curve)")
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="calibration", plot=pckl)
return plt
def plot_roc_curve(self, mdl, label=None):
"""
        The ROC curve, modified from Hands-On Machine Learning with Scikit-Learn.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param label: string;
label for the chart.
:return: a `matplotlib` plot
"""
import matplotlib.pyplot as plt
from numpy import arange
from sklearn.metrics import roc_curve
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
_ = plt.subplot(111)
fig = plt.figure(figsize=(8, 8))
plt.title("ROC Curve")
try:
y_score = mdl.predict_proba(X_test)[:, 1]
except:
y_score_ = mdl.decision_function(X_test)
y_score = (y_score_ - y_score_.min()) / (y_score_.max() - y_score_.min())
fpr, tpr, _ = roc_curve(y_test, y_score)
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], "k--")
plt.axis([-0.005, 1, 0, 1.005])
plt.xticks(arange(0, 1, 0.05), rotation=90)
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate (Recall)")
plt.legend(loc="best")
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="roc curve", plot=pckl)
return plt
def plot_cumulative_gain(
self,
mdl,
title="Cumulative Gains Curve",
figsize=None,
title_fontsize="large",
text_fontsize="medium",
):
"""
Generates the Cumulative Gains Plot from labels and scores/probabilities
The cumulative gains chart is used to determine the effectiveness of a
binary classifier. A detailed explanation can be found at
`http://mlwiki.org/index.php/Cumulative_Gain_Chart <http://mlwiki.org/index.php/Cumulative_Gain_Chart>`_.
The implementation here works only for binary classification.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param title: (string, optional): Title of the generated plot.
Defaults to "Cumulative Gains Curve".
:param figsize: (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6).
Defaults to ``None``.
:param title_fontsize: (string or int, optional): Matplotlib-style fontsizes.
Use e.g., "small", "medium", "large" or integer-values. Defaults to "large".
:param text_fontsize: (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium".
:return: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
from numpy import array, unique
import matplotlib.pyplot as plt
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
y_true = array(y_test)
try:
y_probas = mdl.predict_proba(X_test)
y_probas = array(y_probas)
prob_pos0 = y_probas[:, 0]
prob_pos1 = y_probas[:, 1]
except:
prob_pos = mdl.decision_function(X_test)
prob_pos1 = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
prob_pos0 = (prob_pos.max() - prob_pos) / (prob_pos.max() - prob_pos.min())
classes = unique(y_true)
if len(classes) != 2:
raise ValueError(
"Cannot calculate Cumulative Gains for data with "
"{} category/ies".format(len(classes))
)
# Compute Cumulative Gain Curves
percentages, gains1 = self.cumulative_gain_curve(y_true, prob_pos0, classes[0])
percentages, gains2 = self.cumulative_gain_curve(y_true, prob_pos1, classes[1])
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(percentages, gains1, lw=3, label="Class {}".format(classes[0]))
ax.plot(percentages, gains2, lw=3, label="Class {}".format(classes[1]))
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.plot([0, 1], [0, 1], "k--", lw=2, label="Baseline")
ax.set_xlabel("Percentage of sample", fontsize=text_fontsize)
ax.set_ylabel("Gain", fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.grid(True)
ax.legend(loc="lower right", fontsize=text_fontsize)
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="cumulative gain", plot=pckl)
return ax
@staticmethod
def cumulative_gain_curve(y_true, y_score, pos_label=None):
"""
        This function generates the points necessary to plot the Cumulative Gains chart.
Note: This implementation is restricted to the binary classification task.
:param y_true: (array-like, shape (n_samples)): True labels of the data.
:param y_score: (array-like, shape (n_samples)): Target scores, can either be probability estimates of
the positive class, confidence values, or non-thresholded measure of decisions (as returned by
decision_function on some classifiers).
:param pos_label: (int or str, default=None): Label considered as positive and others are considered negative
:return:
percentages (numpy.ndarray): An array containing the X-axis values for plotting the Cumulative Gains chart.
gains (numpy.ndarray): An array containing the Y-axis values for one curve of the Cumulative Gains chart.
:raise:
ValueError: If `y_true` is not composed of 2 classes. The Cumulative Gain Chart is only relevant in
binary classification.
"""
from numpy import asarray, array_equal, cumsum, arange, insert, unique, argsort
y_true, y_score = asarray(y_true), asarray(y_score)
# ensure binary classification if pos_label is not specified
classes = unique(y_true)
if pos_label is None and not (
array_equal(classes, [0, 1])
or array_equal(classes, [-1, 1])
or array_equal(classes, [0])
or array_equal(classes, [-1])
or array_equal(classes, [1])
):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.0
# make y_true a boolean vector
y_true = y_true == pos_label
sorted_indices = argsort(y_score)[::-1]
y_true = y_true[sorted_indices]
gains = cumsum(y_true)
percentages = arange(start=1, stop=len(y_true) + 1)
gains = gains / float(sum(y_true))
percentages = percentages / float(len(y_true))
gains = insert(gains, 0, [0])
percentages = insert(percentages, 0, [0])
return percentages, gains
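    # Illustrative sketch (made-up labels/scores): the static helper above can be called
    # directly to get the cumulative-gain points for one class.
    #
    #   y_true = [1, 0, 1, 1, 0]
    #   y_score = [0.9, 0.8, 0.7, 0.3, 0.2]
    #   percentages, gains = mltrack.cumulative_gain_curve(y_true, y_score, pos_label=1)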
def plot_lift_curve(
self,
mdl,
title="Lift Curve",
figsize=None,
title_fontsize="large",
text_fontsize="medium",
):
"""
        Generates the Lift Curve from labels and scores/probabilities. The lift curve is used to
        determine the effectiveness of a binary classifier. A detailed explanation can be found at
`http://www2.cs.uregina.ca/~dbd/cs831/notes/lift_chart/lift_chart.html <http://www2.cs.uregina.ca/~dbd/cs831/notes/lift_chart/lift_chart.html>`_.
The implementation here works only for binary classification.
:param mdl: object type that implements the "fit" and "predict" methods;
An object of that type which is cloned for each validation.
:param title: (string, optional): Title of the generated plot. Defaults to "Lift Curve".
:param figsize: (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``.
:param title_fontsize: (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium",
"large" or integer-values. Defaults to "large".
:param text_fontsize: (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium",
"large" or integer-values. Defaults to "medium".
:return: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
import matplotlib.pyplot as plt
from numpy import array, unique
mdl, mdl_id, _, X_test, _, y_test = self.split_train(mdl)
y_true = array(y_test)
try:
y_probas = mdl.predict_proba(X_test)
y_probas = array(y_probas)
prob_pos0 = y_probas[:, 0]
prob_pos1 = y_probas[:, 1]
except:
prob_pos = mdl.decision_function(X_test)
prob_pos1 = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
prob_pos0 = (prob_pos.max() - prob_pos) / (prob_pos.max() - prob_pos.min())
classes = unique(y_true)
if len(classes) != 2:
raise ValueError(
"Cannot calculate Lift Curve for data with "
"{} category/ies".format(len(classes))
)
# Compute Cumulative Gain Curves
percentages, gains1 = self.cumulative_gain_curve(y_true, prob_pos0, classes[0])
percentages, gains2 = self.cumulative_gain_curve(y_true, prob_pos1, classes[1])
percentages = percentages[1:]
gains1 = gains1[1:]
gains2 = gains2[1:]
gains1 = gains1 / percentages
gains2 = gains2 / percentages
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(percentages, gains1, lw=3, label="Class {}".format(classes[0]))
ax.plot(percentages, gains2, lw=3, label="Class {}".format(classes[1]))
ax.plot([0, 1], [1, 1], "k--", lw=2, label="Baseline")
ax.set_xlabel("Percentage of sample", fontsize=text_fontsize)
ax.set_ylabel("Lift", fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.grid(True)
ax.legend(loc="lower right", fontsize=text_fontsize)
from pickle import dumps
pckl = dumps(fig)
Plots.create(model_id=mdl_id, title="lift curve", plot=pckl)
return ax
def heatmap(
self,
corr_df=None,
sort_by=None,
ascending=False,
font_size=3,
cmap="gnuplot2",
idx_col="feature",
ignore=(),
):
"""
Plots a heatmap from the values of the dataframe `corr_df`
:param corr_df: value container
:param idx_col: the column whose values will be used as index
:param sort_by: dataframe will be sorted descending by values of this column.
If None, the first column is used
        :param font_size: font size, default 3
        :param cmap: color mapping. Must be one of the following:
'viridis', 'plasma', 'inferno', 'magma', 'cividis', 'Greys', 'Purples',
'Blues', 'Greens', 'Oranges', 'Reds', 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd',
'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn',
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink', 'spring',
'summer', 'autumn', 'winter', 'cool', 'Wistia', 'hot', 'afmhot',
'gist_heat', 'copper', 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic', 'twilight',
'twilight_shifted', 'hsv', 'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3', 'tab10', 'tab20', 'tab20b', 'tab20c',
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern', 'gnuplot',
'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'gist_rainbow', 'rainbow',
'jet', 'nipy_spectral', 'gist_ncar'
:return: matplotlib pyplot instance
"""
import matplotlib.pyplot as plt
from numpy import arange, amin, amax
from pandas import read_sql
ax = plt.gca()
if corr_df is None:
df = read_sql("SELECT * FROM weights", self.conn)
clmns = list(df.columns)
df = df.sort_values(
by=clmns[0] if sort_by is None else sort_by, ascending=ascending
)
if idx_col is None:
idx_col = clmns[0]
clmns.remove(idx_col)
else:
df = corr_df
clmns = list(df.columns)
# df = df.sort_values(by=clmns[0] if sort_by is None else sort_by, ascending=ascending)
if idx_col is not None:
# idx_col = clmns[0]
clmns.remove(idx_col)
for itm in ignore:
clmns.remove(itm)
data = df[clmns].values
mn, mx = amin(data), amax(data)
im = ax.imshow(data, cmap=cmap, interpolation="bilinear")
# ax.set_adjustable(adjustable='box', share=False)
ax.autoscale(False)
cbar_kw = {
"fraction": 0.2,
"ticks": [mn, 0.0, (mn + mx) / 2.0, mx],
"drawedges": False,
}
cbar = ax.figure.colorbar(im, ax=ax, aspect=max(20, len(df)), **cbar_kw)
cbarlabel = ""
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
cbar.ax.tick_params(labelsize=font_size + 1)
ax.set_xticks(arange(data.shape[1]))
ax.set_yticks(arange(data.shape[0]))
ax.set_xticklabels(clmns, fontdict={"fontsize": font_size})
if idx_col is not None:
ax.set_yticklabels(list(df[idx_col]), fontdict={"fontsize": font_size})
else:
ax.set_yticklabels(list(df.index), fontdict={"fontsize": font_size})
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(
ax.get_xticklabels(),
rotation=-305,
ha="left",
va="top",
rotation_mode="anchor",
)
# Turn spines off and create white grid.
for _, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(arange(data.shape[1] + 1) - 0.5, minor=True)
ax.set_yticks(arange(data.shape[0] + 1) - 0.5, minor=True)
ax.grid(which="minor", color="w", linestyle="-", linewidth=0)
ax.tick_params(which="minor", bottom=False, left=False)
return plt
def FeatureWeights(self, weights=("pearson", "variance"), **kwargs):
"""
        Calculates the requested weights and logs them.
        :param weights: a list of weights, a subset of {'pearson', 'variance', 'relieff',
            'surf', 'sobol', 'morris', 'delta-mmnt', 'info-gain'}
        :param kwargs: all inputs accepted by ``skrebate.ReliefF``, ``skrebate.SURF``,
            ``sensapprx.SensAprx``
:return: None
"""
from pandas import DataFrame, read_sql
self.data = read_sql("SELECT * FROM data", self.conn)
features = list(self.data.columns)
features.remove(self.target)
weights_df = read_sql("SELECT * FROM weights", self.conn)
if len(weights_df) == 0:
weights_df = DataFrame({"feature": features})
X = self.data[features].values
y = self.data[self.target].values
n_features = kwargs.get("n_features", int(len(features) / 2))
domain = None
probs = None
regressor = kwargs.get("regressor", None)
reduce = kwargs.get("reduce", True)
num_smpl = kwargs.get("num_smpl", 700)
W = {"feature": features}
for factor in weights:
if factor == "pearson":
Res = dict(self.data.corr(method="pearson").fillna(0)[self.target])
W["pearson"] = [Res[v] for v in features]
elif factor == "variance":
Res = dict(self.data.var())
W["variance"] = [Res[v] for v in features]
elif factor == "relieff":
from skrebate import ReliefF
n_neighbors = kwargs.get("n_neighbors", 80)
RF = ReliefF(n_features_to_select=n_features, n_neighbors=n_neighbors)
RF.fit(X, y)
W["relieff"] = [
RF.feature_importances_[features.index(v)] for v in features
]
elif factor == "surf":
from skrebate import SURF
RF = SURF(n_features_to_select=n_features)
RF.fit(X, y)
W["surf"] = [
RF.feature_importances_[features.index(v)] for v in features
]
elif factor == "sobol":
from .sensapprx import SensAprx
SF = SensAprx(
method="sobol",
domain=domain,
probs=probs,
regressor=regressor,
reduce=reduce,
num_smpl=num_smpl,
)
SF.fit(X, y)
domain = SF.domain
probs = SF.probs
W["sobol"] = [SF.weights_[features.index(v)] for v in features]
elif factor == "morris":
from .sensapprx import SensAprx
SF = SensAprx(
method="morris",
domain=domain,
probs=probs,
regressor=regressor,
reduce=reduce,
num_smpl=num_smpl,
)
SF.fit(X, y)
domain = SF.domain
probs = SF.probs
W["morris"] = [SF.weights_[features.index(v)] for v in features]
elif factor == "delta-mmnt":
from .sensapprx import SensAprx
SF = SensAprx(
method="delta-mmnt",
domain=domain,
probs=probs,
regressor=regressor,
reduce=reduce,
num_smpl=num_smpl,
)
SF.fit(X, y)
domain = SF.domain
probs = SF.probs
W["delta_mmnt"] = [SF.weights_[features.index(v)] for v in features]
elif factor == "info-gain":
from sklearn.feature_selection import mutual_info_classif
Res = mutual_info_classif(X, y, discrete_features=True)
W["info_gain"] = [Res[features.index(v)] for v in features]
new_w_df = | DataFrame(W) | pandas.DataFrame |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert 'index=True not supported' in str(excinfo.value)
def test_run_persistence_interval_too_long(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('48h'), # too long
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_interval_not_midnight_to_midnight(session,
site_metadata,
obs_5min_begin):
# not midnight to midnight
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=22),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2200Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'midnight to midnight' in str(excinfo.value)
def test_run_persistence_incompatible_issue(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2330Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'incompatible' in str(excinfo.value).lower()
def test_run_persistence_fx_too_short(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1min'),
run_length=pd.Timedelta('3min'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'requires observation.interval_length' in str(excinfo.value)
def test_run_persistence_incompatible_instant_fx(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert 'instantaneous forecast' in str(excinfo.value).lower()
def test_run_persistence_incompatible_instant_interval(session, site_metadata,
obs_5min_begin):
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label='instantaneous')
obs = obs_5min_begin.replace(interval_label='instantaneous',
interval_length=pd.Timedelta('10min'))
issue_time = pd.Timestamp('20190423T2300Z')
run_time = pd.Timestamp('20190422T1945Z')
with pytest.raises(ValueError) as excinfo:
main.run_persistence(session, obs, forecast, run_time,
issue_time)
assert 'identical interval length' in str(excinfo.value)
def test_verify_nwp_forecasts_compatible(ac_power_forecast_metadata):
fx0 = ac_power_forecast_metadata
fx1 = replace(fx0, run_length=pd.Timedelta('10h'), interval_label='ending')
df = pd.DataFrame({'forecast': [fx0, fx1], 'model': ['a', 'b']})
errs = main._verify_nwp_forecasts_compatible(df)
assert set(errs) == {'model', 'run_length', 'interval_label'}
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', True),
('{"is_reference_persistence_forecast": true}', False),
('{"is_reference_forecast": "True"}', True),
('{"is_reference_forecast":"True"}', True),
('is_reference_forecast" : "True"}', True),
('{"is_reference_forecast" : true, "otherkey": badjson, 9}', True),
('reference_forecast": true', False),
('{"is_reference_forecast": false}', False),
("is_reference_forecast", False)
])
def test_is_reference_forecast(string, expected):
assert main._is_reference_forecast(string) == expected
def test_find_reference_nwp_forecasts_json_err(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
extra_params = '{"model": "themodel", "is_reference_forecast": true}'
fxs = [replace(ac_power_forecast_metadata, extra_parameters=extra_params),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "yes"}'),
replace(ac_power_forecast_metadata, extra_parameters='{"is_reference_forecast": true'), # NOQA
replace(ac_power_forecast_metadata, extra_parameters='')]
out = main.find_reference_nwp_forecasts(fxs)
assert logger.warning.called
assert len(out) == 1
def test_find_reference_nwp_forecasts_no_model(ac_power_forecast_metadata,
mocker):
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
fxs = [replace(ac_power_forecast_metadata, extra_parameters='{}',
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 0
assert logger.debug.called
assert logger.error.called
def test_find_reference_nwp_forecasts_no_init(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(fxs)
assert len(out) == 2
assert out.next_issue_time.unique() == [None]
assert out.piggyback_on.unique() == ['0']
def test_find_reference_nwp_forecasts(ac_power_forecast_metadata):
fxs = [replace(ac_power_forecast_metadata,
extra_parameters='{"model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "am", "is_reference_forecast": true}', # NOQA
forecast_id='1')]
out = main.find_reference_nwp_forecasts(
fxs, pd.Timestamp('20190501T0000Z'))
assert len(out) == 2
assert out.next_issue_time.unique()[0] == pd.Timestamp('20190501T0500Z')
assert out.piggyback_on.unique() == ['0']
@pytest.fixture()
def forecast_list(ac_power_forecast_metadata):
model = 'nam_12km_cloud_cover_to_hourly_mean'
prob_dict = ac_power_forecast_metadata.to_dict()
prob_dict['constant_values'] = (0, 50, 100)
prob_dict['axis'] = 'y'
prob_dict['extra_parameters'] = '{"model": "gefs_half_deg_to_hourly_mean", "is_reference_forecast": true}' # NOQA
return [replace(ac_power_forecast_metadata,
extra_parameters=(
'{"model": "%s", "is_reference_forecast": true}'
% model),
forecast_id='0'),
replace(ac_power_forecast_metadata,
extra_parameters='{"model": "gfs_quarter_deg_hourly_to_hourly_mean", "is_reference_forecast": true}', # NOQA
forecast_id='1'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='2',
variable='ghi'),
datamodel.ProbabilisticForecast.from_dict(prob_dict),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='3',
variable='dni',
provider='Organization 2'
),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "badmodel", "is_reference_forecast": true}', # NOQA
forecast_id='4'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "6", "model": "%s", "is_reference_forecast": true}' % model, # NOQA
forecast_id='5',
variable='ghi'),
replace(ac_power_forecast_metadata,
extra_parameters='{"piggyback_on": "0", "model": "%s", "is_reference_forecast": false}' % model, # NOQA
forecast_id='7',
variable='ghi'),
]
def test_process_nwp_forecast_groups(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 4
@pytest.mark.parametrize('run_time', [None, pd.Timestamp('20190501T0000Z')])
def test_process_nwp_forecast_groups_issue_time(mocker, forecast_list,
run_time):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert post_vals.call_count == 4
run_nwp.assert_called_with(mocker.ANY, mocker.ANY, mocker.ANY,
pd.Timestamp('20190501T0500Z'))
def test_process_nwp_forecast_groups_missing_var(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[:-3])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert not logger.error.called
assert logger.warning.called
assert post_vals.call_count == 4
def test_process_nwp_forecast_groups_bad_model(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
post_vals = mocker.patch(
'solarforecastarbiter.reference_forecasts.main._post_forecast_values')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[4:-1])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert post_vals.call_count == 0
def test_process_nwp_forecast_groups_missing_runfor(mocker, forecast_list):
api = mocker.MagicMock()
run_nwp = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.run_nwp')
class res:
ac_power = [0]
ghi = [0]
dni = None
run_nwp.return_value = res
fxs = main.find_reference_nwp_forecasts(forecast_list[-2:])
logger = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.logger')
main.process_nwp_forecast_groups(api, pd.Timestamp('20190501T0000Z'), fxs)
assert logger.error.called
assert not logger.warning.called
assert api.post_forecast_values.call_count == 0
@pytest.mark.parametrize('ind', [0, 1, 2])
def test__post_forecast_values_regular(mocker, forecast_list, ind):
api = mocker.MagicMock()
fx = forecast_list[ind]
main._post_forecast_values(api, fx, [0], 'whatever')
assert api.post_forecast_values.call_count == 1
def test__post_forecast_values_cdf(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
main._post_forecast_values(api, fx, vals, 'gefs')
assert api.post_probabilistic_forecast_constant_value_values.call_count == 3 # NOQA
def test__post_forecast_values_cdf_not_gefs(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(ValueError):
main._post_forecast_values(api, fx, vals, 'gfs')
def test__post_forecast_values_cdf_less_cols(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(10)})
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, vals, 'gefs')
def test__post_forecast_values_cdf_not_df(mocker, forecast_list):
api = mocker.MagicMock()
fx = forecast_list[3]
ser = pd.Series([0, 1])
with pytest.raises(TypeError):
main._post_forecast_values(api, fx, ser, 'gefs')
def test__post_forecast_values_cdf_no_cv_match(mocker, forecast_list):
api = mocker.MagicMock()
fx = replace(forecast_list[3], constant_values=(
replace(forecast_list[3].constant_values[0], constant_value=3.0
),))
ser = pd.Series([0, 1])
vals = pd.DataFrame({i: ser for i in range(21)})
with pytest.raises(KeyError):
main._post_forecast_values(api, fx, vals, 'gefs')
@pytest.mark.parametrize('issue_buffer,empty', [
(pd.Timedelta('10h'), False),
(pd.Timedelta('1h'), True),
(pd.Timedelta('5h'), False)
])
def test_make_latest_nwp_forecasts(forecast_list, mocker, issue_buffer, empty):
session = mocker.patch('solarforecastarbiter.io.api.APISession')
session.return_value.get_user_info.return_value = {'organization': ''}
session.return_value.list_forecasts.return_value = forecast_list[:-3]
session.return_value.list_probabilistic_forecasts.return_value = []
run_time = pd.Timestamp('20190501T0000Z')
# last fx has different org
fxdf = main.find_reference_nwp_forecasts(forecast_list[:-4], run_time)
process = mocker.patch(
'solarforecastarbiter.reference_forecasts.main.process_nwp_forecast_groups') # NOQA
main.make_latest_nwp_forecasts('', run_time, issue_buffer)
if empty:
process.assert_not_called()
else:
assert_frame_equal(process.call_args[0][-1], fxdf)
@pytest.mark.parametrize('string,expected', [
('{"is_reference_forecast": true}', False),
('{"is_reference_persistence_forecast": true}', True),
('{"is_reference_persistence_forecast": "True"}', True),
('{"is_reference_persistence_forecast":"True"}', True),
('is_reference_persistence_forecast" : "True"}', True),
('{"is_reference_persistence_forecast" : true, "otherkey": badjson, 9}',
True),
('reference_persistence_forecast": true', False),
('{"is_reference_persistence_forecast": false}', False),
("is_reference_persistence_forecast", False)
])
def test_is_reference_persistence_forecast(string, expected):
assert main._is_reference_persistence_forecast(string) == expected
@pytest.fixture
def perst_fx_obs(mocker, ac_power_observation_metadata,
ac_power_forecast_metadata):
observations = [
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
),
ac_power_observation_metadata.replace(
observation_id=str(uuid.uuid1())
)
]
def make_extra(obs):
extra = (
'{"is_reference_persistence_forecast": true,'
f'"observation_id": "{obs.observation_id}"'
'}'
)
return extra
forecasts = [
ac_power_forecast_metadata.replace(
name='FX0',
extra_parameters=make_extra(observations[0]),
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX no persist',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
),
ac_power_forecast_metadata.replace(
name='FX bad js',
extra_parameters='is_reference_persistence_forecast": true other',
run_length=pd.Timedelta('1h'),
forecast_id=str(uuid.uuid1())
)
]
return forecasts, observations
def test_generate_reference_persistence_forecast_parameters(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
# one hour ahead forecast, so 14Z was made at 13Z
# enough data to do 14Z and 15Z issue times but not 16Z
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 2
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T14:00Z'),
False
)
assert param_list[1] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_forecast_yet(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 1
assert param_list[0] == (
forecasts[0], observations[0],
pd.Timestamp('2020-05-20T15:00Z'),
False
)
def test_generate_reference_persistence_forecast_parameters_no_data(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.NaT, pd.NaT)
session.get_forecast_time_range.return_value = (
pd.NaT, pd.NaT)
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_diff_org(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': 'a new one'}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
session.get_forecast_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T14:00Z'))
max_run_time = pd.Timestamp('2020-05-20T16:00Z')
param_gen = main.generate_reference_persistence_forecast_parameters(
session, forecasts, observations, max_run_time
)
assert isinstance(param_gen, types.GeneratorType)
param_list = list(param_gen)
assert len(param_list) == 0
def test_generate_reference_persistence_forecast_parameters_not_reference_fx(
mocker, perst_fx_obs):
forecasts, observations = perst_fx_obs
forecasts = [fx.replace(extra_parameters='') for fx in forecasts]
session = mocker.MagicMock()
session.get_user_info.return_value = {'organization': ''}
session.get_observation_time_range.return_value = (
pd.Timestamp('2019-01-01T12:00Z'), pd.Timestamp('2020-05-20T15:33Z'))
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default False
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame with columns of type bool, containing some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=['a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df = df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series([unit, unit], index=['A', 'B'], dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series([unit, np.nan], index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
expected = Series(dtype=np.bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
import os
import math
from pathlib import Path
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from matplotlib import gridspec
import pandas as pd
import seaborn as sns
from matplotlib.ticker import (MultipleLocator)
from pymongo import MongoClient
from main import Util
from main import StatisticalTests
np.warnings.filterwarnings('ignore')
sns.set_context("paper")
sns.set_style("whitegrid")
# sns.despine()
CUSTOM_SIZE_Y_AXES_VALUES = "large"
CUSTOM_SIZE_X_AXES_VALUES = 10
SHOW_FIGURE = False
df_RQ = pd.DataFrame()  # populated by generateDataSet()
# columnsToLatex = ['NTSSTPE', 'NTSCE', 'NTSNIE', 'NTS', 'NTSSTPE/NTS', 'NTSCE/NTS', 'NTSNIE/NTS', 'NCTSSTPE',
# 'NCTSCE', 'NCTNIE', 'NCTS', 'NCTSSTPE/NCTS', 'NCTNIE/NCTS', 'NCTSCE/NCTS', 'NCTSCE/NTSCE', 'NCTSSTPE/NTSSTPE', 'NCTSSTPE/NTSSTPE_REAL']
columnsToLatex = ['NCTS', 'NTS', 'NCTS/NTS', 'tagCoverage']
#columnsToLatex = ['NCTSCE', 'NTSCE', 'NCTSCE/NTSCE', 'NCTSSTPE', 'NTSSTPE', 'NCTSSTPE/NTSSTPE']
RQ_NUMBER = "RQ3_RQ4"
OUTPUT_PATH = "RQS/" + RQ_NUMBER
OUTPUT_FIGURES_PATH = Util.create_new_dirs(OUTPUT_PATH + "/figures/")
fileName = "res_"+RQ_NUMBER+".txt"
f = open(Path(OUTPUT_PATH) / fileName, "w+")
############################## TABLE GENERATION ##############################
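# generateDataSet() builds the global df_RQ DataFrame from the MongoDB collection, skipping tags whose
# coverage data is missing or flagged as problematic, and computes the coverage ratios used by the plots below.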
def generateDataSet():
global df_RQ
dataSet = []
for doc in Util.mongo_collection.find():
if doc['tagCoverageWithProblems'] is True or doc['tagCoverage'] is None or doc['tagCoverage'] == 0.0:
#if doc['tagCoverage'] is None or doc['tagCoverage'] == 0.0:
continue
project = doc["projectName"]
tagCreatedOn = datetime.strptime(doc["tagCreatedAt"], '%d/%m/%Y %H:%M:%S').year
tagCoverage = doc['tagCoverage']/100
projectCreatedOn = datetime.strptime(doc["projectDetails"]["createdAt"], '%d/%m/%Y %H:%M:%S').year
platform = doc['projectDetails']["platform"]
domain = Util.adjust_domain_name(doc['projectDetails']["domain"])
stars = doc['projectDetails']["stars"]
contributors = doc['projectDetails']["contributors"]
NTSSTPE = doc["statistics"]['totalNumberOfThrowStatementsStandardOrThirdPartyExceptions']
NTSCE = doc["statistics"]['totalNumberOfThrowStatementCustomExceptions']
NTSNIE = doc["statistics"]['totalNumberOfThrowStatementNotIdentifiedExceptions']
NTS = doc["statistics"]['totalNumberOfThrowStatements']
NTSSTPE_NTS = round(np.float64(NTSSTPE) / NTS, 4)
NTSCE_NTS = round(np.float64(NTSCE) / NTS, 4)
NTSNIE_NTS = round(np.float64(NTSNIE) / NTS, 4)
NEBTM = doc["statistics"]['totalNumberOfExceptionalBehaviorTestMethods']
NTM = doc["statistics"]['totalNumberOfTestMethods']
NEBTM_NTM = round(np.float64(NEBTM) / np.float64(NTM), 4)
#Tested and Used Exceptions
NDTE = doc["statistics"]['totalNumberOfDistinctTestedExceptions'] # distinct used and tested exceptions
NDUE = doc["statistics"]['totalNumberOfDistinctUsedExceptions'] # distinct used exceptions
NDTE_NDUE = round(np.float64(NDTE) / np.float64(NDUE), 4)
# Only covered exceptions
NCTSSTPE = doc["statistics"]['totalNumberOfCoveredThrowStatementsStandardOrThirdPartyExceptions']
NCTSCE = doc["statistics"]['totalNumberOfCoveredThrowStatementCustomExceptions']
NCTNIE = doc["statistics"]['totalNumberOfCoveredThrowStatementNotIdentifiedExceptions']
NCTS = doc["statistics"]['totalNumberOfCoveredThrowStatements']
NCTSSTPE_NCTS = round(np.float64(NCTSSTPE) / NCTS, 4)
NCTSCE_NCTS = round(np.float64(NCTSCE) / NCTS, 4)
NCTNIE_NCTS = round(np.float64(NCTNIE) / NCTS, 4)
NCTS_NTS = round(np.float64(NCTS) / NTS, 4)
NCTSCE_NTSCE = round(np.float64(NCTSCE) / NTSCE, 4)
if np.isnan(NCTSCE_NTSCE):
NCTSSTPE_NTSSTPE = float("NaN")
else:
NCTSSTPE_NTSSTPE = round(np.float64(NCTSSTPE) / NTSSTPE, 4)
NCTSSTPE_NTSSTPE_REAL = round(np.float64(NCTSSTPE) / NTSSTPE, 4)
dataSetRQ3 = [project, tagCreatedOn, tagCoverage, projectCreatedOn, platform, domain, stars,
contributors, NEBTM_NTM, NTSSTPE, NTSCE, NTSNIE, NTS, NTSSTPE_NTS, NTSCE_NTS, NTSNIE_NTS, NCTSSTPE,
NCTSCE, NCTNIE, NCTS, NCTSSTPE_NCTS, NCTSCE_NCTS, NCTNIE_NCTS, NCTS_NTS, NCTSCE_NTSCE, NCTSSTPE_NTSSTPE, NCTSSTPE_NTSSTPE_REAL, NDTE_NDUE]
dataSet.append(dataSetRQ3)
df_RQ = pd.DataFrame(dataSet,
columns=['project', 'tagCreatedOn', 'tagCoverage', 'projectCreatedOn', 'platform', 'domain', 'stars',
'contributors','NEBTM/NTM', 'NTSSTPE', 'NTSCE', 'NTSNIE', 'NTS', 'NTSSTPE/NTS', 'NTSCE/NTS', 'NTSNIE/NTS', 'NCTSSTPE',
'NCTSCE', 'NCTNIE', 'NCTS', 'NCTSSTPE/NCTS', 'NCTNIE/NCTS', 'NCTSCE/NCTS', 'NCTS/NTS', 'NCTSCE/NTSCE', 'NCTSSTPE/NTSSTPE', 'NCTSSTPE/NTSSTPE_REAL', 'NDTE/NDUE'])
Util.format_to_csv(df_RQ, "RQs/" + RQ_NUMBER + "/", "table" + RQ_NUMBER, RQ_NUMBER, columnsToLatex)
############################## RQ3 - PART 1 ##############################
def barPlotNumberOfProjects():
platformList = df_RQ["platform"].unique()
domainList = df_RQ["domain"].unique()
df_aux = []
totalDfSize = len(df_RQ)
f.write(f'Number of projects with coverage data: {totalDfSize}\n')
for platform in platformList:
df_platform = df_RQ[(df_RQ['platform'] == platform)]
totalPlatformDfSize = len(df_platform)
ratioAux = round(totalPlatformDfSize/totalDfSize, 4)
f.write(f'{platform}_Number of projects with coverage data: {totalPlatformDfSize} out of {totalDfSize}({ratioAux})\n')
for domain in domainList:
df_domain = df_platform[(df_platform["domain"] == domain)]
totalDomainDfSize = len(df_domain)
df_aux.append([platform, domain, totalDomainDfSize])
ratioAux = round(totalDomainDfSize / totalDfSize, 4)
f.write(f'{platform}_{domain}_Number of projects with coverage data: {totalDomainDfSize} out of {totalDfSize}({ratioAux})\n')
df_counter = pd.DataFrame(df_aux, columns=["platform", "domain", "count"])
# g = sns.catplot(x="domain", y="count", col="platform", data=df_counter, kind="bar",
# palette=Util.COLOR_DEGRADE, legend=False, legend_out=False, col_order=Util.FIGURE_ORDER)
g = sns.catplot(x="domain", y="count", col="platform", data=df_counter, kind="bar",
legend=False, legend_out=False, col_order=Util.FIGURE_ORDER, color="#1b69af")
g.fig.subplots_adjust(wspace=.05, hspace=.05)
g.fig.set_figheight(3)
g.fig.set_figwidth(6)
for ax in g.axes.flat:
# ax.get_yaxis().set_tick_params(labelsize='x-large', which="major")
# labels around the plots
# ax.set_xlabel(ax.get_xlabel(), fontsize='20')
ax.set_xlabel("", fontsize=Util.SIZE_AXES_LABELS, rotation=30)
ax.set_ylabel(ax.get_ylabel(), fontsize=Util.SIZE_AXES_LABELS)
# titles above the plots
if ax.get_title():
ax.set_title(ax.get_title().split('=')[1],
fontsize=Util.SIZE_AXES_TITLE)
# values on top of the bars
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x() + p.get_width() / 2., height + 0.1, int(height), ha="center", fontsize=Util.SIZE_BAR_VALUES)
ax.yaxis.grid(True, linewidth=1, which="major")
ax.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_TITLE, which="major")
ax.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_TITLE, rotation=20)
ax_aux = g.axes[0, 0]
ax_aux.set_xlabel('', fontsize=Util.SIZE_AXES_LABELS)
ax_aux.set_ylabel('Number of Projects', fontsize=Util.SIZE_AXES_LABELS)
# Show graphic
plt.tight_layout()
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_numberOfProjectsWithCoverageData_bar.pdf', bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_numberOfProjectsWithCoverageData_bar.png', bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def violinPlotTagCoverage():
fig = plt.subplots(figsize=(6, 2))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1.5], wspace=.02, hspace=.05)
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
df_coverageAboveSixty = df_RQ[(df_RQ['tagCoverage'] >= 0.6)]
f.write(f'Number of projects with coverage greater or equal to 60%: {len(df_coverageAboveSixty)}\n')
df_withoutNaN = df_RQ.dropna(subset=['tagCoverage'])
sns.violinplot(x="domain", y="tagCoverage", data=df_RQ, cut=0, inner="box", hue="platform",
hue_order=Util.FIGURE_ORDER, scale="width",
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax0, saturation=1)
sns.violinplot(x="platform", y="tagCoverage", data=df_RQ, cut=0, inner="box", scale="width",
order=Util.FIGURE_ORDER,
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax1, saturation=1)
ax0.set(xlabel='', ylabel='', title='', yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
ax0.set_ylabel(ax0.get_ylabel(), fontsize=Util.SIZE_AXES_LABELS)
# ax0.set_ylim(0, 0.4)
# ax0.legend(title="", loc="upper center")
ax0.legend(title="", loc="upper center", bbox_to_anchor=(0.75, 1.20), ncol=3, frameon=False)
ax0.yaxis.grid(True, linewidth=1, which="major")
plt.setp(ax0.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax0.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
ax0.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax0.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax0.xaxis.set_ticks_position('bottom')
ax0.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set(xlabel='', ylabel='', title='', xticks=[1], yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
# ax1.set_xticks(5)
ax1.set_xticklabels(['All'])
ax1.yaxis.grid(True, linewidth=1, which="major")
ax1.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax1.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set_yticklabels([])
# ax1.set_ylim(0, 0.4)
# Show graphic
plt.tight_layout()
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_Description_TagCoverage.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_Description_TagCoverage.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def violinPlotRatio_NCTS_NTS():
platformList = df_RQ["platform"].unique()
domainList = df_RQ["domain"].unique()
Util.count_number_of_projects_lte_to_ratio(df_RQ, "tagCoverage", 0.6, f)
Util.count_number_of_projects_lte_to_ratio(df_RQ, "NCTS/NTS", 0.6, f)
Util.count_number_of_projects_gte_to_ratio(df_RQ, "tagCoverage", 0.6, f)
Util.count_number_of_projects_gte_to_ratio(df_RQ, "NCTS/NTS", 0.6, f)
Util.count_number_of_projects_gte_to_ratio(df_RQ, "tagCoverage", 0.8, f)
Util.count_number_of_projects_gte_to_ratio(df_RQ, "NCTS/NTS", 0.8, f)
Util.count_number_of_projects_gte_to_ratio(df_RQ, "tagCoverage", 0.9, f)
Util.count_number_of_projects_gte_to_ratio(df_RQ, "NCTS/NTS", 0.9, f)
fig = plt.subplots(figsize=(6, 2))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1.5], wspace=.02, hspace=.05)
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
df_withoutNaN = df_RQ.dropna(subset=['NCTS/NTS'])
sns.violinplot(x="domain", y="NCTS/NTS", data=df_RQ, cut=0, inner="box", hue="platform",
hue_order=Util.FIGURE_ORDER, scale="width",
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax0, saturation=1)
sns.violinplot(x="platform", y="NCTS/NTS", data=df_RQ, cut=0, inner="box", scale="width",
order=Util.FIGURE_ORDER,
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax1, saturation=1)
ax0.set(xlabel='', ylabel='', title='', yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
ax0.set_ylabel(ax0.get_ylabel(), fontsize=Util.SIZE_AXES_LABELS)
# ax0.set_ylim(0, 0.4)
# ax0.legend(title="", loc="upper center")
ax0.legend(title="", loc="upper center", bbox_to_anchor=(0.75, 1.20), ncol=3, frameon=False)
ax0.yaxis.grid(True, linewidth=1, which="major")
plt.setp(ax0.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax0.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
ax0.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax0.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax0.xaxis.set_ticks_position('bottom')
ax0.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set(xlabel='', ylabel='', title='', xticks=[1], yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
# ax1.set_xticks(5)
ax1.set_xticklabels(['All'])
ax1.yaxis.grid(True, linewidth=1, which="major")
ax1.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax1.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set_yticklabels([])
# ax1.set_ylim(0, 0.4)
# Show graphic
plt.tight_layout()
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_NCTS_NTS_platform.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_NCTS_NTS_platform.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def violinPlotRatio_Overall_NCTS_NTS():
ax = sns.violinplot(y="NCTS/NTS", data=df_RQ, cut=0, inner="box",
linewidth=1.5, saturation=1)
ax.set(xlabel='', ylabel='', title='', yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_ylabel(ax.get_ylabel(), fontsize=Util.SIZE_AXES_LABELS)
# ax0.set_ylim(0, 0.4)
# ax0.legend(title="", loc="upper center")
#ax.legend(title="", loc="upper center", bbox_to_anchor=(0.75, 1.20), ncol=3, frameon=False)
ax.yaxis.grid(True, linewidth=1, which="major")
plt.setp(ax.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
ax.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
# Show graphic
plt.tight_layout()
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_NCTS_NTS_overall.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_NCTS_NTS_overall.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def pairPlotCoverages():
g = sns.lmplot(data=df_RQ,
x='NCTS/NTS', y='tagCoverage', hue="platform", fit_reg=False, palette=Util.COLOR_PALETTE_COLORFUL, legend=False, hue_order=Util.FIGURE_ORDER)
#plt.tight_layout()
ax_aux = g.axes[0, 0]
ax_aux.legend(title="", loc="upper center", bbox_to_anchor=(0.5, 1.1), ncol=3, frameon=False)
ax_aux.set_xlabel('NCTS/NTS', fontsize=Util.SIZE_AXES_LABELS)
ax_aux.set_ylabel('Line Coverage', fontsize=Util.SIZE_AXES_LABELS)
ax_aux.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax_aux.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
plt.setp(ax_aux.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax_aux.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_pairPlot.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_pairPlot.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def pairPlotRatioOfMethodsxCoverage():
g = sns.lmplot(data=df_RQ,
x='NEBTM/NTM', y='NCTS/NTS', hue="platform", fit_reg=False, palette=Util.COLOR_PALETTE_COLORFUL, legend=False, markers=["o", "x", "+"], hue_order=Util.FIGURE_ORDER)
#col="platform"
#plt.tight_layout()
ax_aux = g.axes[0, 0]
ax_aux.legend(title="", loc="upper center", bbox_to_anchor=(0.5, 1.1), ncol=3, frameon=False)
ax_aux.set_xlabel('NEBTM/NTM', fontsize=Util.SIZE_AXES_LABELS)
ax_aux.set_ylabel('Throw Statement Line Coverage', fontsize=Util.SIZE_AXES_LABELS)
ax_aux.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax_aux.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
plt.setp(ax_aux.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax_aux.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_pairPlotTestMethodsCoverage.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_pairPlotTestMethodsCoverage.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def pairPlotTestedExceptionsxCoverage():
g = sns.lmplot(data=df_RQ,
x='NDTE/NDUE', y='NCTS/NTS', hue="platform", fit_reg=False, palette=Util.COLOR_PALETTE_COLORFUL, legend=False, markers=["o", "x", "+"], hue_order=Util.FIGURE_ORDER)
#col="platform"
#plt.tight_layout()
ax_aux = g.axes[0, 0]
ax_aux.legend(title="", loc="upper center", bbox_to_anchor=(0.5, 1.1), ncol=3, frameon=False)
ax_aux.set_xlabel('NDTE/NDUE', fontsize=Util.SIZE_AXES_LABELS)
ax_aux.set_ylabel('Throw Statement Line Coverage', fontsize=Util.SIZE_AXES_LABELS)
ax_aux.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax_aux.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
plt.setp(ax_aux.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax_aux.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_pairPlotTestedExceptionsxCoverage.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_pairPlotTestedExceptionsxCoverage.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def barPlotCoverages():
f, ax = plt.subplots(figsize=(5, 10))
df_sorted = df_RQ.sort_values("tagCoverage", ascending=False)
#sns.set_color_codes("muted")
sns.barplot(data=df_sorted,
x='tagCoverage', y='project', color='#494848', label='Line Coverage')
#sns.set_color_codes("pastel")
kwargs = {'alpha': 0.7}
sns.barplot(data=df_sorted,
x='NCTS/NTS', y='project', color='#D4D4D4', **kwargs, label='Throw Statement Line Coverage')
ax.legend(title="", loc="upper center", bbox_to_anchor=(0.5, 1.03), ncol=2, frameon=False)
#ax.set_xlabel('NCTS/NTS', fontsize=Util.SIZE_AXES_LABELS)
ax.set_xlabel('', fontsize=Util.SIZE_AXES_LABELS)
ax.set_ylabel('', fontsize=Util.SIZE_AXES_LABELS)
#ax.set_ylabel('Line Coverage', fontsize=Util.SIZE_AXES_LABELS)
ax.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES, gridOn=True)
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ytickslabels = []
for p in ax.get_yticklabels():
flag = ''
platformAux = df_RQ.loc[df_RQ['project'] == p.get_text(), ['platform']].values[0][0]
if platformAux == 'Desktop/Server':
flag = '*'
elif platformAux == 'Mobile':
flag = '**'
elif platformAux == 'Multi-platform':
flag = '***'
else:
flag = '???'
ytickslabels.append(p.get_text() + flag)
ax.set_yticklabels(ytickslabels)
#ax.text(0.73, 45.3, '* Desktop/Server\n** Mobile\n***Multi-platform', style='italic', fontsize=Util.SIZE_AXES_VALUES,
ax.text(0.73, 45.45, '* Desktop/Server\n** Mobile\n***Multi-platform', style='italic', fontsize=Util.SIZE_AXES_VALUES,
bbox={'facecolor': 'gray', 'alpha': 0.2, 'pad': 10})
plt.setp(ax.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
#sns.despine(left=False, right=True)
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_barPlotCoverageRatios.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_barPlotCoverageRatios.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def barPlotCoveragesSideBySide():
f, ax = plt.subplots(figsize=(5, 10))
df_sorted = df_RQ.sort_values('tagCoverage', ascending=False)
#df_aux = df_sorted[['project', 'tagCoverage', 'NCTS/NTS']].set_index('project')
df_sorted.rename(columns={'tagCoverage': 'Line Coverage', 'NCTS/NTS': 'Throw Statement Line Coverage'}, inplace=True)
df_aux = df_sorted.melt(id_vars='project', value_vars=['Line Coverage', 'Throw Statement Line Coverage'])
sns.barplot(data=df_aux,
x='value', y='project', hue='variable', palette=['#494848', '#B4B4B4'])
# #sns.set_color_codes("muted")
# sns.barplot(data=df_sorted,
# x='tagCoverage', y='project', color='#494848', label='Line Coverage')
#
# #sns.set_color_codes("pastel")
# kwargs = {'alpha': 0.7}
# sns.barplot(data=df_sorted,
# x='NCTS/NTS', y='project', color='#D4D4D4', **kwargs, label='Throw Statements Line Coverage')
ax.legend(title="", loc="upper center", bbox_to_anchor=(0.5, 1.03), ncol=2, frameon=False)
ax.set_xlabel('', fontsize=Util.SIZE_AXES_LABELS)
ax.set_ylabel('', fontsize=Util.SIZE_AXES_LABELS)
#ax.set_ylabel('Line Coverage', fontsize=Util.SIZE_AXES_LABELS)
ax.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES, gridOn=True)
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ytickslabels = []
for p in ax.get_yticklabels():
flag = ''
platformAux = df_RQ.loc[df_RQ['project'] == p.get_text(), ['platform']].values[0][0]
if platformAux == 'Desktop/Server':
flag = '*'
elif platformAux == 'Mobile':
flag = '**'
elif platformAux == 'Multi-platform':
flag = '***'
else:
flag = '???'
ytickslabels.append(p.get_text() + flag)
ax.set_yticklabels(ytickslabels)
#ax.text(0.73, 45.3, '* Desktop/Server\n** Mobile\n***Multi-platform', style='italic', fontsize=Util.SIZE_AXES_VALUES,
ax.text(0.73, 37.65, '* Desktop/Server\n** Mobile\n***Multi-platform', style='italic', fontsize=Util.SIZE_AXES_VALUES,
bbox={'facecolor': 'gray', 'alpha': 0.2, 'pad': 10})
plt.setp(ax.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
#sns.despine(left=False, right=True)
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_barPlotCoverageRatios_sideBySide.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_barPlotCoverageRatios_sideBySide.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def coveredThrowStatementsRatio_histogram():
bins = np.arange(0, 1, 0.2)
g = sns.FacetGrid(df_RQ, sharex=False)
g = (g.map(sns.distplot, "NCTS/NTS", kde=False, rug=True, bins=bins, color="#494848")).set(xticks=[0.0, 0.2, 0.4, 0.6, 0.8, 1])
for ax in g.axes.flat:
# ax.get_yaxis().set_tick_params(labelsize='x-large', which="major")
# labels around the plots
# ax.set_xlabel(ax.get_xlabel(), fontsize='20')
ax.set_xlabel('Throw Statement Line Coverage', fontsize=Util.SIZE_AXES_LABELS)
ax.set_ylabel(ax.get_ylabel(), fontsize=Util.SIZE_AXES_LABELS)
# titles above the plots
if ax.get_title():
ax.set_title(ax.get_title().split('=')[1],
fontsize=Util.SIZE_AXES_TITLE)
ax.yaxis.grid(True, linewidth=1, which="major")
ax.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_TITLE, which="major")
ax.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_TITLE)
ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
plt.tight_layout()
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_coveredThrowStatementsRatio_histogram.png',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ3_coveredThrowStatementsRatio_histogram.pdf', bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
############ RQ4 ####################
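# RQ4: ratios of throw statements using custom exceptions (NTSCE) versus standard/third-party
# exceptions (NTSSTPE), relative to all throw statements (NTS).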
def violinPlotRatio_NTSCE_NTS():
f.write("\n\n ############## RQ4 - PART 1 ##############\n")
fig = plt.subplots(figsize=(6, 2))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1.5], wspace=.02, hspace=.05)
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
sns.violinplot(x="domain", y="NTSCE/NTS", data=df_RQ, cut=0, inner="box", hue="platform",
hue_order=Util.FIGURE_ORDER, scale="width",
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax0, saturation=1)
sns.violinplot(x="platform", y="NTSCE/NTS", data=df_RQ, cut=0, inner="box", scale="width",
order=Util.FIGURE_ORDER,
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax1, saturation=1)
ax0.set(xlabel='', ylabel='', title='', yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
ax0.set_ylabel(ax0.get_ylabel(), fontsize=Util.SIZE_AXES_LABELS)
ax0.legend(title="", loc="upper center", bbox_to_anchor=(0.75, 1.20), ncol=3, frameon=False)
ax0.yaxis.grid(True, linewidth=1, which="major")
plt.setp(ax0.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax0.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
ax0.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax0.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax0.xaxis.set_ticks_position('bottom')
ax0.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set(xlabel='', ylabel='', title='', xticks=[1], yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
ax1.set_xticklabels(['All'])
ax1.yaxis.grid(True, linewidth=1, which="major")
ax1.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax1.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set_yticklabels([])
# Show graphic
plt.tight_layout()
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ4_NTSCE_NTS_platform.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ4_NTSCE_NTS_platform.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def violinPlotRatio_NTSSTPE_NTS():
platformList = df_RQ["platform"].unique()
domainList = df_RQ["domain"].unique()
# df_aux = []
# for platform in platformList:
# ratio = 0.2
# df_platform_total = df_RQ[(df_RQ['platform'] == platform)]
# df_platform_belowTo20 = df_RQ[
# (df_RQ['platform'] == platform) & (df_RQ['NEBTM/NTM'] <= ratio)]
# f.write("NEBTM_NTM <= 20% | " + platform + ":" + str(len(df_platform_belowTo20)) + " out of " + str(
# len(df_platform_total)) + str("({:.2%})").format(len(df_platform_belowTo20) / len(df_platform_total))\n)
# for domain in domainList:
# df_domain_total = df_RQ[(df_RQ["domain"] == domain) & (df_RQ['platform'] == platform)]
# df_belowTo20 = df_RQ[
# (df_RQ["domain"] == domain) & (df_RQ['platform'] == platform) & (df_RQ['NEBTM/NTM'] <= ratio)]
# f.write("NEBTM_NTM <= 20% | " + platform + "/" + domain + ":" + str(len(df_belowTo20)) + " out of " + str(
# len(df_domain_total)) + str("({:.2%})").format(len(df_belowTo20) / len(df_domain_total))\n)
fig = plt.subplots(figsize=(6, 2))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1.5], wspace=.02, hspace=.05)
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
df_withoutNaN = df_RQ.dropna(subset=['NTSCE/NTS'])
sns.violinplot(x="domain", y="NTSSTPE/NTS", data=df_RQ, cut=0, inner="box", hue="platform",
hue_order=Util.FIGURE_ORDER, scale="width",
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax0, saturation=1)
sns.violinplot(x="platform", y="NTSSTPE/NTS", data=df_RQ, cut=0, inner="box", scale="width",
order=Util.FIGURE_ORDER,
palette=Util.COLOR_PALETTE, linewidth=1.5, ax=ax1, saturation=1)
ax0.set(xlabel='', ylabel='', title='', yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
ax0.set_ylabel(ax0.get_ylabel(), fontsize=Util.SIZE_AXES_LABELS)
# ax0.set_ylim(0, 0.4)
# ax0.legend(title="", loc="upper center")
ax0.legend(title="", loc="upper center", bbox_to_anchor=(0.75, 1.20), ncol=3, frameon=False)
ax0.yaxis.grid(True, linewidth=1, which="major")
plt.setp(ax0.get_legend().get_texts(), fontsize=Util.SIZE_LEGEND_TEXT) # for legend text
plt.setp(ax0.get_legend().get_title(), fontsize=Util.SIZE_LEGEND_TITLE) # for legend title
ax0.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax0.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax0.xaxis.set_ticks_position('bottom')
ax0.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set(xlabel='', ylabel='', title='', xticks=[1], yticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
# ax1.set_xticks(5)
ax1.set_xticklabels(['All'])
ax1.yaxis.grid(True, linewidth=1, which="major")
ax1.get_yaxis().set_tick_params(labelsize=Util.SIZE_AXES_VALUES, which="major")
ax1.get_xaxis().set_tick_params(direction='out', labelsize=Util.SIZE_AXES_VALUES)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1, decimals=0))
ax1.set_yticklabels([])
# ax1.set_ylim(0, 0.4)
# Show graphic
plt.tight_layout()
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ4_NTSSTPE_NTS_platform.pdf',
bbox_inches="tight")
plt.savefig(OUTPUT_FIGURES_PATH / 'RQ4_NTSSTPE_NTS_platform.png',
bbox_inches="tight")
if (SHOW_FIGURE):
plt.show()
plt.clf()
def countProjectsWhere_NTSCE_RATIO_isBigger(df_aux):
df_clean = df_aux[(df_aux['NTSCE/NTS'] > df_aux['NTSSTPE/NTS'])]
f.write(f'Number of projects where NTSCE/NTS > NTSSTPE/NTS = {len(df_clean)} : {df_clean["project"].values.tolist()}\n')
def violinPlotRatio_NTSCE_NTS_NTSSTPE_NTS_MELTED():
fig, ax = plt.subplots(figsize=(6, 2))
countProjectsWhere_NTSCE_RATIO_isBigger(df_RQ.dropna(subset=['project', 'NTSCE/NTS', 'NTSSTPE/NTS']))
df_withoutNaN = df_RQ.dropna(subset=['NTSCE/NTS', 'NTSSTPE/NTS'])
df_melted = pd.melt(df_withoutNaN, id_vars=["project", "platform"], value_vars=['NTSCE/NTS', 'NTSSTPE/NTS'])
import numpy as np
import pandas as pd
import re
class TweetAnalyzer():
"""
Analyzing and categorizing content from tweets.
"""
def clean_tweet(self, tweet):
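# Strip @mentions, URLs, and any remaining non-alphanumeric characters, then collapse whitespace.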
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def tweets_to_data_frame1(self, tweets):
df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['tweets'])
df['id'] = np.array([tweet.id for tweet in tweets])
df['len'] = np.array([len(tweet.text) for tweet in tweets])
df['date'] = np.array([tweet.created_at for tweet in tweets])
df['source'] = np.array([tweet.source for tweet in tweets])
df['likes'] = np.array([tweet.favorite_count for tweet in tweets])
df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])
#df['tweets'] = np.array([api.u for tweet in tweets])
return df
def tweets_to_data_frame2(self, tweets):
df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['tweets'])
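# Minimal usage sketch (illustrative only; `tweets` is assumed to be a list of
# tweepy Status objects fetched elsewhere and is not defined in this file):
# analyzer = TweetAnalyzer()
# cleaned = analyzer.clean_tweet("Loved it! https://t.co/xyz @someone")
# df = analyzer.tweets_to_data_frame1(tweets)
# print(df[['tweets', 'likes', 'retweets']].head())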
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": | pd.date_range("20130101", periods=3) | pandas.date_range |
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.topology import Molecule, Topology
from biopandas.pdb import PandasPdb
import matplotlib.pyplot as plt
from operator import itemgetter
from mendeleev import element
from simtk.openmm import app
from scipy import optimize
import subprocess as sp
from sys import stdout
import pandas as pd
import numpy as np
import statistics
import itertools
import parmed
import pickle
import shutil
import simtk
import scipy
import time
import math
import sys
import ast
import re
import os
BOHRS_PER_ANGSTROM = 0.529
HARTREE_PER_KCAL_MOL = 627.509391
#kcal/mol * A^2 to kJ/mol * nm^2
KCAL_MOL_PER_KJ_MOL = 4.184
ANGSTROMS_PER_NM = 10.0
RADIANS_PER_DEGREE = np.pi / 180.0
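# Empirical vibrational frequency scaling factors keyed by "<functional> <basis set>";
# see get_vibrational_scaling() below.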
method_basis_scale_dict = {
"HF STO-3G": 0.817,
"HF 3-21G": 0.906,
"HF 3-21G*": 0.903,
"HF 6-31G": 0.903,
"HF 6-31G*": 0.899,
"HF 6-31G**": 0.903,
"HF 6-31+G**": 0.904,
"HF 6-311G*": 0.904,
"HF 6-311G**": 0.909,
"HF TZVP": 0.909,
"HF cc-pVDZ": 0.908,
"HF cc-pVTZ": 0.91,
"HF cc-pVQZ": 0.908,
"HF aug-cc-pVDZ": 0.911,
"HF aug-cc-pVTZ": 0.91,
"HF aug-cc-pVQZ": 0.909,
"HF daug-cc-pVDZ": 0.912,
"HF daug-cc-pVTZ": 0.905,
"ROHF 3-21G": 0.907,
"ROHF 3-21G*": 0.909,
"ROHF 6-31G": 0.895,
"ROHF 6-31G*": 0.89,
"ROHF 6-31G**": 0.855,
"ROHF 6-31+G**": 0.856,
"ROHF 6-311G*": 0.856,
"ROHF 6-311G**": 0.913,
"ROHF cc-pVDZ": 0.861,
"ROHF cc-pVTZ": 0.901,
"LSDA STO-3G": 0.896,
"LSDA 3-21G": 0.984,
"LSDA 3-21G*": 0.982,
"LSDA 6-31G": 0.98,
"LSDA 6-31G*": 0.981,
"LSDA 6-31G**": 0.981,
"LSDA 6-31+G**": 0.985,
"LSDA 6-311G*": 0.984,
"LSDA 6-311G**": 0.988,
"LSDA TZVP": 0.988,
"LSDA cc-pVDZ": 0.989,
"LSDA cc-pVTZ": 0.989,
"LSDA aug-cc-pVDZ": 0.989,
"LSDA aug-cc-pVTZ": 0.991,
"BLYP STO-3G": 0.925,
"BLYP 3-21G": 0.995,
"BLYP 3-21G*": 0.994,
"BLYP 6-31G": 0.992,
"BLYP 6-31G*": 0.992,
"BLYP 6-31G**": 0.992,
"BLYP 6-31+G**": 0.995,
"BLYP 6-311G*": 0.998,
"BLYP 6-311G**": 0.996,
"BLYP TZVP": 0.998,
"BLYP cc-pVDZ": 1.002,
"BLYP cc-pVTZ": 0.997,
"BLYP aug-cc-pVDZ": 0.998,
"BLYP aug-cc-pVTZ": 0.997,
"B1B95 STO-3G": 0.883,
"B1B95 3-21G": 0.957,
"B1B95 3-21G*": 0.955,
"B1B95 6-31G": 0.954,
"B1B95 6-31G*": 0.949,
"B1B95 6-31G**": 0.955,
"B1B95 6-31+G**": 0.957,
"B1B95 6-311G*": 0.959,
"B1B95 6-311G**": 0.96,
"B1B95 TZVP": 0.957,
"B1B95 cc-pVDZ": 0.961,
"B1B95 cc-pVTZ": 0.957,
"B1B95 aug-cc-pVDZ": 0.958,
"B1B95 aug-cc-pVTZ": 0.959,
"B3LYP STO-3G": 0.892,
"B3LYP 3-21G": 0.965,
"B3LYP 3-21G*": 0.962,
"B3LYP 6-31G": 0.962,
"B3LYP 6-31G*": 0.96,
"B3LYP 6-31G**": 0.961,
"B3LYP 6-31+G**": 0.964,
"B3LYP 6-311G*": 0.966,
"B3LYP 6-311G**": 0.967,
"B3LYP TZVP": 0.965,
"B3LYP cc-pVDZ": 0.97,
"B3LYP cc-pVTZ": 0.967,
"B3LYP cc-pVQZ": 0.969,
"B3LYP aug-cc-pVDZ": 0.97,
"B3LYP aug-cc-pVTZ": 0.968,
"B3LYP aug-cc-pVQZ": 0.969,
"B3PW91 STO-3G": 0.885,
"B3PW91 3-21G": 0.961,
"B3PW91 3-21G*": 0.959,
"B3PW91 6-31G": 0.958,
"B3PW91 6-31G*": 0.957,
"B3PW91 6-31G**": 0.958,
"B3PW91 6-31+G**": 0.96,
"B3PW91 6-311G*": 0.963,
"B3PW91 6-311G**": 0.963,
"B3PW91 TZVP": 0.964,
"B3PW91 cc-pVDZ": 0.965,
"B3PW91 cc-pVTZ": 0.962,
"B3PW91 aug-cc-pVDZ": 0.965,
"B3PW91 aug-cc-pVTZ": 0.965,
"mPW1PW91 STO-3G": 0.879,
"mPW1PW91 3-21G": 0.955,
"mPW1PW91 3-21G*": 0.95,
"mPW1PW91 6-31G": 0.947,
"mPW1PW91 6-31G*": 0.948,
"mPW1PW91 6-31G**": 0.952,
"mPW1PW91 6-31+G**": 0.952,
"mPW1PW91 6-311G*": 0.954,
"mPW1PW91 6-311G**": 0.957,
"mPW1PW91 TZVP": 0.954,
"mPW1PW91 cc-pVDZ": 0.958,
"mPW1PW91 cc-pVTZ": 0.959,
"mPW1PW91 aug-cc-pVDZ": 0.958,
"mPW1PW91 aug-cc-pVTZ": 0.958,
"PBEPBE STO-3G": 0.914,
"PBEPBE 3-21G": 0.991,
"PBEPBE 3-21G*": 0.954,
"PBEPBE 6-31G": 0.986,
"PBEPBE 6-31G*": 0.986,
"PBEPBE 6-31G**": 0.986,
"PBEPBE 6-31+G**": 0.989,
"PBEPBE 6-311G*": 0.99,
"PBEPBE 6-311G**": 0.991,
"PBEPBE TZVP": 0.989,
"PBEPBE cc-pVDZ": 0.994,
"PBEPBE cc-pVTZ": 0.993,
"PBEPBE aug-cc-pVDZ": 0.994,
"PBEPBE aug-cc-pVTZ": 0.994,
"PBE1PBE STO-3G": 0.882,
"PBE1PBE 3-21G": 0.96,
"PBE1PBE 3-21G*": 0.96,
"PBE1PBE 6-31G": 0.956,
"PBE1PBE 6-31G*": 0.95,
"PBE1PBE 6-31G**": 0.953,
"PBE1PBE 6-31+G**": 0.955,
"PBE1PBE 6-311G*": 0.959,
"PBE1PBE 6-311G**": 0.959,
"PBE1PBE TZVP": 0.96,
"PBE1PBE cc-pVDZ": 0.962,
"PBE1PBE cc-pVTZ": 0.961,
"PBE1PBE aug-cc-pVDZ": 0.962,
"PBE1PBE aug-cc-pVTZ": 0.962,
"HSEh1PBE STO-3G": 0.883,
"HSEh1PBE 3-21G": 0.963,
"HSEh1PBE 3-21G*": 0.96,
"HSEh1PBE 6-31G": 0.957,
"HSEh1PBE 6-31G*": 0.951,
"HSEh1PBE 6-31G**": 0.954,
"HSEh1PBE 6-31+G**": 0.955,
"HSEh1PBE 6-311G*": 0.96,
"HSEh1PBE 6-311G**": 0.96,
"HSEh1PBE TZVP": 0.96,
"HSEh1PBE cc-pVDZ": 0.962,
"HSEh1PBE cc-pVTZ": 0.961,
"HSEh1PBE aug-cc-pVDZ": 0.962,
"HSEh1PBE aug-cc-pVTZ": 0.962,
"TPSSh 3-21G": 0.969,
"TPSSh 3-21G*": 0.966,
"TPSSh 6-31G": 0.962,
"TPSSh 6-31G*": 0.959,
"TPSSh 6-31G**": 0.959,
"TPSSh 6-31+G**": 0.963,
"TPSSh 6-311G*": 0.963,
"TPSSh TZVP": 0.964,
"TPSSh cc-pVDZ": 0.972,
"TPSSh cc-pVTZ": 0.968,
"TPSSh aug-cc-pVDZ": 0.967,
"TPSSh aug-cc-pVTZ": 0.965,
"B97D3 3-21G": 0.983,
"B97D3 6-31G*": 0.98,
"B97D3 6-31+G**": 0.983,
"B97D3 6-311G**": 0.986,
"B97D3 TZVP": 0.986,
"B97D3 cc-pVDZ": 0.992,
"B97D3 cc-pVTZ": 0.986,
"B97D3 aug-cc-pVTZ": 0.985,
"MP2 STO-3G": 0.872,
"MP2 3-21G": 0.955,
"MP2 3-21G*": 0.951,
"MP2 6-31G": 0.957,
"MP2 6-31G*": 0.943,
"MP2 6-31G**": 0.937,
"MP2 6-31+G**": 0.941,
"MP2 6-311G*": 0.95,
"MP2 6-311G**": 0.95,
"MP2 TZVP": 0.948,
"MP2 cc-pVDZ": 0.953,
"MP2 cc-pVTZ": 0.95,
"MP2 cc-pVQZ": 0.948,
"MP2 aug-cc-pVDZ": 0.959,
"MP2 aug-cc-pVTZ": 0.953,
"MP2 aug-cc-pVQZ": 0.95,
"MP2=FULL STO-3G": 0.889,
"MP2=FULL 3-21G": 0.955,
"MP2=FULL 3-21G*": 0.948,
"MP2=FULL 6-31G": 0.95,
"MP2=FULL 6-31G*": 0.942,
"MP2=FULL 6-31G**": 0.934,
"MP2=FULL 6-31+G**": 0.939,
"MP2=FULL 6-311G*": 0.947,
"MP2=FULL 6-311G**": 0.949,
"MP2=FULL TZVP": 0.953,
"MP2=FULL cc-pVDZ": 0.95,
"MP2=FULL cc-pVTZ": 0.949,
"MP2=FULL cc-pVQZ": 0.957,
"MP2=FULL aug-cc-pVDZ": 0.969,
"MP2=FULL aug-cc-pVTZ": 0.951,
"MP2=FULL aug-cc-pVQZ": 0.956,
"MP3 STO-3G": 0.894,
"MP3 3-21G": 0.968,
"MP3 3-21G*": 0.965,
"MP3 6-31G": 0.966,
"MP3 6-31G*": 0.939,
"MP3 6-31G**": 0.935,
"MP3 6-31+G**": 0.931,
"MP3 TZVP": 0.935,
"MP3 cc-pVDZ": 0.948,
"MP3 cc-pVTZ": 0.945,
"MP3=FULL 6-31G*": 0.938,
"MP3=FULL 6-31+G**": 0.932,
"MP3=FULL TZVP": 0.934,
"MP3=FULL cc-pVDZ": 0.94,
"MP3=FULL cc-pVTZ": 0.933,
"B2PLYP 6-31G*": 0.949,
"B2PLYP 6-31+G**": 0.952,
"B2PLYP TZVP": 0.954,
"B2PLYP cc-pVDZ": 0.958,
"B2PLYP cc-pVTZ": 0.959,
"B2PLYP cc-pVQZ": 0.957,
"B2PLYP aug-cc-pVTZ": 0.961,
"B2PLYP=FULL 3-21G": 0.952,
"B2PLYP=FULL 6-31G*": 0.948,
"B2PLYP=FULL 6-31+G**": 0.951,
"B2PLYP=FULL TZVP": 0.954,
"B2PLYP=FULL cc-pVDZ": 0.959,
"B2PLYP=FULL cc-pVTZ": 0.956,
"B2PLYP=FULL aug-cc-pVDZ": 0.962,
"B2PLYP=FULL aug-cc-pVTZ": 0.959,
"CID 3-21G": 0.932,
"CID 3-21G*": 0.931,
"CID 6-31G": 0.935,
"CID 6-31G*": 0.924,
"CID 6-31G**": 0.924,
"CID 6-31+G**": 0.924,
"CID 6-311G*": 0.929,
"CID cc-pVDZ": 0.924,
"CID cc-pVTZ": 0.927,
"CISD 3-21G": 0.941,
"CISD 3-21G*": 0.934,
"CISD 6-31G": 0.938,
"CISD 6-31G*": 0.926,
"CISD 6-31G**": 0.918,
"CISD 6-31+G**": 0.922,
"CISD 6-311G*": 0.925,
"CISD cc-pVDZ": 0.922,
"CISD cc-pVTZ": 0.93,
"QCISD 3-21G": 0.969,
"QCISD 3-21G*": 0.961,
"QCISD 6-31G": 0.964,
"QCISD 6-31G*": 0.952,
"QCISD 6-31G**": 0.941,
"QCISD 6-31+G**": 0.945,
"QCISD 6-311G*": 0.957,
"QCISD 6-311G**": 0.954,
"QCISD TZVP": 0.955,
"QCISD cc-pVDZ": 0.959,
"QCISD cc-pVTZ": 0.956,
"QCISD aug-cc-pVDZ": 0.969,
"QCISD aug-cc-pVTZ": 0.962,
"CCD 3-21G": 0.972,
"CCD 3-21G*": 0.957,
"CCD 6-31G": 0.96,
"CCD 6-31G*": 0.947,
"CCD 6-31G**": 0.938,
"CCD 6-31+G**": 0.942,
"CCD 6-311G*": 0.955,
"CCD 6-311G**": 0.955,
"CCD TZVP": 0.948,
"CCD cc-pVDZ": 0.957,
"CCD cc-pVTZ": 0.934,
"CCD aug-cc-pVDZ": 0.965,
"CCD aug-cc-pVTZ": 0.957,
"CCSD 3-21G": 0.943,
"CCSD 3-21G*": 0.943,
"CCSD 6-31G": 0.943,
"CCSD 6-31G*": 0.944,
"CCSD 6-31G**": 0.933,
"CCSD 6-31+G**": 0.934,
"CCSD 6-311G*": 0.954,
"CCSD TZVP": 0.954,
"CCSD cc-pVDZ": 0.947,
"CCSD cc-pVTZ": 0.941,
"CCSD cc-pVQZ": 0.951,
"CCSD aug-cc-pVDZ": 0.963,
"CCSD aug-cc-pVTZ": 0.956,
"CCSD aug-cc-pVQZ": 0.953,
"CCSD=FULL 6-31G*": 0.95,
"CCSD=FULL TZVP": 0.948,
"CCSD=FULL cc-pVTZ": 0.948,
"CCSD=FULL aug-cc-pVTZ": 0.951,
}
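# Periodic table entries as [atomic number, symbol, element name]
# (numbers and symbols are space-padded strings).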
element_list = [
["1 ", "H ", "Hydrogen"],
["2 ", "He", "Helium"],
["3 ", "Li", "Lithium"],
["4 ", "Be", "Beryllium"],
["5 ", "B ", "Boron"],
["6 ", "C ", "Carbon"],
["7 ", "N ", "Nitrogen"],
["8 ", "O ", "Oxygen"],
["9 ", "F ", "Fluorine"],
["10", "Ne", "Neon"],
["11", "Na", "Sodium"],
["12", "Mg", "Magnesium"],
["13", "Al", "Aluminum"],
["14", "Si", "Silicon"],
["15", "P ", "Phosphorus"],
["16", "S ", "Sulfur"],
["17", "Cl", "Chlorine"],
["18", "Ar", "Argon"],
["19", "K ", "Potassium"],
["20", "Ca", "Calcium"],
["21", "Sc", "Scandium"],
["22", "Ti", "Titanium"],
["23", "V ", "Vanadium"],
["24", "Cr", "Chromium"],
["25", "Mn", "Manganese"],
["26", "Fe", "Iron"],
["27", "Co", "Cobalt"],
["28", "Ni", "Nickel"],
["29", "Cu", "Copper"],
["30", "Zn", "Zinc"],
["31", "Ga", "Gallium"],
["32", "Ge", "Germanium"],
["33", "As", "Arsenic"],
["34", "Se", "Selenium"],
["35", "Br", "Bromine"],
["36", "Kr", "Krypton"],
["37", "Rb", "Rubidium"],
["38", "Sr", "Strontium"],
["39", "Y ", "Yttrium"],
["40", "Zr", "Zirconium"],
["41", "Nb", "Niobium"],
["42", "Mo", "Molybdenum"],
["43", "Tc", "Technetium"],
["44", "Ru", "Ruthenium"],
["45", "Rh", "Rhodium"],
["46", "Pd", "Palladium"],
["47", "Ag", "Silver"],
["48", "Cd", "Cadmium"],
["49", "In", "Indium"],
["50", "Sn", "Tin"],
["51", "Sb", "Antimony"],
["52", "Te", "Tellurium"],
["53", "I ", "Iodine"],
["54", "Xe", "Xenon"],
["55", "Cs", "Cesium"],
["56", "Ba", "Barium"],
["57", "La", "Lanthanum"],
["58", "Ce", "Cerium"],
["59", "Pr", "Praseodymium"],
["60", "Nd", "Neodymium"],
["61", "Pm", "Promethium"],
["62", "Sm", "Samarium"],
["63", "Eu", "Europium"],
["64", "Gd", "Gadolinium"],
["65", "Tb", "Terbium"],
["66", "Dy", "Dysprosium"],
["67", "Ho", "Holmium"],
["68", "Er", "Erbium"],
["69", "Tm", "Thulium"],
["70", "Yb", "Ytterbium"],
["71", "Lu", "Lutetium"],
["72", "Hf", "Hafnium"],
["73", "Ta", "Tantalum"],
["74", "W ", "Tungsten"],
["75", "Re", "Rhenium"],
["76", "Os", "Osmium"],
["77", "Ir", "Iridium"],
["78", "Pt", "Platinum"],
["79", "Au", "Gold"],
["80", "Hg", "Mercury"],
["81", "Tl", "Thallium"],
["82", "Pb", "Lead"],
["83", "Bi", "Bismuth"],
["84", "Po", "Polonium"],
["85", "At", "Astatine"],
["86", "Rn", "Radon"],
["87", "Fr", "Francium"],
["88", "Ra", "Radium"],
["89", "Ac", "Actinium"],
["90", "Th", "Thorium"],
["91", "Pa", "Protactinium"],
["92", "U ", "Uranium"],
["93", "Np", "Neptunium"],
["94", "Pu", "Plutonium"],
["95", "Am", "Americium"],
["96", "Cm", "Curium"],
["97", "Bk", "Berkelium"],
["98", "Cf", "Californium"],
["99", "Es", "Einsteinium"],
]
def get_vibrational_scaling(functional, basis_set):
"""
Returns vibrational scaling factor given the functional
and the basis set for the QM engine.
Parameters
----------
functional: str
Functional
basis_set: str
Basis set
Returns
-------
vib_scale: float
        Vibrational scaling factor corresponding to the given
        functional and basis_set.
Examples
--------
>>> get_vibrational_scaling("QCISD", "6-311G*")
0.957
"""
vib_scale = method_basis_scale_dict.get(functional + " " + basis_set)
return vib_scale
def unit_vector_N(u_BC, u_AB):
"""
Calculates unit normal vector perpendicular to plane ABC.
Parameters
----------
u_BC : (.. , 1, 3) array
Unit vector from atom B to atom C.
u_AB : (..., 1, 3) array
Unit vector from atom A to atom B.
Returns
-------
u_N : (..., 1, 3) array
Unit normal vector perpendicular to plane ABC.
Examples
--------
>>> u_BC = [0.34040355, 0.62192853, 0.27011169]
>>> u_AB = [0.28276792, 0.34232697, 0.02370306]
>>> unit_vector_N(u_BC, u_AB)
array([-0.65161629, 0.5726879 , -0.49741811])
"""
cross_product = np.cross(u_BC, u_AB)
norm_u_N = np.linalg.norm(cross_product)
u_N = cross_product / norm_u_N
return u_N
def delete_guest_angle_params(guest_qm_params_file="guest_qm_params.txt"):
"""
"""
f_params = open(guest_qm_params_file, "r")
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
lines_selected = lines_params[:to_begin] + lines_params[to_end + 1 :]
with open(guest_qm_params_file, "w") as f_:
f_.write("".join(lines_selected))
return
def remove_bad_angle_params(
guest_qm_params_file="guest_qm_params.txt", angle=1.00, k_angle=500):
with open(guest_qm_params_file, "r") as f_params:
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
lines_to_omit = []
for i in angle_params:
if float(re.findall(r"[-+]?\d+[.]?\d*", i)[0]) < float(angle) or float(
re.findall(r"[-+]?\d+[.]?\d*", i)[1]
) > float(k_angle):
lines_to_omit.append(i)
for b in lines_to_omit:
lines_params.remove(b)
with open(guest_qm_params_file, "w") as file:
for j in lines_params:
file.write(j)
def get_num_host_atoms(host_pdb):
"""
Reads the host PDB file and returns the
total number of atoms.
"""
ppdb = PandasPdb()
ppdb.read_pdb(host_pdb)
no_host_atoms = ppdb.df["ATOM"].shape[0]
return no_host_atoms
def change_names(inpcrd_file, prmtop_file, pdb_file):
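    """
    Copies the given inpcrd, prmtop and PDB files to the standard
    file names system_qmmmrebind.inpcrd, system_qmmmrebind.prmtop
    and system_qmmmrebind.pdb.
    """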
command = "cp -r " + inpcrd_file + " system_qmmmrebind.inpcrd"
os.system(command)
command = "cp -r " + prmtop_file + " system_qmmmrebind.prmtop"
os.system(command)
command = "cp -r " + pdb_file + " system_qmmmrebind.pdb"
os.system(command)
def copy_file(source, destination):
"""
Copies a file from a source to the destination.
"""
shutil.copy(source, destination)
def get_openmm_energies(system_pdb, system_xml):
"""
    Prints a table of decomposed OpenMM energies (bond, angle,
    torsion and nonbonded terms) for the system.
Parameters
----------
system_pdb : str
Input PDB file
system_xml : str
Forcefield file in XML format
"""
pdb = simtk.openmm.app.PDBFile(system_pdb)
ff_xml_file = open(system_xml, "r")
system = simtk.openmm.XmlSerializer.deserialize(ff_xml_file.read())
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(
getEnergy=True, getParameters=True, getForces=True
)
force_group = []
for i, force in enumerate(system.getForces()):
force_group.append(force.__class__.__name__)
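    # Assign each force to its own force group so that the potential
    # energy of each term can be queried separately below.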
forcegroups = {}
for i in range(system.getNumForces()):
force = system.getForce(i)
force.setForceGroup(i)
forcegroups[force] = i
energies = {}
for f, i in forcegroups.items():
energies[f] = (
simulation.context.getState(getEnergy=True, groups=2 ** i)
.getPotentialEnergy()
._value
)
decomposed_energy = []
for key, val in energies.items():
decomposed_energy.append(val)
df_energy_openmm = pd.DataFrame(
list(zip(force_group, decomposed_energy)),
columns=["Energy_term", "Energy_openmm_params"],
)
energy_values = [
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicBondForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicAngleForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "PeriodicTorsionForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "NonbondedForce"
].values[0]
)[1],
]
energy_group = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_open_mm = pd.DataFrame(
list(zip(energy_group, energy_values)),
columns=["Energy_term", "Energy_openmm_params"],
)
df_energy_open_mm = df_energy_open_mm.set_index("Energy_term")
print(df_energy_open_mm)
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array which contains the coordinates of all
the N atoms.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def force_angle_constant(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
    Calculates the angle force constant according to Equation 14 of
    the Seminario calculation paper; returns the force constant
    (in kcal/mol/rad^2) and the equilibrium angle (in degrees).
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y and Z
coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
    k_theta : float
        Angle force constant (in kcal/mol/rad^2) calculated using the
        modified Seminario method.
    theta_0 : float
        Equilibrium angle (in degrees) between AB and BC.
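
    Notes
    -----
    As a sketch of what the code below computes, the two projected terms
    are combined as springs in series,
    k_theta = 1 / ( 1 / (R_AB**2 * S_AB) + 1 / (R_BC**2 * S_BC) ),
    where R_AB and R_BC are the bond lengths and S_AB, S_BC are the
    scaled sums of the Hessian eigenvalue projections, followed by the
    factor of 0.5 used for the OPLS form.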
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
# Normal vector to angle plane found
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_u_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_u_PA
u_PC = np.cross(u_CB, u_N)
norm_u_PC = np.linalg.norm(u_PC)
u_PC = u_PC / norm_u_PC
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Scaling due to additional angles - Modified Seminario Part
sum_first = sum_first / scaling_1
sum_second = sum_second / scaling_2
# Added as two springs in series
k_theta = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta = 1 / k_theta
k_theta = -k_theta # Change to OPLS form
k_theta = abs(k_theta * 0.5) # Change to OPLS form
# Equilibrium Angle
theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
# If the vectors u_CB and u_AB are linearly dependent u_N cannot be defined.
    # This case is dealt with here:
if abs(sum((u_CB) - (u_AB))) < 0.01 or (
abs(sum((u_CB) - (u_AB))) > 1.99 and abs(sum((u_CB) - (u_AB))) < 2.01
):
scaling_1 = 1
scaling_2 = 1
[k_theta, theta_0] = force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
)
return k_theta, theta_0
def dot_product(u_PA, eig_AB):
"""
Returns the dot product of two vectors.
Parameters
----------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the
plane of A, B, C.
    eig_AB : (3,) array
        One eigenvector (a single column of the 3 x 3 eigenvector
        block) of the hessian matrix for the bond AB.
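
    Examples
    --------
    A simple illustrative case (hypothetical 3-vectors, not taken from a
    real hessian):
    >>> dot_product([1.0, 0.0, 0.0], [0.5, 0.5, 0.0])
    0.5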
"""
x = 0
for i in range(0, 3):
x = x + u_PA[i] * eig_AB[i].conjugate()
return x
def force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
    Calculates the angle force constant according to Equation 14
    of the Seminario calculation paper for the special case where
    the vectors u_CB and u_AB are linearly dependent and u_N cannot
    be defined. It instead samples u_N across a unit sphere for the
    calculation; returns the force constant (in kcal/mol/rad^2) and
    the equilibrium angle (in degrees).
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y,
and Z coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
    k_theta : float
        Angle force constant (in kcal/mol/rad^2) calculated using the
        modified Seminario method.
    theta_0 : float
        Equilibrium angle (in degrees) between AB and BC.
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
k_theta_array = np.zeros((180, 360))
# Find force constant with varying u_N (with vector uniformly
# sampled across a sphere)
for theta in range(0, 180):
for phi in range(0, 360):
r = 1
u_N = [
r
* math.sin(math.radians(theta))
                    * math.cos(math.radians(phi)),
r
* math.sin(math.radians(theta))
                    * math.sin(math.radians(phi)),
r * math.cos(math.radians(theta)),
]
u_PA = np.cross(u_N, u_AB)
u_PA = u_PA / np.linalg.norm(u_PA)
u_PC = np.cross(u_CB, u_N)
u_PC = u_PC / np.linalg.norm(u_PC)
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Added as two springs in series
k_theta_ij = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta_ij = 1 / k_theta_ij
k_theta_ij = -k_theta_ij # Change to OPLS form
k_theta_ij = abs(k_theta_ij * 0.5) # Change to OPLS form
k_theta_array[theta, phi] = k_theta_ij
    # The force constant is taken as the mean of k_theta over all the
    # sampled u_N directions.
k_theta = np.mean(np.mean(k_theta_array))
# Equilibrium Angle independent of u_N
    theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
return k_theta, theta_0
def force_constant_bond(atom_A, atom_B, eigenvalues, eigenvectors, coords):
"""
    Calculates the bond force constant for the bonds in the
    molecule according to Equation 10 of the Seminario paper,
    given the bond atoms' indices and the corresponding
    eigenvalues, eigenvectors and coordinate matrices.
Parameters
----------
atom_A : int
Index of Atom A.
atom_B : int
Index of Atom B.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing eigenvalues
of the hessian matrix, where N is the total number
of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing the
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y, and
Z coordinates of all N atoms.
Returns
--------
k_AB : float
Bond Force Constant value for the bond with atoms A and B.
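
    Notes
    -----
    Mirroring the code below, the projected eigenvalues are combined as
    k_AB = -0.5 * sum_i( lambda_i * |u_AB . v_i| ), where u_AB is the
    unit vector along the bond and (lambda_i, v_i) are the eigenpairs of
    the interatomic Hessian block for atoms A and B.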
"""
# Eigenvalues and eigenvectors calculated
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[:, :, atom_A, atom_B]
# Vector along bond
diff_AB = np.array(coords[atom_B, :]) - np.array(coords[atom_A, :])
norm_diff_AB = np.linalg.norm(diff_AB)
unit_vectors_AB = diff_AB / norm_diff_AB
k_AB = 0
# Projections of eigenvalues
for i in range(0, 3):
dot_product = abs(np.dot(unit_vectors_AB, eigenvectors_AB[:, i]))
k_AB = k_AB + (eigenvalues_AB[i] * dot_product)
k_AB = -k_AB * 0.5 # Convert to OPLS form
return k_AB
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array containing the coordinates of all the N atoms.
Returns
-------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the plane of A, B, C.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def reverse_list(lst):
"""
Returns the reversed form of a given list.
Parameters
----------
lst : list
Input list.
Returns
-------
reversed_list : list
Reversed input list.
Examples
--------
>>> lst = [5, 4, 7, 2]
>>> reverse_list(lst)
[2, 7, 4, 5]
"""
reversed_list = lst[::-1]
return reversed_list
def uniq(input_):
"""
Returns a list with only unique elements from a list
containing duplicate / repeating elements.
Parameters
----------
input_ : list
Input list.
Returns
-------
output : list
List with only unique elements.
Examples
--------
>>> lst = [2, 4, 2, 9, 10, 35, 10]
>>> uniq(lst)
[2, 4, 9, 10, 35]
"""
output = []
for x in input_:
if x not in output:
output.append(x)
return output
def search_in_file(file: str, word: str) -> list:
"""
Search for the given string in file and return lines
containing that string along with line numbers.
Parameters
----------
file : str
Input file.
word : str
Search word.
Returns
-------
list_of_results : list
List of lists with each element representing the
line number and the line contents.
"""
line_number = 0
list_of_results = []
with open(file, "r") as f:
for line in f:
line_number += 1
if word in line:
list_of_results.append((line_number, line.rstrip()))
return list_of_results
def list_to_dict(lst):
"""
    Converts an input list into a dictionary, where each element at an
    even index (0, 2, 4, ...) becomes a key and the element immediately
    following it becomes the corresponding value.
Parameters
----------
lst : list
Input list.
Returns
-------
res_dct : dict
A dictionary with every element mapped with
its successive element starting from index 0.
Examples
--------
>>> lst = [5, 9, 3, 6, 2, 7]
>>> list_to_dict(lst)
{5: 9, 3: 6, 2: 7}
"""
res_dct = {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
return res_dct
def scale_list(list_):
"""
Returns a scaled list with the minimum value
subtracted from each element of the corresponding list.
Parameters
----------
list_ : list
Input list.
Returns
-------
scaled_list : list
Scaled list.
Examples
--------
>>> list_ = [6, 3, 5, 11, 3, 2, 8, 6]
>>> scale_list(list_)
[4, 1, 3, 9, 1, 0, 6, 4]
"""
scaled_list = [i - min(list_) for i in list_]
return scaled_list
def list_kJ_kcal(list_):
"""
Convert the elements in the list from
kiloJoules units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of kJ.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_kJ_kcal(list_)
[1.4340344168260037, 0.7170172084130019, 1.1950286806883366]
"""
converted_list = [i / 4.184 for i in list_]
return converted_list
def list_hartree_kcal(list_):
"""
Convert the elements in the list from
hartree units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of hartree.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_hartree_kcal(list_)
[3765.0564000000004, 1882.5282000000002, 3137.547]
"""
converted_list = [i * 627.5094 for i in list_]
return converted_list
def torsiondrive_input_to_xyz(psi_input_file, xyz_file):
"""
Returns an xyz file from a torsiondrive formatted
input file.
Parameters
----------
psi_input_file : str
Input file for the psi4 QM engine.
xyz_file : str
XYZ format file to write the coords of the system.
"""
with open(psi_input_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "molecule {" in lines[i]:
to_begin = int(i)
if "set {" in lines[i]:
to_end = int(i)
xyz_lines = lines[to_begin + 2 : to_end - 1]
with open(xyz_file, "w") as f:
f.write(str(len(xyz_lines)) + "\n")
f.write(xyz_file + "\n")
for i in xyz_lines:
f.write(i)
def xyz_to_pdb(xyz_file, coords_file, template_pdb, system_pdb):
"""
Converts a XYZ file to a PDB file.
Parameters
----------
xyz_file : str
XYZ file containing the coordinates of the system.
coords_file : str
A text file containing the coordinates part of XYZ file.
template_pdb : str
A pdb file to be used as a template for the required PDB.
system_pdb : str
Output PDB file with the coordinates updated in the
template pdb using XYZ file.
"""
with open(xyz_file, "r") as f:
lines = f.readlines()
needed_lines = lines[2:]
with open(coords_file, "w") as f:
for i in needed_lines:
f.write(i)
df = pd.read_csv(coords_file, header=None, delimiter=r"\s+")
df.columns = ["atom", "x", "y", "z"]
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = df["x"]
ppdb.df["ATOM"]["y_coord"] = df["y"]
ppdb.df["ATOM"]["z_coord"] = df["z"]
ppdb.to_pdb(system_pdb)
def generate_xml_from_pdb_sdf(system_pdb, system_sdf, system_xml):
"""
Generates an openforcefield xml file from the pdb file.
Parameters
----------
system_pdb : str
Input PDB file.
system_sdf : str
SDF file of the system.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_sdf
os.system(command)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
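# Example usage (illustrative sketch; file names are hypothetical):
# generate_xml_from_pdb_sdf("guest_init_II.pdb", "guest.sdf", "guest_init.xml")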
def generate_xml_from_charged_pdb_sdf(
system_pdb,
system_init_sdf,
system_sdf,
num_charge_atoms,
index_charge_atom_1,
charge_atom_1,
system_xml,
):
"""
Generates an openforcefield xml file from the pdb
file via SDF file and openforcefield.
Parameters
----------
system_pdb : str
Input PDB file.
system_init_sdf : str
SDF file for the system excluding charge information.
system_sdf : str
SDF file of the system.
num_charge_atoms : int
Total number of charged atoms in the PDB.
index_charge_atom_1 : int
Index of the first charged atom.
charge_atom_1 : float
Charge on first charged atom.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_init_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_init_sdf
os.system(command)
with open(system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(num_charge_atoms)
+ " "
+ str(index_charge_atom_1)
+ " "
+ str(charge_atom_1)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def get_dihedrals(qm_scan_file):
"""
Returns dihedrals from the torsiondrive scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
dihedrals : list
List of all the dihedral values from the qm scan file.
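
    Notes
    -----
    Every line of the scan file containing the word "Dihedral" is parsed
    with a regular expression; the first numeric field on such a line is
    taken as the dihedral value (and, in get_qm_energies, the second one
    as the QM energy).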
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
return dihedrals
def get_qm_energies(qm_scan_file):
"""
Returns QM optimized energies from the torsiondrive
scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
qm_energies : list
        List of all the QM optimized energies extracted from the torsiondrive
scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
qm_energies = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
energy = float(energy_dihedral[1])
qm_energies.append(energy)
return qm_energies
def generate_mm_pdbs(qm_scan_file, template_pdb):
"""
Generate PDBs from the torsiondrive scan file
based on a template PDB.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
lines_markers = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
lines_markers.append(i)
lines_markers.append(len(lines) + 1)
for i in range(len(lines_markers) - 1):
# pdb_file_to_write = str(dihedrals[i]) + ".pdb"
if dihedrals[i] > 0:
pdb_file_to_write = "plus_" + str(abs(dihedrals[i])) + ".pdb"
if dihedrals[i] < 0:
pdb_file_to_write = "minus_" + str(abs(dihedrals[i])) + ".pdb"
to_begin = lines_markers[i]
to_end = lines_markers[i + 1]
lines_to_write = lines[to_begin + 1 : to_end - 1]
x_coords = []
y_coords = []
z_coords = []
for i in lines_to_write:
coordinates = i
coordinates = re.findall(r"[-+]?\d+[.]?\d*", coordinates)
x = float(coordinates[0])
y = float(coordinates[1])
z = float(coordinates[2])
x_coords.append(x)
y_coords.append(y)
z_coords.append(z)
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = x_coords
ppdb.df["ATOM"]["y_coord"] = y_coords
ppdb.df["ATOM"]["z_coord"] = z_coords
ppdb.to_pdb(pdb_file_to_write)
def remove_mm_files(qm_scan_file):
"""
Delete all generated PDB files.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
for i in mm_pdb_list:
command = "rm -rf " + i
os.system(command)
command = "rm -rf " + i[:-4] + ".inpcrd"
os.system(command)
command = "rm -rf " + i[:-4] + ".prmtop"
os.system(command)
def get_non_torsion_mm_energy(system_pdb, load_topology, system_xml):
"""
Returns sum of all the non-torsional energies (that
includes HarmonicBondForce, HarmonicAngleForce
    and NonbondedForce) of the system from the PDB
file given the topology and the forcefield file.
Parameters
----------
system_pdb : str
System PDB file to load the openmm system topology
and coordinates.
load_topology : {"openmm", "parmed"}
Argument to specify how to load the topology.
system_xml : str
XML force field file for the openmm system.
Returns
-------
Sum of all the non-torsional energies of the system.
"""
system_prmtop = system_pdb[:-4] + ".prmtop"
system_inpcrd = system_pdb[:-4] + ".inpcrd"
if load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(system_pdb, structure=True).topology,
parmed.load_file(system_xml),
)
if load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(system_pdb).topology,
parmed.load_file(system_xml),
)
openmm_system.save(system_prmtop, overwrite=True)
openmm_system.coordinates = parmed.load_file(
system_pdb, structure=True
).coordinates
openmm_system.save(system_inpcrd, overwrite=True)
parm = parmed.load_file(system_prmtop, system_inpcrd)
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
# print(prmtop_energy_decomposition)
    energy_decomposition_dict = list_to_dict(
        [
            item
            for sublist in [
                list(elem) for elem in prmtop_energy_decomposition
            ]
            for item in sublist
        ]
    )
    prmtop_energy_decomposition_value_no_torsion = [
        energy_decomposition_dict.get("HarmonicBondForce"),
        energy_decomposition_dict.get("HarmonicAngleForce"),
        energy_decomposition_dict.get("NonbondedForce"),
    ]
    return sum(prmtop_energy_decomposition_value_no_torsion)
def get_mm_potential_energies(qm_scan_file, load_topology, system_xml):
"""
Returns potential energy of the system from the PDB file
given the topology and the forcefield file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML file to load the openmm system.
Returns
-------
mm_potential_energies : list
        List of all the non-torsional MM energies for the
generated PDB files.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
    mm_potential_energies = []
    for i in mm_pdb_list:
mm_energy = get_non_torsion_mm_energy(
system_pdb=i, load_topology=load_topology, system_xml=system_xml,
)
mm_potential_energies.append(mm_energy)
return mm_potential_energies
def list_diff(list_1, list_2):
"""
Returns the difference between two lists as a list.
Parameters
----------
list_1 : list
First list
list_2 : list
Second list.
Returns
-------
diff_list : list
        List containing the differences between the elements of
the two lists.
Examples
--------
>>> list_1 = [4, 2, 8, 3, 0, 6, 7]
>>> list_2 = [5, 3, 1, 5, 6, 0, 4]
>>> list_diff(list_1, list_2)
[-1, -1, 7, -2, -6, 6, 3]
"""
diff_list = []
zipped_list = zip(list_1, list_2)
for list1_i, list2_i in zipped_list:
diff_list.append(list1_i - list2_i)
return diff_list
def dihedral_energy(x, k1, k2, k3, k4=0):
"""
    Fourier series expression for the dihedral energy; x is the dihedral
    angle in degrees (0.01745 ~ pi/180 converts it to radians) and k1-k4
    are the Fourier coefficients.
"""
energy_1 = k1 * (1 + np.cos(1 * x * 0.01745))
energy_2 = k2 * (1 - np.cos(2 * x * 0.01745))
energy_3 = k3 * (1 + np.cos(3 * x * 0.01745))
energy_4 = k4 * (1 - np.cos(4 * x * 0.01745))
dihedral_energy = energy_1 + energy_2 + energy_3 + energy_4
return dihedral_energy
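# Worked example (illustrative): with k1 = k2 = k3 = 1 and x = 0 degrees,
# cos(0) = 1, so the series evaluates to 1*2 + 1*0 + 1*2 + 0 = 4 energy units.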
def error_function(delta_qm, delta_mm):
"""
    Root mean squared error (RMSE) between two energy profiles.
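
    Examples
    --------
    >>> error_function([1.0, 2.0], [1.0, 4.0])
    1.4142135623730951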
"""
squared_error = np.square(np.subtract(delta_qm, delta_mm))
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def error_function_boltzmann(delta_qm, delta_mm, T):
"""
Boltzmann Root Mean Squared Error.
"""
kb = 3.297623483 * 10 ** (-24) # in cal/K
delta_qm_boltzmann_weighted = [np.exp(-i / (kb * T)) for i in delta_qm]
squared_error = (
np.square(np.subtract(delta_qm, delta_mm))
* delta_qm_boltzmann_weighted
)
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def gen_init_guess(qm_scan_file, load_topology, system_xml):
"""
Initial guess for the torsional parameter.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML force field file for the system.
Returns
-------
k_init_guess : list
Initial guess for the torsional parameters.
"""
x = get_dihedrals(qm_scan_file)
y = scale_list(
list_=get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
)
init_vals = [0.0, 0.0, 0.0, 0.0]
k_init_guess, covar = scipy.optimize.curve_fit(
dihedral_energy, x, y, p0=init_vals
)
for i in range(len(k_init_guess)):
if k_init_guess[i] < 0:
k_init_guess[i] = 0
return k_init_guess
def objective_function(k_array, x, delta_qm):
"""
    Objective function (the RMSE between the QM and MM dihedral energy
    profiles) for the torsional parameter fitting.
"""
delta_mm = dihedral_energy(
x, k1=k_array[0], k2=k_array[1], k3=k_array[2], k4=k_array[3]
)
loss_function = error_function(delta_qm, delta_mm)
return loss_function
def fit_params(qm_scan_file, load_topology, system_xml, method):
"""
Optimization of the objective function.
"""
k_guess = gen_init_guess(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
x_data = np.array(get_dihedrals(qm_scan_file))
delta_qm = np.array(
scale_list(list_hartree_kcal(list_=get_qm_energies(qm_scan_file)))
)
optimise = scipy.optimize.minimize(
objective_function,
k_guess,
args=(x_data, delta_qm),
method=method,
bounds=[(0.00, None), (0.00, None), (0.00, None), (0.00, None),],
)
return optimise.x
def get_tor_params(
qm_scan_file, template_pdb, load_topology, system_xml, method
):
"""
Returns the fitted torsional parameters.
"""
qm_e = get_qm_energies(qm_scan_file=qm_scan_file)
qm_e_kcal = list_hartree_kcal(qm_e)
delta_qm = scale_list(qm_e_kcal)
generate_mm_pdbs(qm_scan_file=qm_scan_file, template_pdb=template_pdb)
mm_pe_no_torsion_kcal = get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
delta_mm = scale_list(mm_pe_no_torsion_kcal)
opt_param = fit_params(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
return opt_param
def get_torsional_lines(
template_pdb,
system_xml,
qm_scan_file,
load_topology,
method,
dihedral_text_file,
):
"""
Returns the torsional lines for the XML forcefield file.
"""
opt_param = get_tor_params(
qm_scan_file=qm_scan_file,
template_pdb=template_pdb,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
dihedral_text = open(dihedral_text_file, "r")
dihedral_text_lines = dihedral_text.readlines()
atom_numbers = dihedral_text_lines[-1]
atom_index_from_1 = [
int(re.findall(r"\d+", atom_numbers)[0]),
int(re.findall(r"\d+", atom_numbers)[1]),
int(re.findall(r"\d+", atom_numbers)[2]),
int(re.findall(r"\d+", atom_numbers)[3]),
]
atom_index = [i - 1 for i in atom_index_from_1]
atom_index_lines = (
" "
+ "p1="
+ '"'
+ str(atom_index[0])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(atom_index[1])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(atom_index[2])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(atom_index[3])
+ '"'
+ " "
)
tor_lines = []
for i in range(len(opt_param)):
line_to_append = (
" "
+ "<Torsion "
+ "k="
+ '"'
+ str(round(opt_param[i], 8))
+ '"'
+ atom_index_lines
+ "periodicity="
+ '"'
+ str(i + 1)
+ '"'
+ " "
+ "phase="
+ '"'
+ "0"
+ '"'
+ "/>"
)
# print(line_to_append)
tor_lines.append(line_to_append)
return tor_lines
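# Each element of tor_lines has the form (values shown are illustrative):
#   <Torsion k="0.12345678" p1="3" p2="4" p3="7" p4="9" periodicity="1" phase="0"/>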
def singular_resid(pdbfile, qmmmrebind_init_file):
"""
Returns a PDB file with chain ID = A
Parameters
----------
pdbfile: str
Input PDB file
qmmmrebind_init_file: str
Output PDB file
"""
ppdb = PandasPdb().read_pdb(pdbfile)
ppdb.df["HETATM"]["chain_id"] = "A"
ppdb.df["ATOM"]["chain_id"] = "A"
ppdb.to_pdb(
path=qmmmrebind_init_file, records=None, gz=False, append_newline=True
)
def relax_init_structure(
pdbfile,
prmtopfile,
qmmmrebindpdb,
sim_output="output.pdb",
sim_steps=100000,
):
"""
    Minimizes the initial PDB structure with the given topology
    file and runs a short MD simulation.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile : str
Input prmtop file.
    qmmmrebindpdb: str
Output PDB file.
sim_output: str
Simulation output trajectory file.
sim_steps: int
MD simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedMethod=simtk.openmm.app.PME,
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=10000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(qmmmrebindpdb, sim_steps)
)
simulation.step(sim_steps)
command = "rm -rf " + sim_output
os.system(command)
def truncate(x):
"""
    Formats a numeric value into a fixed-width string (as used for the
    inpcrd box-vector and prmtop box-dimension lines), choosing the
    number of decimal places from the number of digits before the
    decimal point.
    Parameters
    ----------
    x: float
        Input numeric value.
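
    Examples
    --------
    >>> truncate(5.123456789)
    '5.12345679'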
"""
if len(str(int(float(x)))) == 1:
x = format(x, ".8f")
if len(str(int(float(x)))) == 2:
x = format(x, ".7f")
if len(str(int(float(x)))) == 3:
x = format(x, ".6f")
if len(str(int(float(x)))) == 4:
x = format(x, ".5f")
if len(str(x)) > 10:
x = round(x, 10)
return x
def add_vectors_inpcrd(pdbfile, inpcrdfile):
"""
Adds periodic box dimensions to the inpcrd file
Parameters
----------
pdbfile: str
PDB file containing the periodic box information.
inpcrdfile: str
Input coordinate file.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
line_to_add = (
" "
+ truncate(vector_list[0])
+ " "
+ truncate(vector_list[1])
+ " "
+ truncate(vector_list[2])
+ " "
+ truncate(vector_list[3])
+ " "
+ truncate(vector_list[4])
+ " "
+ truncate(vector_list[5])
)
print(line_to_add)
with open(inpcrdfile, "a+") as f:
f.write(line_to_add)
def add_dim_prmtop(pdbfile, prmtopfile):
"""
Adds periodic box dimensions flag in the prmtop file.
Parameters
----------
prmtopfile: str
Input prmtop file.
pdbfile: str
PDB file containing the periodic box information.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
vector_list = [i / 10 for i in vector_list]
vector_list = [truncate(i) for i in vector_list]
vector_list = [i + "E+01" for i in vector_list]
line3 = (
" "
+ vector_list[3]
+ " "
+ vector_list[0]
+ " "
+ vector_list[1]
+ " "
+ vector_list[2]
)
print(line3)
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
with open(prmtopfile) as f1, open("intermediate.prmtop", "w") as f2:
for line in f1:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f2.write(line)
command = "rm -rf " + prmtopfile
os.system(command)
command = "mv intermediate.prmtop " + prmtopfile
os.system(command)
def add_period_prmtop(parm_file, ifbox):
"""
Changes the value of IFBOX if needed for the prmtop / parm file.
Set to 1 if standard periodic box and 2 when truncated octahedral.
"""
with open(parm_file) as f:
parm_lines = f.readlines()
lines_contain = []
for i in range(len(parm_lines)):
if parm_lines[i].startswith("%FLAG POINTERS"):
lines_contain.append(i + 4)
line = parm_lines[lines_contain[0]]
line_new = "%8s %6s %6s %6s %6s %6s %6s %6s %6s %6s" % (
re.findall(r"\d+", line)[0],
re.findall(r"\d+", line)[1],
re.findall(r"\d+", line)[2],
re.findall(r"\d+", line)[3],
re.findall(r"\d+", line)[4],
re.findall(r"\d+", line)[5],
re.findall(r"\d+", line)[6],
str(ifbox),
re.findall(r"\d+", line)[8],
re.findall(r"\d+", line)[9],
)
parm_lines[lines_contain[0]] = line_new + "\n"
with open(parm_file, "w") as f:
for i in parm_lines:
f.write(i)
def add_solvent_pointers_prmtop(non_reparams_file, reparams_file):
"""
Adds the flag solvent pointers to the topology file.
"""
f_non_params = open(non_reparams_file, "r")
lines_non_params = f_non_params.readlines()
for i in range(len(lines_non_params)):
if "FLAG SOLVENT_POINTERS" in lines_non_params[i]:
to_begin = int(i)
solvent_pointers = lines_non_params[to_begin : to_begin + 3]
file = open(reparams_file, "a")
for i in solvent_pointers:
file.write(i)
def prmtop_calibration(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
):
"""
Standardizes the topology files
Parameters
----------
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
"""
parm = parmed.load_file(prmtopfile, inpcrdfile)
parm_1 = parmed.tools.actions.changeRadii(parm, "mbondi3")
parm_1.execute()
parm_2 = parmed.tools.actions.setMolecules(parm)
parm_2.execute()
parm.save(prmtopfile, overwrite=True)
def run_openmm_prmtop_inpcrd(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with inpcrd and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
inpcrd = simtk.openmm.app.AmberInpcrdFile(inpcrdfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
if inpcrd.boxVectors is None:
add_vectors_inpcrd(
pdbfile=pdbfile, inpcrdfile=inpcrdfile,
)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
print(inpcrd.boxVectors)
simulation.context.setPositions(inpcrd.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def run_openmm_prmtop_pdb(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with pdb and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def move_qmmmmrebind_files(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
pdbfile="system_qmmmrebind.pdb",
):
"""
Moves QMMMReBind generated topology and parameter files
    to a new directory.
Parameters
----------
prmtopfile: str
QMMMReBind generated prmtop file.
inpcrdfile: str
QMMMReBind generated inpcrd file.
pdbfile: str
QMMMReBind generated PDB file.
"""
current_pwd = os.getcwd()
command = "rm -rf reparameterized_files"
os.system(command)
command = "mkdir reparameterized_files"
os.system(command)
shutil.copy(
current_pwd + "/" + prmtopfile,
current_pwd + "/" + "reparameterized_files" + "/" + prmtopfile,
)
shutil.copy(
current_pwd + "/" + inpcrdfile,
current_pwd + "/" + "reparameterized_files" + "/" + inpcrdfile,
)
shutil.copy(
current_pwd + "/" + pdbfile,
current_pwd + "/" + "reparameterized_files" + "/" + pdbfile,
)
def move_qm_files():
"""
    Moves QM engine generated files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qm_data"
os.system(command)
command = "mkdir qm_data"
os.system(command)
command = "cp -r " + "*.com* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.log* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.chk* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.fchk* " + current_pwd + "/" + "qm_data"
os.system(command)
def move_qmmmrebind_files():
"""
Moves all QMMMREBind files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qmmmrebind_data"
os.system(command)
command = "mkdir qmmmrebind_data"
os.system(command)
command = "mv " + "*.sdf* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.txt* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.pdb* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xml* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.chk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.fchk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.com* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.log* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.inpcrd* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.prmtop* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.parm7* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.out* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*run_command* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.dat* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xyz* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
class PrepareQMMM:
"""
A class used to segregate the QM and MM regions.
    This class contains methods to remove the solvent, ions and all
    entities other than the receptor and the ligand. It also
    defines the Quantum Mechanical (QM) region and the Molecular
    Mechanical (MM) region based upon the distance of the ligand
    from the receptor and the chosen number of receptor residues. It
    is assumed that the initial PDB file lists the receptor
    followed by the ligand.
...
Attributes
----------
init_pdb : str
Initial PDB file containing the receptor-ligand complex with
solvent, ions, etc.
cleaned_pdb : str
Formatted PDB file containing only the receptor and the ligand.
guest_init_pdb : str
A separate ligand PDB file with atom numbers not beginning from 1.
host_pdb : str
A separate receptor PDB file with atom numbers beginning from 1.
guest_resname : str
Three letter residue ID for the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
guest_xyz : str, optional
A text file of the XYZ coordinates of the ligand.
distance : float, optional
The distance required to define the QM region of the receptor.
This is the distance between the atoms of the ligand and the
atoms of the receptor.
residue_list : str, optional
A text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
host_qm_atoms : str, optional
A text file of the atom numbers of the receptors in the QM
region.
host_mm_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region (all atoms except atoms in the QM region)
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
host_mm_pdb : str, optional
PDB file for the receptor's MM region.
qm_pdb : str, optional
PDB file for the QM region (receptor's QM region and the
ligand).
mm_pdb : str, optional
PDB file for the MM region.
host_mm_region_I_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
        region preceding the QM region.
host_mm_region_II_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region following the QM region.
host_mm_region_I_pdb : str, optional
        PDB file of the receptor in the MM region preceding the
QM region.
host_mm_region_II_pdb : str, optional
PDB file of the receptor in the MM region following the
QM region.
num_residues : int, optional
Number of residues required in the QM region of the receptor.
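
    Examples
    --------
    A minimal, illustrative instantiation (file names and values below
    are hypothetical; all output file names keep their defaults):
    >>> prep = PrepareQMMM(
    ...     init_pdb="complex.pdb",
    ...     distance=4.0,
    ...     num_residues=5,
    ...     guest_resname="LIG",
    ... )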
"""
def __init__(
self,
init_pdb,
distance,
num_residues,
guest_resname,
cleaned_pdb="system.pdb",
guest_init_pdb="guest_init.pdb",
host_pdb="host.pdb",
guest_pdb="guest_init_II.pdb",
guest_xyz="guest_coord.txt",
residue_list="residue_list.txt",
host_qm_atoms="host_qm.txt",
host_mm_atoms="host_mm.txt",
host_qm_pdb="host_qm.pdb",
host_mm_pdb="host_mm.pdb",
qm_pdb="qm.pdb",
mm_pdb="mm.pdb",
host_mm_region_I_atoms="host_mm_region_I.txt",
host_mm_region_II_atoms="host_mm_region_II.txt",
host_mm_region_I_pdb="host_mm_region_I.pdb",
host_mm_region_II_pdb="host_mm_region_II.pdb",
):
self.init_pdb = init_pdb
self.distance = distance
self.num_residues = num_residues
self.guest_resname = guest_resname
self.cleaned_pdb = cleaned_pdb
self.guest_init_pdb = guest_init_pdb
self.host_pdb = host_pdb
self.guest_pdb = guest_pdb
self.guest_xyz = guest_xyz
self.residue_list = residue_list
self.host_qm_atoms = host_qm_atoms
self.host_mm_atoms = host_mm_atoms
self.host_qm_pdb = host_qm_pdb
self.host_mm_pdb = host_mm_pdb
self.qm_pdb = qm_pdb
self.mm_pdb = mm_pdb
self.host_mm_region_I_atoms = host_mm_region_I_atoms
self.host_mm_region_II_atoms = host_mm_region_II_atoms
self.host_mm_region_I_pdb = host_mm_region_I_pdb
self.host_mm_region_II_pdb = host_mm_region_II_pdb
def clean_up(self):
"""
Reads the given PDB file, removes all entities except the
receptor and ligand and saves a new pdb file.
"""
ions = [
"Na+",
"Cs+",
"K+",
"Li+",
"Rb+",
"Cl-",
"Br-",
"F-",
"I-",
"Ca2",
]
intermediate_file_1 = self.cleaned_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.cleaned_pdb[:-4] + "_intermediate_2.pdb"
command = (
"pdb4amber -i "
+ self.init_pdb
+ " -o "
+ intermediate_file_1
+ " --noter --dry"
)
os.system(command)
to_delete = (
intermediate_file_1[:-4] + "_nonprot.pdb",
intermediate_file_1[:-4] + "_renum.txt",
intermediate_file_1[:-4] + "_sslink",
intermediate_file_1[:-4] + "_water.pdb",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_1) as f1, open(
intermediate_file_2, "w") as f2:
for line in f1:
if not any(ion in line for ion in ions):
f2.write(line)
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.cleaned_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def create_host_guest(self):
"""
Saves separate receptor and ligand PDB files.
"""
with open(self.cleaned_pdb) as f1, open(self.host_pdb, "w") as f2:
for line in f1:
if not self.guest_resname in line and not "CRYST1" in line:
f2.write(line)
with open(self.cleaned_pdb) as f1, open(
self.guest_init_pdb, "w"
) as f2:
for line in f1:
if self.guest_resname in line or "END" in line:
f2.write(line)
def realign_guest(self):
"""
Saves a ligand PDB file with atom numbers beginning from 1.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_init_pdb)
to_subtract = min(ppdb.df["ATOM"]["atom_number"]) - 1
ppdb.df["ATOM"]["atom_number"] = (
ppdb.df["ATOM"]["atom_number"] - to_subtract
)
intermediate_file_1 = self.guest_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.guest_pdb[:-4] + "_intermediate_2.pdb"
ppdb.to_pdb(path=intermediate_file_1)
command = (
"pdb4amber -i "
+ intermediate_file_1
+ " -o "
+ intermediate_file_2
)
os.system(command)
to_delete = (
intermediate_file_2[:-4] + "_nonprot.pdb",
intermediate_file_2[:-4] + "_renum.txt",
intermediate_file_2[:-4] + "_sslink",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.guest_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def get_guest_coord(self):
"""
Saves a text file of the XYZ coordinates of the ligand.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
xyz = ppdb.df["ATOM"][["x_coord", "y_coord", "z_coord"]]
xyz_to_list = xyz.values.tolist()
np.savetxt(self.guest_xyz, xyz_to_list)
def get_qm_resids(self):
"""
Saves a text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
"""
guest_coord_list = np.loadtxt(self.guest_xyz)
host_atom_list = []
for i in range(len(guest_coord_list)):
reference_point = guest_coord_list[i]
# TODO: move reads outside of loop
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
distances = ppdb.distance(xyz=reference_point, records=("ATOM"))
all_within_distance = ppdb.df["ATOM"][
distances < float(self.distance)
]
host_df = all_within_distance["atom_number"]
host_list = host_df.values.tolist()
host_atom_list.append(host_list)
host_atom_list = list(itertools.chain(*host_atom_list))
host_atom_list = set(host_atom_list)
host_atom_list = list(host_atom_list)
host_atom_list.sort()
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
index_list = []
for i in host_atom_list:
indices = np.where(df["atom_number"] == i)
indices = list(indices)[0]
indices = list(indices)
index_list.append(indices)
index_list = list(itertools.chain.from_iterable(index_list))
df1 = df.iloc[
index_list,
]
# TODO: make it write list of integers
resid_num = list(df1.residue_number.unique())
np.savetxt(self.residue_list, resid_num, fmt="%i")
def get_host_qm_mm_atoms(self):
"""
Saves a text file of the atom numbers of the receptors in the QM
region and MM region separately.
"""
resid_num = np.loadtxt(self.residue_list)
# approximated_res_list = [int(i) for i in resid_num]
approximated_res_list = []
# TODO: what is this doing?
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
host_index_nested_list = []
for i in approximated_res_list:
indices = np.where(df["residue_number"] == i)
#TODO: the program seems to error when this line is removed, which
# makes no sense.
indices = list(indices)[0]
indices = list(indices)
host_index_nested_list.append(indices)
host_index_list = list(
itertools.chain.from_iterable(host_index_nested_list)
)
df_atom = df.iloc[host_index_list]
df_atom_number = df_atom["atom_number"]
host_atom_list = df_atom_number.values.tolist()
selected_atoms = []
selected_atoms.extend(host_atom_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
non_selected_atoms = list(set(len_atoms).difference(selected_atoms))
assert len(non_selected_atoms) + len(selected_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_qm_atoms, selected_atoms, fmt="%i")
np.savetxt(self.host_mm_atoms, non_selected_atoms, fmt="%i")
def save_host_pdbs(self):
"""
Saves a PDB file for the receptor's QM region and MM
region separately.
"""
selected_atoms = np.loadtxt(self.host_qm_atoms)
# TODO: not necessary if savetxt writes in integers
selected_atoms = [int(i) for i in selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_pdb, records=None, gz=False, append_newline=True,
)
non_selected_atoms = np.loadtxt(self.host_mm_atoms)
non_selected_atoms = [int(i) for i in non_selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in non_selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_qm_pdb, records=None, gz=False, append_newline=True,
)
def get_host_mm_region_atoms(self):
"""
Saves a text file for the atoms of the receptor's MM region
preceding the QM region and saves another text file for the
        atoms of the receptor's MM region following the QM region.
"""
resid_num = np.loadtxt(self.residue_list)
approximated_res_list = []
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
# print(approximated_res_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["residue_number"]]
res_list = list(set(df["residue_number"].to_list()))
res_mm_list = list(set(res_list).difference(approximated_res_list))
# print(res_mm_list)
res_mm_region_I_list = []
# TODO: This can probably be made into a single loop by comparing i
# to the maximum value within approximated_res_list
for i in res_mm_list:
for j in approximated_res_list:
if i < j:
res_mm_region_I_list.append(i)
res_mm_region_I_list = list(set(res_mm_region_I_list))
res_mm_region_II_list = list(
set(res_mm_list).difference(res_mm_region_I_list)
)
# print(res_mm_region_II_list)
ppdb.read_pdb(self.host_mm_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
mm_region_I_index_nested_list = []
for i in res_mm_region_I_list:
indices = np.where(df["residue_number"] == i)
            # np.where returns a tuple of index arrays; take the first element.
indices = list(indices)[0]
indices = list(indices)
mm_region_I_index_nested_list.append(indices)
mm_region_I_index_list = list(
itertools.chain.from_iterable(mm_region_I_index_nested_list)
)
df_atom = df.iloc[mm_region_I_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_I_atom_list = df_atom_number.values.tolist()
mm_region_I_atoms = []
mm_region_I_atoms.extend(mm_region_I_atom_list)
mm_region_II_index_nested_list = []
for i in res_mm_region_II_list:
indices = np.where(df["residue_number"] == i)
            # np.where returns a tuple of index arrays; take the first element.
indices = list(indices)[0]
indices = list(indices)
mm_region_II_index_nested_list.append(indices)
mm_region_II_index_list = list(
itertools.chain.from_iterable(mm_region_II_index_nested_list)
)
df_atom = df.iloc[mm_region_II_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_II_atom_list = df_atom_number.values.tolist()
mm_region_II_atoms = []
mm_region_II_atoms.extend(mm_region_II_atom_list)
ppdb.read_pdb(self.host_mm_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
assert len(mm_region_I_atoms) + len(mm_region_II_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_mm_region_I_atoms, mm_region_I_atoms, fmt="%i")
np.savetxt(self.host_mm_region_II_atoms, mm_region_II_atoms, fmt="%i")
def save_host_mm_regions_pdbs(self):
"""
Saves a PDB file for the receptor's MM region preceding
the QM region and saves another PDB file for the receptor's
        MM region following the QM region.
"""
mm_region_I_atoms = np.loadtxt(self.host_mm_region_I_atoms)
mm_region_I_atoms = [int(i) for i in mm_region_I_atoms]
mm_region_II_atoms = np.loadtxt(self.host_mm_region_II_atoms)
mm_region_II_atoms = [int(i) for i in mm_region_II_atoms]
# NOTE: this is a slightly confusing way to define the atoms to
# write to a PDB - the members that are *not* in a section, rather
# than the members that are.
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_II_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_I_pdb,
records=None,
gz=False,
append_newline=True,
)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_I_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_II_pdb,
records=None,
gz=False,
append_newline=True,
)
def get_qm_mm_regions(self):
"""
Saves separate PDB files for the QM and MM regions.
        The QM region comprises the QM region of the receptor together
        with the entire ligand, while the MM region comprises the
        remaining (non-QM) region of the receptor.
"""
with open(self.host_qm_pdb) as f1, open(self.qm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
with open(self.guest_pdb) as f1, open(self.qm_pdb, "a") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
with open(self.host_mm_pdb) as f1, open(self.mm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
class PrepareGaussianGuest:
"""
A class used to prepare the QM engine input file (Gaussian)
for the ligand and run QM calculations with appropriate
keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine. It then runs a QM calculation with the given
    basis set and functional. The checkpoint file is then converted to
    a formatted checkpoint file. The output files (.log, .chk, and .fchk)
    will then be used to extract the ligand's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the ligand.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="guest.out",
fchk_out_file="guest_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the ligand.
"""
command_line_1 = "%Chk = " + self.guest_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.guest_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.guest_pdb[:-4] + ".com", "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand locally.
"""
execute_command = (
"g16"
+ " < "
+ self.guest_pdb[:-4]
+ ".com"
+ " > "
+ self.guest_pdb[:-4]
+ ".log"
)
with open(self.gauss_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.guest_pdb[:-4]
+ ".chk"
+ " "
+ self.guest_pdb[:-4]
+ ".fchk"
)
with open(self.fchk_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
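# Usage sketch (illustrative only, not executed here): the class above is
# normally driven in three steps, using its default file names.
#
#     prep_guest = PrepareGaussianGuest(guest_pdb="guest_init_II.pdb")
#     prep_guest.write_input()    # writes guest_init_II.com
#     prep_guest.run_gaussian()   # runs g16, writes guest_init_II.log
#     prep_guest.get_fchk()       # formchk guest_init_II.chk -> .fchk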
class PrepareGaussianHostGuest:
"""
A class used to prepare the QM engine input file (Gaussian) for
the receptor - ligand complex and run the QM calculations with
the appropriate keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine for the receptor - ligand complex. It then runs
    a QM calculation with the given basis set and functional. The checkpoint
    file is then converted to a formatted checkpoint file. The output files
    (.log, .chk, and .fchk) will then be used to extract charges for the
    ligand and the receptor.
...
Attributes
----------
charge : int, optional
Total charge of the receptor - ligand complex.
multiplicity : int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_system_out_file : str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_system_out_file : str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
host_guest_input : str, optional
Gaussian input file (.com extension) for the receptor - ligand
QM region.
qm_guest_charge_parameter_file : str, optional
        File containing the charges of the ligand atoms and their
        corresponding atom names. The charges obtained are the polarised
        charges due to the surrounding receptor region.
qm_host_charge_parameter_file : str, optional
File containing the charges of the QM region of the receptor.
qm_guest_atom_charge_parameter_file : str, optional
        File containing the charges of the ligand atoms. The charges obtained
        are the polarised charges due to the surrounding receptor region.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="",
frequency="",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6) SCRF=PCM",
gauss_system_out_file="system_qm.out",
fchk_system_out_file="system_qm_fchk.out",
host_guest_input="host_guest.com",
qm_guest_charge_parameter_file="guest_qm_surround_charges.txt",
qm_host_charge_parameter_file="host_qm_surround_charges.txt",
qm_guest_atom_charge_parameter_file="guest_qm_atom_surround_charges.txt",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
self.gauss_system_out_file = gauss_system_out_file
self.fchk_system_out_file = fchk_system_out_file
self.host_guest_input = host_guest_input
self.qm_guest_charge_parameter_file = qm_guest_charge_parameter_file
self.qm_host_charge_parameter_file = qm_host_charge_parameter_file
self.qm_guest_atom_charge_parameter_file = (
qm_guest_atom_charge_parameter_file
)
def write_input(self):
"""
Writes a Gaussian input file for the receptor - ligand QM region.
"""
command_line_1 = "%Chk = " + self.host_guest_input[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = "Gaussian Input File"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
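        # The extra column of "0" written before the coordinates is the
        # per-atom freeze code used by Gaussian in the geometry specification
        # (0 keeps the atom free to move).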
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_1 = pd.concat([df_1, df_2, df_3], axis=1)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_2 = pd.concat([df_1, df_2, df_3], axis=1)
df_merged = pd.concat([df_merged_1, df_merged_2], axis=0)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.host_guest_input, "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand - receptor region
locally.
"""
execute_command = (
"g16"
+ " < "
+ self.host_guest_input
+ " > "
+ self.host_guest_input[:-4]
+ ".log"
)
with open(self.gauss_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.host_guest_input[:-4]
+ ".chk"
+ " "
+ self.host_guest_input[:-4]
+ ".fchk"
)
with open(self.fchk_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_qm_host_guest_charges(self):
"""
Extract charge information for the receptor - ligand QM region.
"""
log_file = self.host_guest_input[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
        # The "+ 4" offset skips the header lines printed between the
        # "Fitting point charges..." marker and the charge table in the log.
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df_guest = ppdb.df["ATOM"]
number_guest_atoms = df_guest.shape[0]
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
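        # write_input lists the ligand atoms before the receptor QM atoms, so
        # the first number_guest_atoms rows of the ESP charge table belong to
        # the ligand and the remaining rows to the receptor.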
number_host_atoms = df_charge.shape[0] - number_guest_atoms
df_charge_guest = df_charge.head(number_guest_atoms)
df_charge_host = df_charge.tail(number_host_atoms)
df_charge_only_guest = df_charge_guest["Charge"]
df_charge_guest.to_csv(
self.qm_guest_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
df_charge_host.to_csv(
self.qm_host_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
df_charge_only_guest.to_csv(
self.qm_guest_atom_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
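# Usage sketch (illustrative only): a single-point calculation on the combined
# ligand + receptor QM region, followed by extraction of the fitted ESP
# charges. File names are the class defaults.
#
#     prep_complex = PrepareGaussianHostGuest()
#     prep_complex.write_input()                # writes host_guest.com
#     prep_complex.run_gaussian()               # writes host_guest.log
#     prep_complex.get_fchk()                   # host_guest.chk -> .fchk
#     prep_complex.get_qm_host_guest_charges()  # parses host_guest.log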
class ParameterizeGuest:
"""
A class used to obtain force field parameters for the ligand (bond,
angle and charge parameters) from QM calculations.
    This class contains methods to process the output files of the
    Gaussian QM calculations (.chk, .fchk and .log files). Methods
    in the class extract the unprocessed hessian matrix from the
    Gaussian QM calculations, process it and use the Modified
    Seminario Method to obtain the bond and angle parameters. The
    class also extracts the QM charges from the log file.
...
Attributes
----------
xyz_file: str, optional
XYZ file for ligand coordinates obtained from its corresponding
formatted checkpoint file.
coordinate_file: str, optional
Text file containing the ligand coordinates (extracted
from the formatted checkpoint file).
unprocessed_hessian_file: str, optional
Unprocessed hessian matrix of the ligand obtained from the
formatted checkpoint file.
bond_list_file: str, optional
Text file containing the bond information of the ligand extracted
from the log file.
angle_list_file: str, optional
Text file containing the angle information of the ligand extracted
from the log file.
hessian_file: str, optional
Processed hessian matrix of the ligand.
atom_names_file: str, optional
Text file containing the list of atom names from the fchk file.
bond_parameter_file: str, optional
Text file containing the bond parameters for the ligand obtained
using the Modified Seminario method.
angle_parameter_file: str, optional
Text file containing the angle parameters of the ligand obtained
        using the Modified Seminario method.
charge_parameter_file: str, optional
Text file containing the QM charges of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
proper_dihedral_file: str, optional
A text file containing proper dihedral angles of the ligand.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
"""
def __init__(
self,
xyz_file="guest_coords.xyz",
coordinate_file="guest_coordinates.txt",
unprocessed_hessian_file="guest_unprocessed_hessian.txt",
bond_list_file="guest_bond_list.txt",
angle_list_file="guest_angle_list.txt",
hessian_file="guest_hessian.txt",
atom_names_file="guest_atom_names.txt",
bond_parameter_file="guest_bonds.txt",
angle_parameter_file="guest_angles.txt",
charge_parameter_file="guest_qm_surround_charges.txt",
guest_pdb="guest_init_II.pdb",
proper_dihedral_file="proper_dihedrals.txt",
functional="B3LYP",
basis_set="6-31G",
):
self.xyz_file = xyz_file
self.coordinate_file = coordinate_file
self.unprocessed_hessian_file = unprocessed_hessian_file
self.bond_list_file = bond_list_file
self.angle_list_file = angle_list_file
self.hessian_file = hessian_file
self.atom_names_file = atom_names_file
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.charge_parameter_file = charge_parameter_file
self.guest_pdb = guest_pdb
self.proper_dihedral_file = proper_dihedral_file
self.functional = functional
self.basis_set = basis_set
def get_xyz(self):
"""
Saves XYZ file from the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
to_begin = int(i)
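        # The fchk file prints five values per line, so the coordinate block
        # spans ceil(no_coordinates / 5) lines after the header.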
cartesian_coords = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_coordinates / 5))
]
cartesian_list = []
for i in range(len(cartesian_coords)):
cartesian_list.append(cartesian_coords[i].strip().split())
coordinates_list = [
item for sublist in cartesian_list for item in sublist
]
# Converted from Atomic units (Bohrs) to Angstroms
list_coords = [float(x) * BOHRS_PER_ANGSTROM for x in coordinates_list]
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_number_strings = lines[to_begin + 1 : to_end]
atom_numbers_nested = []
for i in range(len(atomic_number_strings)):
atom_numbers_nested.append(atomic_number_strings[i].strip().split())
numbers = [item for sublist in atom_numbers_nested for item in sublist]
N = int(no_coordinates / 3)
# Opens the new xyz file
with open(self.xyz_file, "w") as file:
file.write(str(N) + "\n \n")
coords = np.zeros((N, 3))
n = 0
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
# Print coordinates to new input_coords.xyz file
for i in range(0, N):
for j in range(0, 3):
coords[i][j] = list_coords[n]
n = n + 1
                file.write(
                    names[i]
                    + " "
                    + str(round(coords[i][0], 3))
                    + " "
                    + str(round(coords[i][1], 3))
                    + " "
                    + str(round(coords[i][2], 3))
                    + "\n"
                )
np.savetxt(self.coordinate_file, coords, fmt="%s")
def get_unprocessed_hessian(self):
"""
Saves a text file of the unprocessed hessian matrix from the
formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Cartesian Force Constants" in lines[i]:
no_hessian = re.findall(r"\d+|\d+.\d+", lines[i])
no_hessian = int(no_hessian[0])
to_begin = int(i)
hessian = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_hessian / 5))
]
hessian_list = []
for i in range(len(hessian)):
hessian_list.append(hessian[i].strip().split())
unprocessed_Hessian = [
item for sublist in hessian_list for item in sublist
]
np.savetxt(
self.unprocessed_hessian_file, unprocessed_Hessian, fmt="%s",
)
def get_bond_angles(self):
"""
Saves a text file containing bonds and angles from the gaussian
log file.
"""
log_file = self.guest_pdb[:-4] + ".log"
with open(log_file, "r") as fid:
tline = fid.readline()
bond_list = []
angle_list = []
tmp = "R" # States if bond or angle
# Finds the bond and angles from the .log file
while tline:
tline = fid.readline()
# Line starts at point when bond and angle list occurs
if (
len(tline) > 80
and tline[0:81].strip()
== "! Name Definition Value Derivative Info. !"
):
tline = fid.readline()
tline = fid.readline()
# Stops when all bond and angles recorded
while (tmp[0] == "R") or (tmp[0] == "A"):
line = tline.split()
tmp = line[1]
# Bond or angles listed as string
list_terms = line[2][2:-1]
# Bond List
if tmp[0] == "R":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
bond_list.append(x)
# Angle List
if tmp[0] == "A":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
angle_list.append(x)
tline = fid.readline()
# Leave loop
tline = -1
np.savetxt(self.bond_list_file, bond_list, fmt="%s")
np.savetxt(self.angle_list_file, angle_list, fmt="%s")
def get_hessian(self):
"""
Extracts hessian matrix from the unprocessed hessian matrix
and saves into a new file.
"""
unprocessed_Hessian = np.loadtxt(self.unprocessed_hessian_file)
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
length_hessian = 3 * N
hessian = np.zeros((length_hessian, length_hessian))
m = 0
# Write the hessian in a 2D array format
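        # The fchk stores only the lower triangle of the force-constant
        # matrix row by row; mirror it here into the full symmetric hessian.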
for i in range(0, length_hessian):
for j in range(0, (i + 1)):
hessian[i][j] = unprocessed_Hessian[m]
hessian[j][i] = unprocessed_Hessian[m]
m = m + 1
hessian = (hessian * HARTREE_PER_KCAL_MOL) / (
BOHRS_PER_ANGSTROM ** 2
) # Change from Hartree/bohr to kcal/mol/ang
np.savetxt(self.hessian_file, hessian, fmt="%s")
def get_atom_names(self):
"""
Saves a list of atom names from the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_numbers = lines[to_begin + 1 : to_end]
atom_numbers = []
for i in range(len(atomic_numbers)):
atom_numbers.append(atomic_numbers[i].strip().split())
numbers = [item for sublist in atom_numbers for item in sublist]
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
atom_names = []
for i in range(0, len(names)):
atom_names.append(names[i].strip() + str(i + 1))
np.savetxt(self.atom_names_file, atom_names, fmt="%s")
def get_bond_angle_params(self):
"""
Saves the bond and angle parameter files obtained from
the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
coords = np.loadtxt(self.coordinate_file)
hessian = np.loadtxt(self.hessian_file)
bond_list = np.loadtxt(self.bond_list_file, dtype=int)
atom_names = np.loadtxt(self.atom_names_file, dtype=str)
# Find bond lengths
bond_lengths = np.zeros((N, N))
for i in range(0, N):
for j in range(0, N):
diff_i_j = np.array(coords[i, :]) - np.array(coords[j, :])
bond_lengths[i][j] = np.linalg.norm(diff_i_j)
eigenvectors = np.empty((3, 3, N, N), dtype=complex)
eigenvalues = np.empty((N, N, 3), dtype=complex)
partial_hessian = np.zeros((3, 3))
for i in range(0, N):
for j in range(0, N):
partial_hessian = hessian[
(i * 3) : ((i + 1) * 3), (j * 3) : ((j + 1) * 3)
]
[a, b] = np.linalg.eig(partial_hessian)
eigenvalues[i, j, :] = a
eigenvectors[:, :, i, j] = b
# Modified Seminario method to find the bond parameters and
# print them to file
file_bond = open(self.bond_parameter_file, "w")
k_b = np.zeros(len(bond_list))
bond_length_list = np.zeros(len(bond_list))
unique_values_bonds = [] # Used to find average values
for i in range(0, len(bond_list)):
AB = force_constant_bond(
bond_list[i][0],
bond_list[i][1],
eigenvalues,
eigenvectors,
coords,
)
BA = force_constant_bond(
bond_list[i][1],
bond_list[i][0],
eigenvalues,
eigenvectors,
coords,
)
# Order of bonds sometimes causes slight differences,
# find the mean
k_b[i] = np.real((AB + BA) / 2)
            # Vibrational_scaling takes into account DFT deficiencies /
            # anharmonicity
vibrational_scaling = get_vibrational_scaling(
functional=self.functional, basis_set=self.basis_set
)
vibrational_scaling_squared = vibrational_scaling ** 2
k_b[i] = k_b[i] * vibrational_scaling_squared
bond_length_list[i] = bond_lengths[bond_list[i][0]][
bond_list[i][1]
]
file_bond.write(
atom_names[bond_list[i][0]]
+ "-"
+ atom_names[bond_list[i][1]]
+ " "
)
file_bond.write(
str("%#.5g" % k_b[i])
+ " "
+ str("%#.4g" % bond_length_list[i])
+ " "
+ str(bond_list[i][0] + 1)
+ " "
+ str(bond_list[i][1] + 1)
)
file_bond.write("\n")
unique_values_bonds.append(
[
atom_names[bond_list[i][0]],
atom_names[bond_list[i][1]],
k_b[i],
bond_length_list[i],
1,
]
)
file_bond.close()
angle_list = np.loadtxt(self.angle_list_file, dtype=int)
# Modified Seminario method to find the angle parameters
# and print them to file
file_angle = open(self.angle_parameter_file, "w")
k_theta = np.zeros(len(angle_list))
theta_0 = np.zeros(len(angle_list))
unique_values_angles = [] # Used to find average values
# Modified Seminario part goes here ...
# Connectivity information for Modified Seminario Method
central_atoms_angles = []
        # A structure is created with the index giving the central
        # atom of the angle; an array then lists the angles with
        # that central atom, i.e. central_atoms_angles{3} contains
        # an array of angles with central atom 3.
for i in range(0, len(coords)):
central_atoms_angles.append([])
for j in range(0, len(angle_list)):
if i == angle_list[j][1]:
# For angle ABC, atoms A C are written to array
AC_array = [angle_list[j][0], angle_list[j][2], j]
central_atoms_angles[i].append(AC_array)
# For angle ABC, atoms C A are written to array
CA_array = [angle_list[j][2], angle_list[j][0], j]
central_atoms_angles[i].append(CA_array)
# Sort rows by atom number
for i in range(0, len(coords)):
central_atoms_angles[i] = sorted(
central_atoms_angles[i], key=itemgetter(0)
)
# Find normals u_PA for each angle
unit_PA_all_angles = []
for i in range(0, len(central_atoms_angles)):
unit_PA_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
                # For the angle at central_atoms_angles[i][j,:] the
                # corresponding u_PA value is found for the plane ABC and
                # bond AB, where ABC corresponds to the order of the
                # arguments. This is why the reverse order was also added.
unit_PA_all_angles[i].append(
u_PA_from_angles(
central_atoms_angles[i][j][0],
i,
central_atoms_angles[i][j][1],
coords,
)
)
        # Finds the contributing factors from the other angle terms.
        # scaling_factor_all_angles = cell(max(max(angle_list)));
        # this array will contain the scaling factor and angle list position.
scaling_factor_all_angles = []
for i in range(0, len(central_atoms_angles)):
scaling_factor_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
n = 1
m = 1
angles_around = 0
additional_contributions = 0
scaling_factor_all_angles[i].append([0, 0])
# Position in angle list
scaling_factor_all_angles[i][j][1] = central_atoms_angles[i][
j
][2]
                # Goes through the list of angles with the same central atom
                # and computes the term needed for the modified Seminario
                # method.
# Forwards directions, finds the same bonds with the central atom i
while (
((j + n) < len(central_atoms_angles[i]))
and central_atoms_angles[i][j][0]
== central_atoms_angles[i][j + n][0]
):
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j + n][:],
)
)
)
** 2
)
n = n + 1
angles_around = angles_around + 1
# Backwards direction, finds the same bonds with the central atom i
while ((j - m) >= 0) and central_atoms_angles[i][j][
0
] == central_atoms_angles[i][j - m][0]:
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j - m][:],
)
)
)
** 2
)
m = m + 1
angles_around = angles_around + 1
if n != 1 or m != 1:
                    # Finds the mean value of the additional contribution;
                    # to revert to the normal Seminario method, comment
                    # out the "+" part below.
scaling_factor_all_angles[i][j][0] = 1 + (
additional_contributions / (m + n - 2)
)
else:
scaling_factor_all_angles[i][j][0] = 1
scaling_factors_angles_list = []
for i in range(0, len(angle_list)):
scaling_factors_angles_list.append([])
# Orders the scaling factors according to the angle list
for i in range(0, len(central_atoms_angles)):
for j in range(0, len(central_atoms_angles[i])):
scaling_factors_angles_list[
scaling_factor_all_angles[i][j][1]
].append(scaling_factor_all_angles[i][j][0])
# Finds the angle force constants with the scaling factors
# included for each angle
for i in range(0, len(angle_list)):
# Ensures that there is no difference when the
# ordering is changed
[AB_k_theta, AB_theta_0] = force_angle_constant(
angle_list[i][0],
angle_list[i][1],
angle_list[i][2],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][0],
scaling_factors_angles_list[i][1],
)
[BA_k_theta, BA_theta_0] = force_angle_constant(
angle_list[i][2],
angle_list[i][1],
angle_list[i][0],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][1],
scaling_factors_angles_list[i][0],
)
k_theta[i] = (AB_k_theta + BA_k_theta) / 2
theta_0[i] = (AB_theta_0 + BA_theta_0) / 2
# Vibrational_scaling takes into account DFT
            # deficiencies / anharmonicity
k_theta[i] = k_theta[i] * vibrational_scaling_squared
file_angle.write(
atom_names[angle_list[i][0]]
+ "-"
+ atom_names[angle_list[i][1]]
+ "-"
+ atom_names[angle_list[i][2]]
+ " "
)
file_angle.write(
str("%#.4g" % k_theta[i])
+ " "
+ str("%#.4g" % theta_0[i])
+ " "
+ str(angle_list[i][0] + 1)
+ " "
+ str(angle_list[i][1] + 1)
+ " "
+ str(angle_list[i][2] + 1)
)
file_angle.write("\n")
unique_values_angles.append(
[
atom_names[angle_list[i][0]],
atom_names[angle_list[i][1]],
atom_names[angle_list[i][2]],
k_theta[i],
theta_0[i],
1,
]
)
file_angle.close()
def get_charges(self):
"""
Saves the atomic charges in a text file obtained from
the Gaussian log file.
"""
log_file = self.guest_pdb[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
df_charge.to_csv(
self.charge_parameter_file, index=False, header=False, sep=" ",
)
def get_proper_dihedrals(self):
"""
Saves proper dihedral angles of the ligand in a text file.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
no_atoms = len(ppdb.df["ATOM"])
atom_index_list = []
for i in range(no_atoms):
atom_index_list.append(i + 1)
possible_dihedrals = []
for dihed in itertools.permutations(atom_index_list, 4):
possible_dihedrals.append(dihed)
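        # A permutation i-j-k-l is kept as a proper dihedral only if (i, j),
        # (j, k) and (k, l) are all bonds; the bond list is checked in both
        # directions below, and reversed duplicates are removed afterwards.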
df_bonds = pd.read_csv(
self.bond_parameter_file, header=None, delimiter=r"\s+"
)
df_bonds.columns = [
"bond",
"k_bond",
"bond_length",
"bond_1",
"bond_2",
]
bond1 = df_bonds["bond_1"].values.tolist()
bond2 = df_bonds["bond_2"].values.tolist()
bond_list_list = []
for i in range(len(bond1)):
args = (bond1[i], bond2[i])
bond_list_list.append(list(args))
reverse_bond_list_list = []
for bonds in bond_list_list:
reverse_bond_list_list.append(reverse_list(bonds))
bond_lists = bond_list_list + reverse_bond_list_list
proper_dihed_repeated = []
for i in range(len(possible_dihedrals)):
dihed_frag = (
[possible_dihedrals[i][0], possible_dihedrals[i][1]],
[possible_dihedrals[i][1], possible_dihedrals[i][2]],
[possible_dihedrals[i][2], possible_dihedrals[i][3]],
)
a = [
dihed_frag[0] in bond_lists,
dihed_frag[1] in bond_lists,
dihed_frag[2] in bond_lists,
]
if a == [True, True, True]:
proper_dihed_repeated.append(possible_dihedrals[i])
len_repeated_dihed_list = len(proper_dihed_repeated)
proper_dihedrals = proper_dihed_repeated
for x in proper_dihedrals:
z = x[::-1]
if z in proper_dihedrals:
proper_dihedrals.remove(z)
len_non_repeated_dihed_list = len(proper_dihedrals)
# print(len_repeated_dihed_list == len_non_repeated_dihed_list * 2)
np.savetxt(self.proper_dihedral_file, proper_dihedrals, fmt="%s")
# return(proper_dihedrals)
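# Usage sketch (illustrative only): ParameterizeGuest post-processes the
# Gaussian output of PrepareGaussianGuest; later steps read the text files
# written by earlier ones.
#
#     params_guest = ParameterizeGuest(guest_pdb="guest_init_II.pdb")
#     params_guest.get_xyz()
#     params_guest.get_unprocessed_hessian()
#     params_guest.get_bond_angles()
#     params_guest.get_hessian()
#     params_guest.get_atom_names()
#     params_guest.get_bond_angle_params()
#     params_guest.get_charges()
#     params_guest.get_proper_dihedrals()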
class PrepareGaussianHost:
"""
A class used to prepare the QM engine input file (Gaussian)
for the receptor and run QM calculations with appropriate keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine. It then runs a QM calculation with the given
    basis set and functional. The checkpoint file is then converted to
    a formatted checkpoint file. The output files (.log, .chk, and .fchk)
    will then be used to extract the receptor's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the receptor.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the receptor where S represents
the total spin of the receptor.
host_qm_pdb: str, optional
PDB file of the receptor's QM region with atom numbers
beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the receptor
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
"""
def __init__(
self,
charge=0,
multiplicity=1,
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE) SCF=(maxcycles=4000) SYMMETRY=NONE",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="host_qm.out",
fchk_out_file="host_qm_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the receptor QM region.
"""
# TODO: create generic function for Gaussian Input file (DRY principle)
command_line_1 = "%Chk = " + self.host_qm_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.host_qm_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.host_qm_pdb[:-4] + ".com", "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the receptor locally.
"""
execute_command = (
"g16"
+ " < "
+ self.host_qm_pdb[:-4]
+ ".com"
+ " > "
+ self.host_qm_pdb[:-4]
+ ".log"
)
with open(self.gauss_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.host_qm_pdb[:-4]
+ ".chk"
+ " "
+ self.host_qm_pdb[:-4]
+ ".fchk"
)
with open(self.fchk_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
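# PrepareGaussianHost mirrors PrepareGaussianGuest (write_input() ->
# run_gaussian() -> get_fchk()), but operates on the receptor QM-region PDB
# (host_qm.pdb by default) and its derived .com/.log/.chk/.fchk files.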
class ParameterizeHost:
"""
A class used to obtain force field parameters for the QM region
of the receptor (bond, angle and charge parameters) from QM
calculations.
    This class contains methods to process the output files of the
    Gaussian QM calculations (.chk, .fchk and .log files). Methods
    in the class extract the unprocessed hessian matrix from the
    Gaussian QM calculations, process it and use the Modified
    Seminario Method to obtain the bond and angle parameters. The
    class also extracts the QM charges from the log file.
...
Attributes
----------
xyz_file: str, optional
        XYZ file for the receptor QM-region coordinates obtained from its
        corresponding formatted checkpoint file.
coordinate_file: str, optional
Text file containing the receptor coordinates (extracted
from the formatted checkpoint file).
unprocessed_hessian_file: str, optional
Unprocessed hessian matrix of the receptor obtained from the
formatted checkpoint file.
bond_list_file: str, optional
Text file containing the bond information of the receptor
extracted from the log file.
angle_list_file: str, optional
Text file containing the angle information of the receptor
extracted from the log file.
hessian_file: str, optional
Processed hessian matrix of the receptor.
atom_names_file: str, optional
Text file containing the list of atom names from the fchk file.
bond_parameter_file: str, optional
Text file containing the bond parameters for the receptor
obtained using the Modified Seminario method.
angle_parameter_file: str, optional
Text file containing the angle parameters of the receptor.
charge_parameter_file: str, optional
Text file containing the QM charges of the receptor.
host_qm_pdb: str, optional
PDB file for the receptor's QM region.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
"""
def __init__(
self,
xyz_file="host_qm_coords.xyz",
coordinate_file="host_qm_coordinates.txt",
unprocessed_hessian_file="host_qm_unprocessed_hessian.txt",
bond_list_file="host_qm_bond_list.txt",
angle_list_file="host_qm_angle_list.txt",
hessian_file="host_qm_hessian.txt",
atom_names_file="host_qm_atom_names.txt",
bond_parameter_file="host_qm_bonds.txt",
angle_parameter_file="host_qm_angles.txt",
charge_parameter_file="host_qm_surround_charges.txt",
host_qm_pdb="host_qm.pdb",
functional="B3LYP",
basis_set="6-31G",
):
self.xyz_file = xyz_file
self.coordinate_file = coordinate_file
self.unprocessed_hessian_file = unprocessed_hessian_file
self.bond_list_file = bond_list_file
self.angle_list_file = angle_list_file
self.hessian_file = hessian_file
self.atom_names_file = atom_names_file
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.charge_parameter_file = charge_parameter_file
self.host_qm_pdb = host_qm_pdb
self.functional = functional
self.basis_set = basis_set
def get_xyz(self):
"""
Saves XYZ file from the formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
to_begin = int(i)
cartesian_coords = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_coordinates / 5))
]
cartesian_list = []
for i in range(len(cartesian_coords)):
cartesian_list.append(cartesian_coords[i].strip().split())
coordinates_list = [
item for sublist in cartesian_list for item in sublist
]
list_coords = [float(x) * float(0.529) for x in coordinates_list]
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_numbers = lines[to_begin + 1 : to_end]
atom_numbers = []
for i in range(len(atomic_numbers)):
atom_numbers.append(atomic_numbers[i].strip().split())
numbers = [item for sublist in atom_numbers for item in sublist]
N = int(no_coordinates / 3)
# Opens the new xyz file
file = open(self.xyz_file, "w")
file.write(str(N) + "\n \n")
coords = np.zeros((N, 3))
n = 0
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
# Print coordinates to new input_coords.xyz file
for i in range(0, N):
for j in range(0, 3):
coords[i][j] = list_coords[n]
n = n + 1
            file.write(
                names[i]
                + " "
                + str(round(coords[i][0], 3))
                + " "
                + str(round(coords[i][1], 3))
                + " "
                + str(round(coords[i][2], 3))
                + "\n"
            )
file.close()
np.savetxt(self.coordinate_file, coords, fmt="%s")
def get_unprocessed_hessian(self):
"""
Saves a text file of the unprocessed hessian matrix from the
formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Cartesian Force Constants" in lines[i]:
no_hessian = re.findall(r"\d+|\d+.\d+", lines[i])
no_hessian = int(no_hessian[0])
for i in range(len(lines)):
if "Cartesian Force Constants" in lines[i]:
to_begin = int(i)
hessian = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_hessian / 5))
]
hessian_list = []
for i in range(len(hessian)):
hessian_list.append(hessian[i].strip().split())
unprocessed_Hessian = [
item for sublist in hessian_list for item in sublist
]
np.savetxt(
self.unprocessed_hessian_file, unprocessed_Hessian, fmt="%s",
)
def get_bond_angles(self):
"""
Saves a text file containing bonds and angles from the gaussian
log file.
"""
log_file = self.host_qm_pdb[:-4] + ".log"
fid = open(log_file, "r")
tline = fid.readline()
bond_list = []
angle_list = []
        tmp = "R" # States if bond or angle
# Finds the bond and angles from the .log file
while tline:
tline = fid.readline()
# Line starts at point when bond and angle list occurs
if (
len(tline) > 80
and tline[0:81].strip()
== "! Name Definition Value Derivative Info. !"
):
tline = fid.readline()
tline = fid.readline()
# Stops when all bond and angles recorded
while (tmp[0] == "R") or (tmp[0] == "A"):
line = tline.split()
tmp = line[1]
# Bond or angles listed as string
list_terms = line[2][2:-1]
# Bond List
if tmp[0] == "R":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
bond_list.append(x)
# Angle List
if tmp[0] == "A":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
angle_list.append(x)
tline = fid.readline()
# Leave loop
tline = -1
        fid.close()
        np.savetxt(self.bond_list_file, bond_list, fmt="%s")
np.savetxt(self.angle_list_file, angle_list, fmt="%s")
def get_hessian(self):
"""
Extracts hessian matrix from the unprocessed hessian matrix
and saves into a new file.
"""
unprocessed_Hessian = np.loadtxt(self.unprocessed_hessian_file)
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
length_hessian = 3 * N
hessian = np.zeros((length_hessian, length_hessian))
m = 0
# Write the hessian in a 2D array format
for i in range(0, (length_hessian)):
for j in range(0, (i + 1)):
hessian[i][j] = unprocessed_Hessian[m]
hessian[j][i] = unprocessed_Hessian[m]
m = m + 1
hessian = (hessian * (627.509391)) / (
0.529 ** 2
) # Change from Hartree/bohr to kcal/mol/ang
np.savetxt(self.hessian_file, hessian, fmt="%s")
def get_atom_names(self):
"""
Saves a list of atom names from the formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_numbers = lines[to_begin + 1 : to_end]
atom_numbers = []
for i in range(len(atomic_numbers)):
atom_numbers.append(atomic_numbers[i].strip().split())
numbers = [item for sublist in atom_numbers for item in sublist]
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
atom_names = []
for i in range(0, len(names)):
atom_names.append(names[i].strip() + str(i + 1))
np.savetxt(self.atom_names_file, atom_names, fmt="%s")
def get_bond_angle_params(self):
"""
Saves the bond and angle parameter files obtained from
the formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
coords = np.loadtxt(self.coordinate_file)
hessian = np.loadtxt(self.hessian_file)
bond_list = np.loadtxt(self.bond_list_file, dtype=int)
atom_names = np.loadtxt(self.atom_names_file, dtype=str)
# Find bond lengths
bond_lengths = np.zeros((N, N))
for i in range(0, N):
for j in range(0, N):
diff_i_j = np.array(coords[i, :]) - np.array(coords[j, :])
bond_lengths[i][j] = np.linalg.norm(diff_i_j)
eigenvectors = np.empty((3, 3, N, N), dtype=complex)
eigenvalues = np.empty((N, N, 3), dtype=complex)
partial_hessian = np.zeros((3, 3))
for i in range(0, N):
for j in range(0, N):
partial_hessian = hessian[
(i * 3) : ((i + 1) * 3), (j * 3) : ((j + 1) * 3)
]
[a, b] = np.linalg.eig(partial_hessian)
eigenvalues[i, j, :] = a
eigenvectors[:, :, i, j] = b
# Modified Seminario method to find the bond parameters
# and print them to file
file_bond = open(self.bond_parameter_file, "w")
k_b = np.zeros(len(bond_list))
bond_length_list = np.zeros(len(bond_list))
unique_values_bonds = [] # Used to find average values
for i in range(0, len(bond_list)):
AB = force_constant_bond(
bond_list[i][0],
bond_list[i][1],
eigenvalues,
eigenvectors,
coords,
)
BA = force_constant_bond(
bond_list[i][1],
bond_list[i][0],
eigenvalues,
eigenvectors,
coords,
)
# Order of bonds sometimes causes slight differences,
# find the mean
k_b[i] = np.real((AB + BA) / 2)
            # Vibrational_scaling takes into account DFT deficiencies
            # / anharmonicity
vibrational_scaling = get_vibrational_scaling(
functional=self.functional, basis_set=self.basis_set
)
vibrational_scaling_squared = vibrational_scaling ** 2
k_b[i] = k_b[i] * vibrational_scaling_squared
bond_length_list[i] = bond_lengths[bond_list[i][0]][
bond_list[i][1]
]
file_bond.write(
atom_names[bond_list[i][0]]
+ "-"
+ atom_names[bond_list[i][1]]
+ " "
)
file_bond.write(
str("%#.5g" % k_b[i])
+ " "
+ str("%#.4g" % bond_length_list[i])
+ " "
+ str(bond_list[i][0] + 1)
+ " "
+ str(bond_list[i][1] + 1)
)
file_bond.write("\n")
unique_values_bonds.append(
[
atom_names[bond_list[i][0]],
atom_names[bond_list[i][1]],
k_b[i],
bond_length_list[i],
1,
]
)
file_bond.close()
angle_list = np.loadtxt(self.angle_list_file, dtype=int)
# Modified Seminario method to find the angle parameters
# and print them to file
file_angle = open(self.angle_parameter_file, "w")
k_theta = np.zeros(len(angle_list))
theta_0 = np.zeros(len(angle_list))
unique_values_angles = [] # Used to find average values
# Modified Seminario part goes here ...
# Connectivity information for Modified Seminario Method
central_atoms_angles = []
# A structure is created with the index giving the central
# atom of the angle, an array then lists the angles with
# that central atom.
# i.e. central_atoms_angles{3} contains an array of angles
# with central atom 3
for i in range(0, len(coords)):
central_atoms_angles.append([])
for j in range(0, len(angle_list)):
if i == angle_list[j][1]:
# For angle ABC, atoms A C are written to array
AC_array = [angle_list[j][0], angle_list[j][2], j]
central_atoms_angles[i].append(AC_array)
# For angle ABC, atoms C A are written to array
CA_array = [angle_list[j][2], angle_list[j][0], j]
central_atoms_angles[i].append(CA_array)
# Sort rows by atom number
for i in range(0, len(coords)):
central_atoms_angles[i] = sorted(
central_atoms_angles[i], key=itemgetter(0)
)
# Find normals u_PA for each angle
unit_PA_all_angles = []
for i in range(0, len(central_atoms_angles)):
unit_PA_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
# For the angle at central_atoms_angles[i][j,:] the corresponding
# u_PA value is found for the plane ABC and bond AB,
                # where ABC corresponds to the order of the arguments.
# This is why the reverse order was also added
unit_PA_all_angles[i].append(
u_PA_from_angles(
central_atoms_angles[i][j][0],
i,
central_atoms_angles[i][j][1],
coords,
)
)
# Finds the contributing factors from the other angle terms
# scaling_factor_all_angles = cell(max(max(angle_list)));
# This array will contain scaling factor and angle list position
scaling_factor_all_angles = []
for i in range(0, len(central_atoms_angles)):
scaling_factor_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
n = 1
m = 1
angles_around = 0
additional_contributions = 0
scaling_factor_all_angles[i].append([0, 0])
# Position in angle list
scaling_factor_all_angles[i][j][1] = central_atoms_angles[i][
j
][2]
                # Goes through the list of angles with the same central
                # atom and computes the term needed for the modified
                # Seminario method.
# Forwards directions, finds the same bonds with the central atom i
while (
((j + n) < len(central_atoms_angles[i]))
and central_atoms_angles[i][j][0]
== central_atoms_angles[i][j + n][0]
):
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j + n][:],
)
)
)
** 2
)
n = n + 1
angles_around = angles_around + 1
# Backwards direction, finds the same bonds with the central atom i
while ((j - m) >= 0) and central_atoms_angles[i][j][
0
] == central_atoms_angles[i][j - m][0]:
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j - m][:],
)
)
)
** 2
)
m = m + 1
angles_around = angles_around + 1
if n != 1 or m != 1:
                    # Finds the mean value of the additional contribution;
                    # to revert to the normal Seminario method, comment
                    # out the "+" part below.
scaling_factor_all_angles[i][j][0] = 1 + (
additional_contributions / (m + n - 2)
)
else:
scaling_factor_all_angles[i][j][0] = 1
scaling_factors_angles_list = []
for i in range(0, len(angle_list)):
scaling_factors_angles_list.append([])
# Orders the scaling factors according to the angle list
for i in range(0, len(central_atoms_angles)):
for j in range(0, len(central_atoms_angles[i])):
scaling_factors_angles_list[
scaling_factor_all_angles[i][j][1]
].append(scaling_factor_all_angles[i][j][0])
# Finds the angle force constants with the scaling factors
# included for each angle
for i in range(0, len(angle_list)):
# Ensures that there is no difference when the
# ordering is changed
[AB_k_theta, AB_theta_0] = force_angle_constant(
angle_list[i][0],
angle_list[i][1],
angle_list[i][2],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][0],
scaling_factors_angles_list[i][1],
)
[BA_k_theta, BA_theta_0] = force_angle_constant(
angle_list[i][2],
angle_list[i][1],
angle_list[i][0],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][1],
scaling_factors_angles_list[i][0],
)
k_theta[i] = (AB_k_theta + BA_k_theta) / 2
theta_0[i] = (AB_theta_0 + BA_theta_0) / 2
            # Vibrational scaling takes into account DFT
            # deficiencies / anharmonicity
k_theta[i] = k_theta[i] * vibrational_scaling_squared
file_angle.write(
atom_names[angle_list[i][0]]
+ "-"
+ atom_names[angle_list[i][1]]
+ "-"
+ atom_names[angle_list[i][2]]
+ " "
)
file_angle.write(
str("%#.4g" % k_theta[i])
+ " "
+ str("%#.4g" % theta_0[i])
+ " "
+ str(angle_list[i][0] + 1)
+ " "
+ str(angle_list[i][1] + 1)
+ " "
+ str(angle_list[i][2] + 1)
)
file_angle.write("\n")
unique_values_angles.append(
[
atom_names[angle_list[i][0]],
atom_names[angle_list[i][1]],
atom_names[angle_list[i][2]],
k_theta[i],
theta_0[i],
1,
]
)
file_angle.close()
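    # Note added for clarity (illustrative, not part of the original code):
    # the modified Seminario scheme above scales each angle force constant by
    #     scaling_factor = 1 + mean_j( |u_PA . u_PA_j|^2 )
    # where the mean runs over the other angles j that share the same central
    # atom and the same A-B bond. For example, if two neighbouring angles give
    # dot products of 0.6 and 0.8, the factor is 1 + (0.6**2 + 0.8**2) / 2 = 1.5.
    # A scaling factor of 1 recovers the original Seminario method.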
def get_charges(self):
"""
Saves the atomic charges in a text file obtained from
the Gaussian log file.
"""
log_file = self.host_qm_pdb[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
df_charge.to_csv(
self.charge_parameter_file, index=False, header=False, sep=" ",
)
class GuestAmberXMLAmber:
"""
    A class used to generate a template force field XML file for the ligand
    in order to regenerate the reparameterised force field XML file.
    This class contains methods to generate a template XML force field through
    openforcefield. The XML template can be generated from different file
    formats such as PDB, SDF, and SMI, and charged ligands are supported as
    well. Reparameterised XML force field files are then generated from the
    template files. Different energy components such as the bond, angle,
    torsional and non-bonded energies are computed for the non-reparameterised
    and the reparameterised force fields, and the differences between the two
    sets of energies can then be analysed.
...
Attributes
----------
charge : int
Charge of the ligand.
num_charge_atoms: int, optional
Number of charged atoms in the molecule.
charge_atom_1: int, optional
Charge on the first charged atom.
index_charge_atom_1: int, optional
Index of the first charged atom.
system_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
system_mol2: str, optional
Ligand Mol2 file obtained from PDB file.
system_in: str, optional
Prepi file as required by antechamber.
system_frcmod: str, optional
FRCMOD file as required by antechamber.
prmtop_system : str, optional
Topology file obtained from the ligand PDB.
inpcrd_system : str, optional
Coordinate file obtained from the ligand PDB using the
command saveamberparm.
system_leap : str, optional
Amber generated leap file for generating and saving topology
and coordinate files.
system_xml: str, optional
Serialized XML force field file of the ligand.
system_smi: str, optional
Ligand SMILES format file.
system_sdf: str, optional
Ligand SDF (structure-data) format file.
system_init_sdf: str, optional
Ligand SDF (structure-data) format file. This file will be
generated only if the ligand is charged.
index_charge_atom_2: int, optional
Index of the second charged atom of the ligand.
charge_atom_2: int, optional
Charge on the second charged atom of the ligand.
charge_parameter_file: str, optional
        File containing the charges of the ligand atoms and their
        corresponding atom names.
system_qm_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
bond_parameter_file: str, optional
Text file containing the bond parameters for the ligand.
angle_parameter_file: str, optional
Text file containing the angle parameters of the ligand.
system_qm_params_file: str, optional
A text file containing the QM obtained parameters for the
ligand.
reparameterised_intermediate_system_xml_file: str, optional
        XML force field file with bond and angle parameter lines replaced by
corresponding values obtained from the QM calculations.
system_xml_non_bonded_file: str, optional
Text file to write the NonBondedForce Charge Parameters from
the non-parameterised system XML file.
system_xml_non_bonded_reparams_file: str, optional
Text file containing the non-bonded parameters parsed from the
XML force field file.
reparameterised_system_xml_file: str, optional
Reparameterized force field XML file obtained using
openforcefield.
non_reparameterised_system_xml_file: str, optional
Non-reparameterized force field XML file obtained using
openforcefield.
prmtop_system_non_params: str, optional
Amber generated topology file saved from the non-reparameterized
force field XML file for the ligand.
inpcrd_system_non_params: str, optional
Amber generated coordinate file saved from the non-reparameterized
force field XML file for the ligand.
prmtop_system_params: str, optional
Amber generated topology file saved from the reparameterized
force field XML file for the ligand.
inpcrd_system_params: str, optional
Amber generated coordinate file saved from the reparameterized
force field XML file for the ligand.
load_topology: str, optional
Argument to specify how to load the topology. Can either be "openmm"
or "parmed".
"""
def __init__(
self,
charge=0,
# TODO: some of these variables are ints, and shouldn't be initialized as strings
num_charge_atoms="",
charge_atom_1="",
index_charge_atom_1="",
system_pdb="guest_init_II.pdb",
system_mol2="guest.mol2",
system_in="guest.in",
system_frcmod="guest.frcmod",
prmtop_system="guest.prmtop",
inpcrd_system="guest.inpcrd",
system_leap="guest.leap",
system_xml="guest_init.xml",
system_smi="guest.smi",
system_sdf="guest.sdf",
system_init_sdf="guest_init.sdf",
index_charge_atom_2=" ",
charge_atom_2=" ",
charge_parameter_file="guest_qm_surround_charges.txt",
system_qm_pdb="guest_init_II.pdb",
bond_parameter_file="guest_bonds.txt",
angle_parameter_file="guest_angles.txt",
system_qm_params_file="guest_qm_params.txt",
reparameterised_intermediate_system_xml_file="guest_intermediate_reparameterised.xml",
system_xml_non_bonded_file="guest_xml_non_bonded.txt",
system_xml_non_bonded_reparams_file="guest_xml_non_bonded_reparams.txt",
reparameterised_system_xml_file="guest_reparameterised.xml",
non_reparameterised_system_xml_file="guest_init.xml",
prmtop_system_non_params="guest_non_params.prmtop",
inpcrd_system_non_params="guest_non_params.inpcrd",
prmtop_system_params="guest_params.prmtop",
inpcrd_system_params="guest_params.inpcrd",
load_topology="openmm",
):
self.charge = charge
self.num_charge_atoms = num_charge_atoms
self.charge_atom_1 = charge_atom_1
self.index_charge_atom_1 = index_charge_atom_1
self.system_pdb = system_pdb
self.system_mol2 = system_mol2
self.system_in = system_in
self.system_frcmod = system_frcmod
self.prmtop_system = prmtop_system
self.inpcrd_system = inpcrd_system
self.system_leap = system_leap
self.system_xml = system_xml
self.system_smi = system_smi
self.system_sdf = system_sdf
self.system_init_sdf = system_init_sdf
self.index_charge_atom_2 = index_charge_atom_2
self.charge_atom_2 = charge_atom_2
self.charge_parameter_file = charge_parameter_file
self.system_qm_pdb = system_qm_pdb
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.system_qm_params_file = system_qm_params_file
self.reparameterised_intermediate_system_xml_file = (
reparameterised_intermediate_system_xml_file
)
self.system_xml_non_bonded_file = system_xml_non_bonded_file
self.system_xml_non_bonded_reparams_file = (
system_xml_non_bonded_reparams_file
)
self.reparameterised_system_xml_file = reparameterised_system_xml_file
self.non_reparameterised_system_xml_file = (
non_reparameterised_system_xml_file
)
self.prmtop_system_non_params = prmtop_system_non_params
self.inpcrd_system_non_params = inpcrd_system_non_params
self.prmtop_system_params = prmtop_system_params
self.inpcrd_system_params = inpcrd_system_params
self.load_topology = load_topology
def generate_xml_antechamber(self):
"""
Generates an XML forcefield file from the PDB file through antechamber.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -omol2 " + self.system_mol2
"obabel -ipdb "
+ self.system_pdb
+ " -omol2 -O "
+ self.system_mol2
)
os.system(command)
command = (
"antechamber -i "
+ self.system_mol2
+ " -fi mol2 -o "
+ self.system_in
+ " -fo prepi -c bcc -nc "
+ str(self.charge)
)
os.system(command)
command = (
"parmchk2 -i "
+ self.system_in
+ " -o "
+ self.system_frcmod
+ " -f prepi -a Y"
)
os.system(command)
os.system(
"rm -rf ANTECHAMBER* leap.log sqm* ATOMTYPE.INF PREP.INF NEWPDB.PDB"
)
line_1 = "loadamberprep " + self.system_in
line_2 = "loadamberparams " + self.system_frcmod
line_3 = "pdb = loadpdb " + self.system_pdb
line_4 = (
"saveamberparm pdb "
+ self.prmtop_system
+ " "
+ self.inpcrd_system
)
line_5 = "quit"
with open(self.system_leap, "w") as f:
f.write(" " + "\n")
f.write(line_1 + "\n")
f.write(line_2 + "\n")
f.write(line_3 + "\n")
f.write(line_4 + "\n")
f.write(line_5 + "\n")
command = "tleap -f " + self.system_leap
os.system(command)
parm = parmed.load_file(self.prmtop_system, self.inpcrd_system)
system = parm.createSystem()
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
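    # For illustration only: with the default file names, the tleap input
    # written above looks roughly like
    #
    #   loadamberprep guest.in
    #   loadamberparams guest.frcmod
    #   pdb = loadpdb guest_init_II.pdb
    #   saveamberparm pdb guest.prmtop guest.inpcrd
    #   quit
    #
    # after which "tleap -f guest.leap" is run and the parmed-built OpenMM
    # system is serialised to guest_init.xml.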
def generate_xml_from_pdb_smi(self):
"""
Generates an XML forcefield file from the SMILES file through
openforcefield.
"""
# off_molecule = openforcefield.topology.Molecule(self.system_smi)
off_molecule = Molecule(self.system_smi)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
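    # Usage sketch (hypothetical; assumes guest.smi and guest_init_II.pdb are
    # present in the working directory, as per the constructor defaults):
    #
    #   guest = GuestAmberXMLAmber(system_smi="guest.smi",
    #                              system_pdb="guest_init_II.pdb")
    #   guest.generate_xml_from_pdb_smi()   # writes guest_init.xml
    #
    # Note that the parmed `structure` object built above is not used further
    # by this method; only the serialised system XML is kept.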
def generate_xml_from_pdb_sdf(self):
"""
Generates an XML forcefield file from the SDF file through
openforcefield.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -osdf " + self.system_sdf
"obabel -ipdb "
+ self.system_pdb
+ " -osdf -O "
+ self.system_sdf
)
os.system(command)
# off_molecule = openforcefield.topology.Molecule(self.system_sdf)
off_molecule = Molecule(self.system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def generate_xml_from_charged_pdb_sdf(self):
"""
Generates an XML forcefield file for a singly charged ligand molecule
from the SDF file through openforcefield.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -osdf " + self.system_init_sdf
"obabel -ipdb "
+ self.system_pdb
+ " -osdf -O "
+ self.system_init_sdf
)
os.system(command)
with open(self.system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(self.system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(self.num_charge_atoms)
+ " "
+ str(self.index_charge_atom_1)
+ " "
+ str(self.charge_atom_1)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(self.system_sdf)
off_molecule = Molecule(self.system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
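    # Background note (added for clarity): in an SDF/MOL V2000 properties
    # block, formal charges are written on an "M  CHG" line as a count
    # followed by (atom index, charge) pairs, e.g. for one +1 charge on
    # atom 12:
    #
    #   M  CHG  1  12   1
    #   M  END
    #   $$$$
    #
    # The code above rebuilds exactly this block from num_charge_atoms,
    # index_charge_atom_1 and charge_atom_1 passed to the constructor.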
def generate_xml_from_doubly_charged_pdb_sdf(self):
"""
        Generates an XML forcefield file for a doubly charged ligand molecule
        (i.e. one with two charged atoms) from the SDF file through
        openforcefield.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -osdf " + self.system_init_sdf
"obabel -ipdb "
+ self.system_pdb
+ " -osdf -O "
+ self.system_init_sdf
)
os.system(command)
with open(self.system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(self.system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(self.num_charge_atoms)
+ " "
+ str(self.index_charge_atom_1)
+ " "
+ str(self.charge_atom_1)
+ " "
+ str(self.index_charge_atom_2)
+ " "
+ str(self.charge_atom_2)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(self.system_sdf)
off_molecule = Molecule(self.system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def write_system_params(self):
"""
Saves the parameters obtained from the QM log files in a text file.
"""
# Charges from QM files
df_charges = pd.read_csv(
self.charge_parameter_file, header=None, delimiter=r"\s+"
)
df_charges.columns = ["atom", "charges"]
qm_charges = df_charges["charges"].values.tolist()
qm_charges = [round(num, 6) for num in qm_charges]
# print(qm_charges)
# Bond Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.system_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.bond_parameter_file, header=None, delimiter=r"\s+"
)
df.columns = [
"bond",
"k_bond",
"bond_length",
"bond_1",
"bond_2",
]
# print(df.head())
bond_1_list = df["bond_1"].values.tolist()
bond_1_list = [x - 1 + min(atom_name_list) for x in bond_1_list]
bond_2_list = df["bond_2"].values.tolist()
bond_2_list = [x - 1 + min(atom_name_list) for x in bond_2_list]
# print(bond_1_list)
# print(bond_2_list)
k_bond_list = df["k_bond"].values.tolist()
#k_bond_list = [
# i * 418.40 for i in k_bond_list
#] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [
i * KCAL_MOL_PER_KJ_MOL * ANGSTROMS_PER_NM**2 for i in k_bond_list
] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [round(num, 10) for num in k_bond_list]
# print(k_bond_list)
bond_length_list = df["bond_length"].values.tolist()
        # Convert bond lengths from angstroms (QM output) to nanometers
bond_length_list = [i / 10.00 for i in bond_length_list]
bond_length_list = [round(num, 6) for num in bond_length_list]
# print(bond_length_list)
# Angle Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.system_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.angle_parameter_file, header=None, delimiter=r"\s+"
)
df.columns = [
"angle",
"k_angle",
"angle_degrees",
"angle_1",
"angle_2",
"angle_3",
]
# print(df.head())
angle_1_list = df["angle_1"].values.tolist()
angle_1_list = [x - 1 + min(atom_name_list) for x in angle_1_list]
# print(angle_1_list)
angle_2_list = df["angle_2"].values.tolist()
angle_2_list = [x - 1 + min(atom_name_list) for x in angle_2_list]
# print(angle_2_list)
angle_3_list = df["angle_3"].values.tolist()
angle_3_list = [x - 1 + min(atom_name_list) for x in angle_3_list]
# print(angle_3_list)
k_angle_list = df["k_angle"].values.tolist()
k_angle_list = [
i * KCAL_MOL_PER_KJ_MOL for i in k_angle_list
] # kcal/mol * radian^2 to kJ/mol * radian^2
k_angle_list = [round(num, 6) for num in k_angle_list]
# print(k_angle_list)
angle_list = df["angle_degrees"].values.tolist()
angle_list = [i * RADIANS_PER_DEGREE for i in angle_list]
angle_list = [round(num, 6) for num in angle_list]
# print(angle_list)
xml = open(self.system_qm_params_file, "w")
xml.write("Begin writing the Bond Parameters" + "\n")
# TODO: These should use string formatting to become more concise
for i in range(len(k_bond_list)):
xml.write(
" "
+ "<Bond"
+ " "
+ "d="
+ '"'
+ str(bond_length_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_bond_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(bond_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(bond_2_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Bond Parameters" + "\n")
xml.write("Begin writing the Angle Parameters" + "\n")
for i in range(len(k_angle_list)):
xml.write(
" "
+ "<Angle"
+ " "
+ "a="
+ '"'
+ str(angle_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_angle_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(angle_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(angle_2_list[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(angle_3_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Angle Parameters" + "\n")
xml.write("Begin writing the Charge Parameters" + "\n")
for i in range(len(qm_charges)):
xml.write(
"<Particle"
+ " "
+ "q="
+ '"'
+ str(qm_charges[i])
+ '"'
+ " "
+ "eps="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "sig="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "atom="
+ '"'
+ str(atom_name_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Charge Parameters" + "\n")
xml.close()
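    # Illustrative example (all numbers invented): after the unit conversions
    # above, the file written by write_system_params() contains blocks such as
    #
    #   Begin writing the Bond Parameters
    #    <Bond d="0.1525" k="259407.12" p1="0" p2="1"/>
    #   Finish writing the Bond Parameters
    #   Begin writing the Angle Parameters
    #    <Angle a="1.911136" k="418.4" p1="0" p2="1" p3="2"/>
    #   Finish writing the Angle Parameters
    #   Begin writing the Charge Parameters
    #   <Particle q="-0.123456" eps="0.0" sig="0.0" atom="0"/>
    #   Finish writing the Charge Parameters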
def write_intermediate_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for
ligand but without the QM obtained charges.
"""
# Bond Parameters
f_params = open(self.system_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [
comb_1,
comb_2,
comb_3,
comb_4,
comb_5,
comb_6,
]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
def write_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for the ligand.
"""
# Bond Parameters
f_params = open(self.system_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
# TODO: These should use string formatting to become more concise
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [
comb_1,
comb_2,
comb_3,
comb_4,
comb_5,
comb_6,
]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
f_params = open(self.system_qm_params_file)
lines_params = f_params.readlines()
# Charge Parameters
for i in range(len(lines_params)):
if "Begin writing the Charge Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Charge Parameters" in lines_params[i]:
to_end = int(i)
charge_params = lines_params[to_begin + 1 : to_end]
non_bonded_index = []
for k in charge_params:
non_bonded_index.append(int(re.findall("[-+]?\d*\.\d+|\d+", k)[3]))
charge_for_index = []
for k in charge_params:
charge_for_index.append(
float(re.findall("[-+]?\d*\.\d+|\d+", k)[0])
)
xml_off = open(self.system_xml)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
nonbond_params = xml_off_lines[to_begin + 4 : to_end - 1]
# print(len(nonbond_params))
f_non_bonded = open(self.system_xml_non_bonded_file, "w")
for x in nonbond_params:
f_non_bonded.write(x)
f_non_bonded = open(self.system_xml_non_bonded_file)
lines_non_bonded = f_non_bonded.readlines()
# print(len(lines_non_bonded))
lines_non_bonded_to_write = []
for i in range(len(non_bonded_index)):
line_ = lines_non_bonded[non_bonded_index[i]]
# print(line_)
eps = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[0])
sig = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[2])
line_to_replace = (
" "
+ "<Particle "
+ "eps="
+ '"'
+ str(eps)
+ '"'
+ " "
+ "q="
+ '"'
+ str(charge_for_index[i])
+ '"'
+ " "
+ "sig="
+ '"'
+ str(sig)
+ '"'
+ "/>"
)
lines_non_bonded_to_write.append(line_to_replace)
data_ = list(zip(non_bonded_index, lines_non_bonded_to_write))
df_non_bonded_params = pd.DataFrame(
data_, columns=["line_index", "line"]
)
# print(df_non_bonded_params.head())
f_non_bonded_ = open(self.system_xml_non_bonded_file)
lines_non_bonded_ = f_non_bonded_.readlines()
for i in range(len(lines_non_bonded_)):
if i in non_bonded_index:
lines_non_bonded_[i] = (
df_non_bonded_params.loc[
df_non_bonded_params.line_index == i, "line"
].values[0]
) + "\n"
# print(len(lines_non_bonded_))
f_write_non_bonded_reparams = open(
self.system_xml_non_bonded_reparams_file, "w"
)
for p in range(len(lines_non_bonded_)):
f_write_non_bonded_reparams.write(lines_non_bonded_[p])
f_write_non_bonded_reparams.close()
f_ = open(self.system_xml_non_bonded_reparams_file)
lines_ = f_.readlines()
print(len(lines_) == len(lines_non_bonded))
xml_off = open(self.reparameterised_intermediate_system_xml_file)
# TODO: implement function(s) to read certain types of files. DRY principle
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
lines_before_params = xml_off_lines[: to_begin + 4]
f__ = open(self.system_xml_non_bonded_reparams_file)
lines_params_non_bonded = f__.readlines()
lines_after_params = xml_off_lines[to_end - 1 :]
f_reparams_xml = open(self.reparameterised_system_xml_file, "w")
for x in lines_before_params:
f_reparams_xml.write(x)
for x in lines_params_non_bonded:
f_reparams_xml.write(x)
for x in lines_after_params:
f_reparams_xml.write(x)
f_reparams_xml.close()
def save_amber_params_non_qm_charges(self):
"""
Saves amber generated topology files for the ligand
without the QM charges.
"""
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_non_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_non_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params,
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_non_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_non_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(
self.reparameterised_intermediate_system_xml_file
),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(
self.reparameterised_intermediate_system_xml_file
),
)
openmm_system.save(self.prmtop_system_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_params, self.inpcrd_system_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(
self.reparameterised_intermediate_system_xml_file
),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = | pd.concat([df_energy_xml, df_energy_prmtop], axis=1) | pandas.concat |
import pandas as pd
import auth,query,textscrape,saveplots
#Get twitter auth
api = auth.main()
# Function variables: type, terms, max number elements
# Available Query Types: bio, user, tweet
# Example query.main('bio','data journalist',1000000)
# Saves data as csv in wp3/data folder
# Returns filename
filename=query.main("bio","data",10000)
print ("Data stored in:" + filename)
# Read an older file
# filename = "379_bio_data_journalist_315K_tweets.csv"
# Function variables: query type, terms, users, max number of elements
# Query type and terms are read from data filename
# Example query.gettweets('bio','data',["guardian","times","bbc"],1000000)
# Saves data as csv in wp3/data folder
# Returns filename
filename2=query.usertweets(filename.replace(".csv","").split("/")[-1].split("_")[0],
filename.replace(".csv","").split("/")[-1].split("_")[1],
| pd.DataFrame.from_csv("datastories/wp3/data/"+filename) | pandas.DataFrame.from_csv |
#! /usr/bin/ python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# PROGRAM: worldlines.py
#------------------------------------------------------------------------------
# Version 0.11
# 9 July, 2020
# Dr <NAME>
# https://patternizer.github.io
# patternizer AT gmail DOT com
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SETTINGS
#------------------------------------------------------------------------------
generate_anyons = True
generate_variants = True
generate_networkx_edges = True
generate_qubits = False
generate_erdos_parameter = False
generate_erdos_equivalence = False
generate_adjacency = False
qubit_logic = False
plot_branchpoint_table = True
plot_networkx_connections = True
plot_networkx_non_circular = True
plot_networkx_erdos_parameter = False
plot_networkx_erdos_equivalence = False
plot_networkx_connections_branchpoints = True
plot_networkx_connections_dags = True
plot_variants = True
machine_learning = False
write_log = True
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# IMPORT PYTHON LIBRARIES
#------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import scipy as sp
# import math
# math.log(N,2) for entropy calculations
import random
from random import randint
from random import randrange
# Text Parsing libraries:
import re
from collections import Counter
# Network Graph libraries:
import networkx as nx
from networkx.algorithms import approximation as aprx
# Plotting libraries:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import colors as mcol
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
from skimage import io
import glob
from PIL import Image
# Silence library version notifications
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# NLP Libraries
# ML Libraries
# App Deployment Libraries
# import dash
# import dash_core_components as dcc
# import dash_html_components as html
# import dash_bootstrap_components as dbc
# from dash.dependencies import Input, Output, State
# from flask import Flask
# import json
# import os
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# METHODS
#------------------------------------------------------------------------------
def word_in_line(word, line):
"""
Check if word is in line
word, line - str
returns - True if word in line, False if not
"""
pattern = r'(^|[^\w]){}([^\w]|$)'.format(word)
pattern = re.compile(pattern, re.IGNORECASE)
    matches = re.search(pattern, line)
return bool(matches)
def discrete_colorscale(values, colors):
"""
values - categorical values
colors - rgb or hex colorcodes for len(values)-1
    returns - discrete colorscale, tickvals, ticktext
"""
if len(values) != len(colors)+1:
raise ValueError('len(values) should be = len(colors)+1')
values = sorted(values)
nvalues = [(v-values[0])/(values[-1]-values[0]) for v in values] #normalized values
colorscale = []
for k in range(len(colors)):
colorscale.extend([[nvalues[k], colors[k]], [nvalues[k+1], colors[k]]])
tickvals = [((values[k]+values[k+1])/2.0) for k in range(len(values)-1)]
ticktext = [f'{int(values[k])}' for k in range(len(values)-1)]
return colorscale, tickvals, ticktext
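# Example of intended use (added for illustration; values are arbitrary):
#
#   values = [0, 1, 2, 3]
#   colors = ['rgb(255,0,0)', 'rgb(0,255,0)', 'rgb(0,0,255)']
#   colorscale, tickvals, ticktext = discrete_colorscale(values, colors)
#   # -> three colour bands, tickvals = [0.5, 1.5, 2.5], ticktext = ['0','1','2']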
def rgb2hex(colorin):
"""
Convert (r,g,b) to hex
"""
r = int(colorin.split('(')[1].split(')')[0].split(',')[0])
g = int(colorin.split('(')[1].split(')')[0].split(',')[1])
b = int(colorin.split('(')[1].split(')')[0].split(',')[2])
return "#{:02x}{:02x}{:02x}".format(r,g,b)
def parse_poem(input_file):
"""
Text parsing of poem and construction of branchpoint array
"""
print('parsing poem ...')
# Store lines in a list
linelist = []
with open (input_file, 'rt') as f:
for line in f:
if len(line)>1: # ignore empty lines
linelist.append(line.strip())
else:
continue
# Store text as a single string
textstr = ''
for i in range(len(linelist)):
if i < len(linelist) - 1:
textstr = textstr + linelist[i] + ' '
else:
textstr = textstr + linelist[i]
# extract sentences into list
# (ignore last entry which is '' due to final full stop)
sentencelist = textstr.split('.')[0:-1]
# Clean text and lower case all words
str = textstr
for char in '-.,\n':
str = str.replace(char,' ')
str = str.lower()
wordlist = str.split()
# Store unique words in an array
uniquewordlist = []
for word in wordlist:
if word not in uniquewordlist:
uniquewordlist.append(word)
# Word frequencies
wordfreq = Counter(wordlist).most_common() # --> wordfreq[0][0] = 'the' and wordfreq[0][1] = '13'
# Find branchpoints having word frequency > 1
branchpointlist = []
for word in range(len(wordfreq)-1):
if wordfreq[word][1] > 1:
branchpointlist.append(wordfreq[word][0])
else:
continue
# Branchpoint index array
maxbranches = wordfreq[0][1]
branchpointarray = np.zeros((len(branchpointlist), maxbranches), dtype='int')
for k in range(len(branchpointlist)):
index = []
for i, j in enumerate(wordlist):
if j == branchpointlist[k]:
index.append(i)
branchpointarray[k,0:len(index)] = index
    # Filter out branchpoints whose occurrences all fall within a single line,
    # using the word indices of branchpoints and the line start/end indices
lineindices = []
wordcount = 0
for i in range(len(linelist)):
linelen = len(linelist[i].split())
lineindices.append([i, wordcount, wordcount+linelen-1])
wordcount += linelen
mask = []
branchlinearray = []
for i in range(np.size(branchpointarray, axis=0)): # i.e. nbranchpoints
branchpointindices = branchpointarray[i,:][branchpointarray[i,:]>0]
linecounter = 0
for j in range(len(linelist)):
branchpointcounter = 0
for k in range(len(branchpointindices)):
if branchpointindices[k] in np.arange(lineindices[j][1],lineindices[j][2]+1):
branchpointcounter += 1
branchlinearray.append([j,i,lineindices[j][1],branchpointindices[k],lineindices[j][2]])
if branchpointcounter > 0:
linecounter += 1
if linecounter < 2:
mask.append(i)
a = np.array(branchpointarray)
b = branchpointlist
for i in range(len(mask)):
a = np.delete(a,mask[i]-i,0)
b = np.delete(b,mask[i]-i,0)
branchpointarray = a
branchpointlist = list(b)
db = pd.DataFrame(branchpointarray)
db.to_csv('branchpointarray.csv', sep=',', index=False, header=False, encoding='utf-8')
return textstr, sentencelist, linelist, wordlist, uniquewordlist, wordfreq, branchpointlist, branchpointarray
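# Note added for clarity (shapes are illustrative, not taken from the poem):
# among its outputs, parse_poem() returns branchpointlist (words that occur
# more than once, spread over different lines) and branchpointarray, a 2-D
# integer array with one row per branchpoint and one column per occurrence,
# zero-padded up to the maximum word frequency, e.g.
#
#   branchpointlist  = ['the', 'of']
#   branchpointarray = [[ 3, 17, 42,  0],
#                       [ 9, 28,  0,  0]]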
def generate_branchpoint_colormap(wordfreq, nbranchpoints, nwords, branchpointarray):
"""
Generate colormap using hexcolors for all branchpoints
"""
print('generating branchpoint_colormap ...')
freq = [ wordfreq[i][1] for i in range(len(wordfreq)) ]
nlabels = nbranchpoints
cmap = px.colors.diverging.Spectral
cmap_idx = np.linspace(0,len(cmap)-1, nlabels, dtype=int)
colors = [cmap[i] for i in cmap_idx]
hexcolors = [ rgb2hex(colors[i]) for i in range(len(colors)) ]
branchpoint_colormap = []
for k in range(nwords):
branchpoint_colormap.append('lightgrey')
for j in range(np.size(branchpointarray, axis=0)): # i.e. nbranchpoints
for i in range(np.size(branchpointarray, axis=1)): # i.e. maxfreq
branchpoint_colormap[branchpointarray[j,i]] = hexcolors[j]
return branchpoint_colormap, hexcolors
def compute_networkx_edges(nwords, wordlist, branchpointarray):
print('computing_networkx_edges ...')
# Construct edgelist, labellist
edgelist = [(i,i+1) for i in range(nwords-1)]
labellist = [{i : wordlist[i]} for i in range(nwords)]
df = pd.DataFrame()
G = nx.Graph()
G.add_edges_from(edgelist)
for node in G.nodes():
G.nodes[node]['label'] = labellist[node]
edge_colormap = []
for k in range(nwords-1):
edge_colormap.append('lightgrey')
for j in range(np.size(branchpointarray, axis=0)): # i.e. nbranchpoints
branchpointedges = []
for i in range(np.size(branchpointarray, axis=1)): # i.e. maxfreq
branchpointindices = branchpointarray[j,:]
connections = branchpointindices[(branchpointindices != branchpointindices[i]) & (branchpointindices > 0)]
for k in range(len(connections)):
if branchpointindices[i] > 0:
branchpointedges.append([branchpointindices[i], connections[k]])
G.add_edges_from(branchpointedges)
# for l in range(int(len(branchpointedges)/2)): # NB 2-driectional edges
# edge_colormap.append(hexcolors[j])
nedges = len(G.edges)
# Generate non-circular form of the networkx graph
N = nx.Graph()
N.add_edges_from(edgelist)
for j in range(np.size(branchpointarray, axis=0)): # i.e. nbranchpoints
branchpointedges = []
for i in range(np.size(branchpointarray, axis=1)): # i.e. maxfreq
branchpointindices = branchpointarray[j,:]
connections = branchpointindices[(branchpointindices != branchpointindices[i]) & (branchpointindices > 0)]
for k in range(len(connections)):
if branchpointindices[i] > 0:
branchpointedges.append([branchpointindices[i], connections[k]])
N.add_edges_from(branchpointedges)
N.remove_edges_from(edgelist)
N_degrees = [degree for node,degree in dict(N.degree()).items()] # degree of nodes
notbranchpoints = [ node for node,degree in dict(N.degree()).items() if degree == 0 ] # each node in circular graph has 2 neighbours at start
return nedges, notbranchpoints, G, N
def compute_erdos_parameter(nwords, nedges):
"""
Compute Erdos-Renyi parameter estimate
"""
print('computing_erdos_parameter ...')
edgelist = [(i,i+1) for i in range(nwords-1)]
for connectivity in np.linspace(0,1,1000001):
random.seed(42)
E = nx.erdos_renyi_graph(nwords, connectivity)
erdosedges = len(E.edges)
if erdosedges == (nedges-len(edgelist)):
# print("{0:.6f}".format(connectivity))
# print("{0:.6f}".format(erdosedges))
nerdosedges = len(E.edges)
return nerdosedges, connectivity, E
# break
nerdosedges = len(E.edges)
return nerdosedges, connectivity, E
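# Added note: the scan above searches for the Erdos-Renyi connection
# probability p whose random graph G(nwords, p) reproduces the number of
# branchpoint edges (total edges minus the nwords-1 backbone edges).
# Under the usual G(n, p) expectation a rough closed-form seed would be
#
#   p ~ n_branchpoint_edges / (nwords * (nwords - 1) / 2)
#
# which could be used to start (or replace) the linear scan.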
def compute_erdos_equivalence(nwords, nedges, N, notbranchpoints):
"""
Compute Erdos-Renyi equivalence probability
"""
print('computing_erdos_equivalence ...')
# Compare Erdos-Renyi graph edges in reduced networks (branchpoint network)
N.remove_nodes_from(notbranchpoints)
mapping = { np.array(N.nodes)[i]:i for i in range(len(N.nodes)) }
H = nx.relabel_nodes(N,mapping)
maxdiff = len(H.edges)
iterations = 100000
for i in range(iterations+1):
E = nx.erdos_renyi_graph(len(H.nodes), connectivity)
diff = H.edges - E.edges
if len(diff) < maxdiff:
maxdiff = len(diff)
commonedges = H.edges - diff
pEquivalence = i/iterations
Equivalence = E
return commonedges, pEquivalence, Equivalence
def compute_anyons(linelist, wordlist, branchpointarray):
"""
Anyon construction: braiding
"""
print('generating_anyons ...')
# Compute start and end word indices for each line of the poem
lineindices = []
wordcount = 0
for i in range(len(linelist)):
linelen = len(linelist[i].split())
lineindices.append([i, wordcount, wordcount+linelen-1])
wordcount += linelen
# For each branchpoint find line index and word indices of line start, branchpoint and line end
# branchlinearray: [line, branchpoint, wordstart, wordbranchpoint, wordend]
branchlinearray = []
for i in range(np.size(branchpointarray, axis=0)): # i.e. nbranchpoints
branchpointindices = branchpointarray[i,:][branchpointarray[i,:]>0]
for j in range(len(linelist)):
for k in range(len(branchpointindices)):
if branchpointindices[k] in np.arange(lineindices[j][1],lineindices[j][2]+1):
branchlinearray.append([j,i,lineindices[j][1],branchpointindices[k],lineindices[j][2]])
    # Filter out branchpoints whose occurrences all fall within a single line
a = np.array(branchlinearray)
mask = []
for i in range(len(branchlinearray)-2):
if (a[i,0] == a[i+1,0]) & (a[i,1] == a[i+1,1]) & (a[i+2,1]!=a[i,1]):
mask.append(i)
mask.append(i+1)
for i in range(len(mask)):
a = np.delete(a,mask[i]-i,0)
branchlinearray = a[a[:,0].argsort()]
# Filter out start of line and end of line occurring branchpoints
a = np.array(branchlinearray)
mask = []
for i in range(len(branchlinearray)):
if ((a[i,2] == a[i,3]) | (a[i,3] == a[i,4])):
mask.append(i)
for i in range(len(mask)):
a = np.delete(a,mask[i]-i,0)
branchlinearray = a[a[:,0].argsort()]
# Anyons
anyonarray = []
for i in range(len(linelist)):
a = branchlinearray[branchlinearray[:,0]==i]
if len(a) == 0:
break
for j in range(len(a)):
anyon_pre = wordlist[a[j,2]:a[j,3]+1]
b = branchlinearray[(branchlinearray[:,1]==a[j,1]) & (branchlinearray[:,0]!=a[j,0])]
#######################################################
# For > 1 swaps, add additional anyon segment code here
# + consider case of forward in 'time' constraint
# + consider return to start line occurrence
#######################################################
if len(b) == 0:
break
for k in range(len(b)):
anyon_post = wordlist[b[k,3]+1:b[k,4]+1]
anyon = anyon_pre + anyon_post
anyonarray.append( [i, b[k,0], branchpointlist[a[j,1]], anyon, a[j,2], a[j,3], a[j,4] ])
df = pd.DataFrame(anyonarray)
df.to_csv('anyonarray.csv', sep=',', index=False, header=False, encoding='utf-8')
return anyonarray
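# Added note: each row of anyonarray (and of anyonarray.csv) has the form
#   [line_start, line_end, branchpoint_word, anyon_word_list,
#    word_index_line_start, word_index_branchpoint, word_index_line_end]
# For instance a hypothetical row
#   [0, 5, 'the', ['a', 'line', 'swapped', 'at', 'the', 'branchpoint'], 0, 4, 11]
# records a braid that starts on line 0, swaps at the word 'the' and finishes
# with the remainder of line 5.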
def compute_variants(linelist, anyonarray):
"""
Variant construction
"""
print('generating_variants ...')
# generate variants of the poem
df = pd.DataFrame(anyonarray)
allpoemsidx = []
allpoems = []
allidx = []
nvariants = 0
for i in range(len(linelist)):
a = df[df[0]==i]
for j in range(len(a)):
poem = []
lineidx = []
lines = np.arange(len(linelist))
while len(lines)>0:
print(nvariants,i,j)
if len(lines) == len(linelist):
linestart = a[0].values[j]
lineend = a[1].values[j]
branchpoint = a[2].values[j]
else:
b = df[df[0]==lines[0]]
linestart = b[0].values[0]
lineend = np.setdiff1d( np.unique(b[1].values), lineidx )[0]
branchpoint = df[ (df[0]==linestart) & (df[1]==lineend) ][2].values[0]
lineidx.append(linestart)
lineidx.append(lineend)
branchpointstartpre = df[ (df[0]==linestart) & (df[1]==lineend) & (df[2]==branchpoint) ][4].values[0]
branchpointstart = df[ (df[0]==linestart) & (df[1]==lineend) & (df[2]==branchpoint) ][5].values[0]
branchpointstartpro = df[ (df[0]==linestart) & (df[1]==lineend) & (df[2]==branchpoint) ][6].values[0]
branchpointendpre = df[ (df[0]==lineend) & (df[1]==linestart) & (df[2]==branchpoint) ][4].values[0]
branchpointend = df[ (df[0]==lineend) & (df[1]==linestart) & (df[2]==branchpoint) ][5].values[0]
branchpointendpro = df[ (df[0]==lineend) & (df[1]==linestart) & (df[2]==branchpoint) ][6].values[0]
allidx.append([nvariants, linestart, lineend, branchpoint, branchpointstartpre, branchpointstart, branchpointstartpro])
allidx.append([nvariants, lineend, linestart, branchpoint, branchpointendpre, branchpointend, branchpointendpro])
poem.append(df[ (df[0]==linestart) & (df[1]==lineend) & (df[2]==branchpoint) ][3].values[0])
poem.append(df[ (df[0]==lineend) & (df[1]==linestart) & (df[2]==branchpoint) ][3].values[0])
lines = np.setdiff1d(lines,lineidx)
nvariants += 1
poemsorted = []
for k in range(len(lineidx)):
poemsorted.append(poem[lineidx.index(k)])
allpoems.append(poemsorted)
allpoemsidx.append(lineidx)
dp = pd.DataFrame(poemsorted)
dp.to_csv('poem'+'_'+"{0:.0f}".format(nvariants-1).zfill(3)+'.csv', sep=',', index=False, header=False, encoding='utf-8')
di = pd.DataFrame(allpoemsidx)
di.to_csv('poem_allidx.csv', sep=',', index=False, header=False, encoding='utf-8')
da = pd.DataFrame(allpoems)
da.to_csv('poem_all.csv', sep=',', index=False, header=False, encoding='utf-8')
dl = pd.DataFrame(allidx)
dl.to_csv('allidx.csv', sep=',', index=False, header=False, encoding='utf-8')
return nvariants, allpoemsidx, allpoems, allidx
def generate_qubits():
"""
Qubit contruction
"""
print('generating_qubits ...')
def qubit_logic():
"""
Apply gates to Bell states
"""
print('applying logic gates ...')
def machine_learning():
"""
Feature extraction
"""
print('extracting features ...')
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LOAD POEM
#------------------------------------------------------------------------------
"""
Poem to generate quantum variants from
"""
#input_file = 'poem.txt'
input_file = 'poem-v1.txt'
textstr, sentencelist, linelist, wordlist, uniquewordlist, wordfreq, branchpointlist, branchpointarray = parse_poem(input_file)
# Counts
nsentences = len(sentencelist) # --> 4
nlines = len(linelist) # --> 8
nwords = len(wordlist) # --> 98
nunique = len(uniquewordlist) # --> 59
nbranchpoints = len(branchpointlist) # --> 20
if generate_networkx_edges == True:
nedges, notbranchpoints, G, N = compute_networkx_edges(nwords, wordlist, branchpointarray)
if generate_anyons == True:
anyonarray = compute_anyons(linelist, wordlist, branchpointarray)
if generate_variants == True:
nvariants, allpoemsidx, allpoems, allidx = compute_variants(linelist, anyonarray)
if generate_qubits == True:
print('generating_qubits ...')
if generate_erdos_parameter == True:
nerdosedges, connectivity, E = compute_erdos_parameter(nwords, nedges)
if generate_erdos_equivalence == True:
commonedges, pEquivalence, Equivalence = compute_erdos_equivalence(nwords, nedges, N, notbranchpoints)
if qubit_logic == True:
print('applying logic gates ...')
if machine_learning == True:
print('extracting features ...')
# -----------------------------------------------------------------------------
branchpoint_colormap, hexcolors = generate_branchpoint_colormap(wordfreq, nbranchpoints, nwords, branchpointarray)
# -----------------------------------------------------------------------------
if plot_branchpoint_table == True:
print('plotting_branchpoint_table ...')
fig, ax = plt.subplots(figsize=(15,10))
plt.plot(np.arange(0,len(wordlist)), np.zeros(len(wordlist)))
for k in range(len(branchpointlist)):
plt.plot(np.arange(0,len(wordlist)), np.ones(len(wordlist))*k, color='black')
a = branchpointarray[k,:]
vals = a[a>0]
plt.scatter(vals, np.ones(len(vals))*k, label=branchpointlist[k], s=100, facecolors=hexcolors[k], edgecolors='black')
xticks = np.arange(0, len(wordlist)+0, step=10)
xlabels = np.array(np.arange(0, len(wordlist), step=10).astype('str'))
yticks = np.arange(0, len(branchpointlist), step=1)
ylabels = np.array(np.arange(0, len(branchpointlist), step=1).astype('str'))
plt.xticks(ticks=xticks, labels=xlabels) # Set label locations
plt.yticks(ticks=yticks, labels=ylabels) # Set label locations
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel('word n in text', fontsize=20)
plt.ylabel('branchpoint k in text (>1 connection)', fontsize=20)
plt.title('Branch Analysis Plot', fontsize=20)
plt.gca().invert_yaxis()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=12)
plt.savefig('branchplot.png')
plt.close(fig)
if plot_networkx_connections == True:
print('plotting_networkx_connections ...')
fig, ax = plt.subplots(figsize=(15,10))
nx.draw_circular(G, node_color=branchpoint_colormap, node_size=300, linewidths=0.5, font_size=12, font_weight='normal', with_labels=True)
plt.title('Networkx (circularly connected): N(edges)=' + "{0:.0f}".format(len(G.edges)), fontsize=20)
plt.savefig('networkx.png')
plt.close(fig)
if plot_networkx_non_circular == True:
print('plotting_networkx_non_circular ...')
fig, ax = plt.subplots(figsize=(15,10))
nx.draw_circular(N, node_color=branchpoint_colormap, node_size=300, linewidths=0.5, font_size=12, font_weight='normal', with_labels=True)
plt.title('Networkx (non-circularly connected): N(edges)=' + "{0:.0f}".format(len(N.edges)), fontsize=20)
plt.savefig('networkx_non_circular.png')
if plot_networkx_erdos_parameter == True:
print('plotting_networkx_erdos ...')
fig, ax = plt.subplots(figsize=(15,10))
nx.draw_circular(E, node_color=branchpoint_colormap, node_size=300, linewidths=0.5, font_size=12, font_weight='normal', with_labels=True)
plt.title('Erdős-Rényi Model: p=' + "{0:.6f}".format(connectivity) + ', N(edges)=' + "{0:.0f}".format(nerdosedges), fontsize=20)
plt.savefig('networkx_erdos.png')
plt.close(fig)
if plot_networkx_erdos_equivalence == True:
print('plotting_networkx_erdos_equivalence ...')
fig, ax = plt.subplots(figsize=(15,10))
    nx.draw_circular(Equivalence, node_color='lightgrey', node_size=300, linewidths=0.5, font_size=12, font_weight='normal', with_labels=True)
    plt.title('Erdős-Rényi Model (equivalent): N(common edges)=' + "{0:.0f}".format(len(commonedges)), fontsize=20)
plt.savefig('networkx_erdos_equivalence.png')
if plot_variants == True:
print('plotting_variants ...')
di = pd.DataFrame(allpoemsidx)
da = pd.DataFrame(allpoems)
    dl = pd.DataFrame(allidx)
# Pymatgen
from pymatgen.core import Structure
from pymatgen.analysis.local_env import CrystalNN, CutOffDictNN
from pymatgen.io.vasp.outputs import Locpot
# Misc
import math
import numpy as np
import pandas as pd
import warnings
# surfaxe
from surfaxe.generation import oxidation_states
from surfaxe.io import plot_bond_analysis, plot_electrostatic_potential, _instantiate_structure
def cart_displacements(start, end, max_disp=0.1, save_txt=True,
txt_fname='cart_displacements.txt'):
"""
    Produces a text file with the magnitudes of the displacements of atoms
    in Cartesian space.
Args:
start (`str`): Filename of initial structure file in any format
supported by pymatgen or pymatgen structure object.
end (`str`): Filename of final structure file in any format supported
by pymatgen or pymatgen structure object.
max_disp (`float`, optional): The maximum displacement shown. Defaults
to 0.1 Å.
save_txt (`bool`, optional): Save the displacements to file. Defaults to
``True``.
        txt_fname (`str`, optional): Filename of the txt file. Defaults to
            ``'cart_displacements.txt'``.
Returns:
None (default) or DataFrame of displacements of atoms in Cartesian space
"""
# Instantiate the structures
start_struc = _instantiate_structure(start)
end_struc = _instantiate_structure(end)
# Add the site labels to the structure
els = ''.join([i for i in start_struc.formula if not i.isdigit()]).split(' ')
el_dict = {i : 1 for i in els}
site_labels = []
for site in start_struc:
symbol = site.specie.symbol
site_labels.append((symbol,el_dict[symbol]))
el_dict[symbol] +=1
start_struc.add_site_property('', site_labels)
# Convert to cartesian coordinates
start_struc = start_struc.cart_coords
end_struc = end_struc.cart_coords
# Calculate the displacements
disp_list = []
for n, (start_coord, end_coord) in enumerate(zip(start_struc, end_struc)):
xdisp = math.pow(start_coord[0] - end_coord[0], 2)
ydisp = math.pow(start_coord[1] - end_coord[1], 2)
zdisp = math.pow(start_coord[2] - end_coord[2], 2)
d = math.sqrt(xdisp + ydisp + zdisp)
label = site_labels[n]
if d >= max_disp:
disp_list.append({
'site': n+1,
'atom': label,
# this makes the displacements round to the same number of
# decimal places as max displacement, for presentation
'displacement': round(d, int(format(max_disp, 'E')[-1]))
})
# Save as txt file
df = pd.DataFrame(disp_list)
if save_txt:
df.to_csv(txt_fname, header=True, index=False, sep='\t', mode='w')
else:
return df
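# Example (illustrative sketch, not part of the original module): assuming two
# hypothetical structure files 'POSCAR_initial' and 'CONTCAR_final' exist, the
# displacements larger than 0.05 Å could be returned as a DataFrame instead of
# being written to a txt file:
#
#   disp_df = cart_displacements('POSCAR_initial', 'CONTCAR_final',
#                                max_disp=0.05, save_txt=False)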
def bond_analysis(structure, bond, nn_method=CrystalNN(), ox_states=None,
save_csv=True, csv_fname='bond_analysis.csv', save_plt=False,
plt_fname='bond_analysis.png', **kwargs):
"""
Parses the structure looking for bonds between atoms. Check the validity of
the nearest neighbour method on the bulk structure before using it on slabs.
Args:
structure (`str`): filename of structure, takes all pymatgen-supported
formats, including pmg structure object
bond (`list`): Bond to analyse e.g. ``['Y', 'O']``
nn_method (`class`, optional): The coordination number prediction
algorithm used. Because the ``nn_method`` is a class, the class
needs to be imported from ``pymatgen.analysis.local_env`` before it
can be instantiated here. Defaults to ``CrystalNN()``.
ox_states (``None``, `list` or `dict`, optional): Add oxidation states
to the structure. Different types of oxidation states specified will
result in different pymatgen functions used. The options are:
* if supplied as ``list``: The oxidation states are added by site
e.g. ``[3, 2, 2, 1, -2, -2, -2, -2]``
* if supplied as ``dict``: The oxidation states are added by element
e.g. ``{'Fe': 3, 'O':-2}``
* if ``None``: The oxidation states are added by guess.
Defaults to ``None``.
save_csv (`bool`, optional): Makes a csv file with the c coordinate of
the first atom and bond length. Defaults to ``True``.
csv_fname (`str`, optional): Filename of the csv file. Defaults to
``'bond_analysis.csv'``.
save_plt (`bool`, optional): Make and save the bond analysis plot.
Defaults to ``False``.
plt_fname (`str`, optional): Filename of the plot. Defaults to
``'bond_analysis.png'``.
Returns:
        None (default) or DataFrame with the c coordinate of the first atom
        and the bond length
"""
struc = _instantiate_structure(structure)
struc = oxidation_states(structure=struc, ox_states=ox_states)
if len(bond) > 2:
warnings.warn('Bond with more than two elements supplied. '
'Only the first two elements will be treated as a bond.')
# Iterates through the structure, looking for pairs of bonded atoms. If the
# sites match, the bond distance is calculated and passed to a dataframe
bonds_info = []
for n, pos in enumerate(struc):
if pos.specie.symbol == bond[0]:
nearest_neighbours = nn_method.get_nn_info(struc, n)
matched_sites = []
for d in nearest_neighbours:
if d.get('site').specie.symbol == bond[1]:
matched_sites.append(d)
bond_distances = [
struc.get_distance(n,x['site_index']) for x in matched_sites
]
bonds_info.append({
'{}_index'.format(bond[0]): n+1,
'{}_c_coord'.format(bond[0]): pos.c,
'{}-{}_bond_distance'.format(bond[0],bond[1]): np.mean(bond_distances)
})
df = pd.DataFrame(bonds_info)
# Save plot and csv, or return the DataFrame
if save_plt:
plot_bond_analysis(bond, df=df, plt_fname=plt_fname, **kwargs)
if save_csv:
if not csv_fname.endswith('.csv'):
csv_fname += '.csv'
df.to_csv(csv_fname, header=True, index=False)
else:
return df
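# Example (illustrative sketch; the filename 'POSCAR_slab' and the Y-O bond are
# assumptions): average Y-O bond lengths as a function of the c coordinate,
# using guessed oxidation states and the default CrystalNN() method:
#
#   bonds_df = bond_analysis('POSCAR_slab', bond=['Y', 'O'], save_csv=False)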
def electrostatic_potential(locpot='./LOCPOT', lattice_vector=None,
save_csv=True, csv_fname='potential.csv', save_plt=True,
plt_fname='potential.png', **kwargs):
"""
Reads LOCPOT to get the planar and optionally macroscopic potential in
c direction.
Args:
locpot (`str`, optional): The path to the LOCPOT file. Defaults to
``'./LOCPOT'``
lattice_vector (`float`, optional): The periodicity of the slab,
calculates macroscopic potential with that periodicity
save_csv (`bool`, optional): Saves to csv. Defaults to ``True``.
csv_fname (`str`, optional): Filename of the csv file. Defaults
to ``'potential.csv'``.
save_plt (`bool`, optional): Make and save the plot of electrostatic
potential. Defaults to ``True``.
plt_fname (`str`, optional): Filename of the plot. Defaults to
``'potential.png'``.
Returns:
        None (default) or DataFrame with the planar (and optionally macroscopic) potential
"""
# Read potential and structure data
lpt = Locpot.from_file(locpot)
struc = Structure.from_file(locpot)
# Planar potential
planar = lpt.get_average_along_axis(2)
df = pd.DataFrame(data=planar, columns=['planar'])
# Calculate macroscopic potential
if lattice_vector is not None:
# Divide lattice parameter by no. of grid points in the direction
resolution = struc.lattice.abc[2]/lpt.dim[2]
# Get number of points over which the rolling average is evaluated
points = int(lattice_vector/resolution)
        # Need extra points at the start and end of the planar potential to evaluate
        # the macroscopic potential; this makes use of the PBC, where the end of one
        # unit cell coincides with the start of the next one
add_to_start = planar[(len(planar) - points): ]
add_to_end = planar[0:points]
pfm_data = np.concatenate((add_to_start,planar,add_to_end))
pfm = pd.DataFrame(data=pfm_data, columns=['y'])
# Macroscopic potential
m_data = pfm.y.rolling(window=points, center=True).mean()
macroscopic = m_data.iloc[points:(len(planar)+points)]
macroscopic.reset_index(drop=True,inplace=True)
df['macroscopic'] = macroscopic
# Get gradient of the plot - this is used for convergence testing, to make
# sure the potential is actually flat
df['gradient'] = np.gradient(df['planar'])
# Plot and save the graph, save the csv or return the dataframe
if save_plt:
plot_electrostatic_potential(df=df, plt_fname=plt_fname, **kwargs)
if save_csv:
if not csv_fname.endswith('.csv'):
csv_fname += '.csv'
df.to_csv(csv_fname, header=True, index=False)
else:
return df
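# Example (illustrative sketch; the LOCPOT path and the 4.7 Å slab periodicity
# are assumptions): planar and macroscopic potential along c, returned as a
# DataFrame rather than saved to file:
#
#   pot_df = electrostatic_potential(locpot='./LOCPOT', lattice_vector=4.7,
#                                    save_csv=False, save_plt=False)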
def simple_nn(start, end=None, ox_states=None, nn_method=CrystalNN(),
save_csv=True, csv_fname='nn_data.csv'):
"""
Finds the nearest neighbours for simple structures. Before using on slabs
make sure the nn_method works with the bulk structure.
The ``site_index`` in the produced DataFrame or csv file is one-indexed and
represents the atom index in the structure.
Args:
start (`str`): Filename of structure file in any format supported by
pymatgen
end (`str`, optional): Filename of structure file in any format
supported by pymatgen. Use if comparing initial and final structures.
The structures must have same constituent atoms and number of sites.
Defaults to ``None``.
ox_states (``None``, `list` or `dict`, optional): Add oxidation states
to the structure. Different types of oxidation states specified will
result in different pymatgen functions used. The options are:
* if supplied as ``list``: The oxidation states are added by site
e.g. ``[3, 2, 2, 1, -2, -2, -2, -2]``
* if supplied as ``dict``: The oxidation states are added by element
e.g. ``{'Fe': 3, 'O':-2}``
* if ``None``: The oxidation states are added by guess.
Defaults to ``None``.
nn_method (`class`, optional): The coordination number prediction
algorithm used. Because the ``nn_method`` is a class, the class
needs to be imported from pymatgen.analysis.local_env before it
can be instantiated here. Defaults to ``CrystalNN()``.
save_csv (`bool`, optional): Save to a csv file. Defaults to ``True``.
csv_fname (`str`, optional): Filename of the csv file. Defaults to
``'nn_data.csv'``
    Returns:
None (default) or DataFrame containing coordination data
"""
# Instantiate start structure object
start_struc = _instantiate_structure(start)
# Add atom site labels to the structure
els = ''.join([i for i in start_struc.formula if not i.isdigit()]).split(' ')
el_dict = {i : 1 for i in els}
site_labels = []
for site in start_struc:
symbol = site.specie.symbol
site_labels.append((symbol,el_dict[symbol]))
el_dict[symbol] +=1
start_struc.add_site_property('', site_labels)
# Add oxidation states and get bonded structure
start_struc = oxidation_states(start_struc, ox_states)
bonded_start = nn_method.get_bonded_structure(start_struc)
if end:
end_struc = _instantiate_structure(end)
end_struc = oxidation_states(end_struc, ox_states)
bonded_end = nn_method.get_bonded_structure(end_struc)
    # Iterate through the structure, evaluate the coordination number and the
    # nearest neighbour species for the start and end structures, and collect
    # the symbol and index of the site (atom) evaluated and its nearest neighbours
df_list = []
for n, site in enumerate(start_struc):
cn_start = bonded_start.get_coordination_of_site(n)
coord_start = bonded_start.get_connected_sites(n)
specie_list = []
for d in coord_start:
spc = d.site.specie.symbol
specie_list.append(spc)
specie_list.sort()
site_nn_start = ' '.join(specie_list)
label = site_labels[n]
if end:
cn_end = bonded_end.get_coordination_of_site(n)
coord_end = bonded_end.get_connected_sites(n)
specie_list = []
for d in coord_end:
spc = d.site.specie.symbol
specie_list.append(spc)
specie_list.sort()
site_nn_end = ' '.join(specie_list)
df_list.append({'site': n+1, 'atom': label, 'cn_start': cn_start,
'nn_start': site_nn_start, 'cn_end': cn_end, 'nn_end': site_nn_end})
else:
df_list.append({'site_index': n+1, 'site': label,
'cn_start': cn_start, 'nn_start': site_nn_start})
# Make a dataframe from df_list
df = pd.DataFrame(df_list)
# Save the csv file or return as dataframe
if save_csv:
if not csv_fname.endswith('.csv'):
csv_fname += '.csv'
df.to_csv(csv_fname, header=True, index=False)
else:
return df
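# Example (illustrative sketch; the filenames are assumptions): coordination
# numbers and nearest-neighbour species before and after relaxation, with
# oxidation states supplied by element:
#
#   nn_df = simple_nn('POSCAR', end='CONTCAR', ox_states={'Fe': 3, 'O': -2},
#                     save_csv=False)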
def complex_nn(start, cut_off_dict, end=None, ox_states=None,
save_csv=True, csv_fname='nn_data.csv'):
"""
Finds the nearest neighbours for more complex structures. Uses CutOffDictNN()
class as the nearest neighbour method. Check validity on bulk structure
before applying to surface slabs.
The ``site_index`` in the produced DataFrame or csv file is one-indexed and
represents the atom index in the structure.
Args:
start (`str`): filename of structure, takes all pymatgen-supported formats.
cut_off_dict (`dict`): Dictionary of bond lengths. The bonds should be
specified with the oxidation states\n
e.g. ``{('Bi3+', 'O2-'): 2.46, ('V5+', 'O2-'): 1.73}``
end (`str`, optional): filename of structure to analyse, use if
comparing initial and final structures. The structures must have
same constituent atoms and number of sites. Defaults to ``None``.
ox_states (``None``, `list` or `dict`, optional): Add oxidation states
to the structure. Different types of oxidation states specified will
result in different pymatgen functions used. The options are:
* if supplied as ``list``: The oxidation states are added by site
e.g. ``[3, 2, 2, 1, -2, -2, -2, -2]``
* if supplied as ``dict``: The oxidation states are added by element
e.g. ``{'Fe': 3, 'O':-2}``
* if ``None``: The oxidation states are added by guess.
Defaults to ``None``
save_csv (`bool`, optional): Save to a csv file. Defaults to ``True``.
csv_fname (`str`, optional): Filename of the csv file. Defaults to
``'nn_data.csv'``
    Returns:
None (default) or DataFrame containing coordination data.
"""
# Instantiate start structure object
start_struc = Structure.from_file(start)
# Add atom site labels to the structure
els = ''.join([i for i in start_struc.formula if not i.isdigit()]).split(' ')
el_dict = {i : 1 for i in els}
site_labels = []
for site in start_struc:
symbol = site.specie.symbol
site_labels.append((symbol,el_dict[symbol]))
el_dict[symbol] +=1
start_struc.add_site_property('', site_labels)
# Add oxidation states
start_struc = oxidation_states(start_struc, ox_states=ox_states)
# Instantiate the nearest neighbour algorithm and get bonded structure
codnn = CutOffDictNN(cut_off_dict=cut_off_dict)
bonded_start = codnn.get_bonded_structure(start_struc)
# Instantiate the end structure if provided
if end:
end_struc = Structure.from_file(end)
end_struc = oxidation_states(end_struc, ox_states=ox_states)
bonded_end = codnn.get_bonded_structure(end_struc)
    # Iterate through the structure, evaluate the coordination number and the
    # nearest neighbour species for the start and end structures, and collect
    # the symbol and index of the site (atom) evaluated and its nearest neighbours
df_list = []
for n, site in enumerate(start_struc):
cn_start = bonded_start.get_coordination_of_site(n)
coord_start = bonded_start.get_connected_sites(n)
specie_list = []
for d in coord_start:
spc = d.site.specie.symbol
specie_list.append(spc)
specie_list.sort()
site_nn_start = ' '.join(specie_list)
label = site_labels[n]
if end:
cn_end = bonded_end.get_coordination_of_site(n)
coord_end = bonded_end.get_connected_sites(n)
specie_list = []
for d in coord_end:
spc = d.site.specie.symbol
specie_list.append(spc)
specie_list.sort()
site_nn_end = ' '.join(specie_list)
            df_list.append({'site': n+1, 'atom': label, 'cn_start': cn_start,
'nn_start': site_nn_start, 'cn_end': cn_end, 'nn_end': site_nn_end})
else:
df_list.append({'site_index': n+1, 'site': label,
'cn_start': cn_start, 'nn_start': site_nn_start})
# Make a dataframe from df_list
    df = pd.DataFrame(df_list)
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.arrays import SparseArray
from pandas.core.arrays.sparse import SparseDtype
class TestSparseDataFrameIndexing:
def test_getitem_sparse_column(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = pd.DataFrame({"A": data})
expected = pd.Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
result = df.iloc[:, 0]
        tm.assert_series_equal(result, expected)
import logging
import fundamentus
import streamlit as st
import numpy as np
import pandas as pd
from pandas_datareader import data
from dmapi import DMAPI
@st.cache
def get_tickers():
dm = DMAPI(token='<PASSWORD>')
dm_json = dm.tickers()
    df = pd.DataFrame.from_dict(dm_json)
import pandas as pd
import os
import subprocess
import math
from datetime import datetime, timedelta
from ShotDetectionInterface import ShotDetectionInterface
import secrets
class PySceneDetection(ShotDetectionInterface):
def __init__(self, no_of_bytes = 32, threshold = 20, output_path = './shot_detection/video_scenes/', video_format = '.mp4'):
self.no_of_bytes = no_of_bytes
self.modified_split = pd.DataFrame()
self.to_add = 0
self.threshold = threshold
self.output_path = output_path
self.video_name = str()
self.video_format = video_format
def generate_scenes(self, local_video_path):
"""Generates csv for detecting important scenes"""
command_list = ['scenedetect']
inputs = ['-i', os.path.abspath(local_video_path)]
framerate = ['-f', '29.97']
output = ['-o', self.output_path]
stats = ['-s', self.video_name + '.stats.csv']
detect_content = ['detect-content', '-t', str(self.threshold)]
list_scenes = ['list-scenes']
execute_command = (command_list + inputs + framerate + output + stats + detect_content + list_scenes)
subprocess.call(execute_command)
def get_random_video_name(self):
return secrets.token_urlsafe(self.no_of_bytes)
def get_total_minutes(self, row):
"""Returns total minutes for a video"""
length_timecode = row['Length (timecode)'].split(':')
hours,minutes = int(length_timecode[0]) ,int(length_timecode[1])
minutes += hours*60
no_of_splits = math.ceil(minutes/5.0)
return (minutes, no_of_splits)
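    # Worked example (illustrative): for a scene with Length (timecode)
    # '00:12:30', hours = 0 and minutes = 12 + 0*60 = 12, so
    # no_of_splits = ceil(12/5.0) = 3, i.e. the scene is later broken into
    # three roughly equal chunks.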
def append_dataframe(self, values, filename):
dict_to_append = {}
dict_to_append['Start Timecode'] = [values[0]]
dict_to_append['End Timecode'] = [values[1]]
dict_to_append['Length (timecode)'] = [values[2]]
dict_to_append['filename'] = [filename]
self.modified_split = self.modified_split.append(pd.DataFrame(dict_to_append),ignore_index=True)
def add_last_frame(self, row):
start_time = self.convert_str_to_datetime(list(self.modified_split['Start Timecode'])[-1])
duration = self.convert_str_to_datetime(list(self.modified_split['Length (timecode)'])[-1])
duration += self.convert_str_to_datetime(row['Length (timecode)'])
end_time = start_time + duration
start_time = list(self.modified_split['Start Timecode'])[-1]
return [start_time, str(end_time), str(duration)]
def small_frames(self, row):
filename = self.get_random_video_name()
if len(self.modified_split) != 0:
values = self.add_last_frame(row)
self.modified_split.drop(self.modified_split.tail(1).index,inplace=True)
self.to_add = 0
else:
self.to_add = 1
values = [str(row['Start Timecode']), str(row['End Timecode']), str(row['Length (timecode)'])]
self.append_dataframe(values, filename)
def next_frames(self, no_of_splits, duration, row):
for i in range(1,no_of_splits - 1):
start_time = self.convert_str_to_datetime(list(self.modified_split['End Timecode'])[-1])
start_time_act = list(self.modified_split['End Timecode'])[-1]
values = [start_time_act, str(start_time + duration), str(duration)]
filename = self.get_random_video_name()
self.append_dataframe(values, filename)
start_time = self.convert_str_to_datetime(list(self.modified_split['End Timecode'])[-1])
rem_length = self.convert_str_to_datetime(row['Length (timecode)'])
rem_length -= (duration*(no_of_splits-1))
filename = self.get_random_video_name()
start_time_act = list(self.modified_split['End Timecode'])[-1]
values = [start_time_act, str(start_time + rem_length), str(rem_length)]
self.append_dataframe(values, filename)
def large_frames(self, row, no_of_splits):
filename = self.get_random_video_name()
duration = self.convert_str_to_datetime(row['Length (timecode)']) / no_of_splits
if self.to_add == 1:
start_time = self.convert_str_to_datetime(list(self.modified_split['Start Timecode'])[-1])
new_duration = duration + self.convert_str_to_datetime(list(self.modified_split['Length (timecode)'])[-1])
start_time_act = list(self.modified_split['Start Timecode'])[-1]
self.modified_split.drop(self.modified_split.tail(1).index,inplace=True)
self.to_add = 0
else:
start_time = self.convert_str_to_datetime(row['Start Timecode'])
start_time_act = str(start_time)
new_duration = duration
values = [start_time_act, str(start_time + new_duration), str(new_duration)]
self.append_dataframe(values, filename)
self.next_frames(no_of_splits, duration, row)
def intermediate_frames(self, row):
values = []
filename = self.get_random_video_name()
if self.to_add == 1:
values = self.add_last_frame(row)
self.modified_split.drop(self.modified_split.tail(1).index,inplace=True)
self.to_add = 0
else:
values = [str(row['Start Timecode']), str(row['End Timecode']), str(row['Length (timecode)'])]
self.append_dataframe(values, filename)
def get_optimal_splits(self, local_file_path):
"""Combines small splits or breaks large splits of the video"""
splits = pd.read_csv(local_file_path,skiprows=1)
self.modified_split = pd.DataFrame({'Start Timecode':[],'End Timecode':[],'Length (timecode)':[],'filename':[]})
self.to_add = 0
for index, row in splits.iterrows():
(minutes, no_of_splits) = self.get_total_minutes(row)
if minutes > 5:
self.large_frames(row,no_of_splits)
elif minutes < 1:
self.small_frames(row)
else:
self.intermediate_frames(row)
self.modified_split.to_csv(os.path.join(self.output_path, self.video_name + '_split_times.csv'), index=False)
def get_video_name_from_filepath(self, local_video_path):
return local_video_path.split('/')[-1].split('.')[0]
def detect_scenes(self, local_video_path):
self.video_name = self.get_video_name_from_filepath(local_video_path)
print("Video_name : ", self.video_name)
self.generate_scenes(local_video_path)
generated_csv = os.path.join(self.output_path, self.video_name + '.stats.csv')
df = pd.read_csv(generated_csv,skiprows=1)
video_len = df['Timecode'].max().split(':')
        video_len = int(video_len[0]) * 60 + int(video_len[1])
self.threshold = self.search_threshold(video_len, generated_csv)
self.generate_scenes(local_video_path)
self.get_optimal_splits(os.path.join(self.output_path, self.video_name + '-Scenes.csv'))
def convert_str_to_datetime(self, str_time):
"""Converts string to datetime object"""
obj = datetime.strptime(str_time, '%H:%M:%S.%f')
time_object = timedelta(hours=obj.hour,minutes=obj.minute,seconds=obj.second,milliseconds=obj.microsecond/1e3)
return time_object
def search_threshold(self, length_of_video, local_file_path):
"""Returns threshold for generating better splits"""
        dataframe = pd.read_csv(local_file_path, skiprows=1)
#%%
import sys
import os
#sys.path.append(os.getcwd() + '/connectome_tools/')
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
sys.path.append('/Users/mwinding/repos/maggot_models')
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
rm = pymaid.CatmaidInstance(url, token, name, password)
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
#mg = load_metagraph("Gad", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
#mg.calculate_degrees(inplace=True)
#adj = mg.adj # adjacency matrix from the "mg" object
adj_ad = pd.read_csv(f'data/adj/all-neurons_ad.csv', index_col = 0).rename(columns=int)
adj = adj_ad.values
clusters = pd.read_csv('cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv', index_col = 0, header = 0)
order = pd.read_csv('cascades/data/signal_flow_order_lvl7.csv').values
# make array from list of lists
order_delisted = []
for sublist in order:
order_delisted.append(sublist[0])
order = np.array(order_delisted)
#%%
# pull sensory annotations and then pull associated skids
order = ['ORN', 'AN sensories', 'MN sensories', 'photoreceptors', 'thermosensories', 'v\'td', 'A1 ascending noci', 'A1 ascending mechano', 'A1 ascending proprio', 'A1 ascending class II_III']
sens = [ct.Celltype(name, pymaid.get_skids_by_annotation(f'mw {name}')) for name in order]
input_skids_list = [x.get_skids() for x in sens]
input_skids = [val for sublist in input_skids_list for val in sublist]
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
#%%
# cascades from each sensory modality
p = 0.05
max_hops = 10
n_init = 100
simultaneous = True
adj=adj_ad
input_hit_hist_list = casc.Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, stop_skids=output_skids,
adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous)
# **** continue here when new clusters are available
#%%
# grouping cascade indices by cluster type
# level 7 clusters
lvl7 = clusters.groupby('lvl7_labels')
# cluster order and number of neurons per cluster
cluster_lvl7 = []
for key in lvl7.groups.keys():
cluster_lvl7.append([key, len(lvl7.groups[key])])
cluster_lvl7 = pd.DataFrame(cluster_lvl7, columns = ['key', 'num_cluster'])
# breaking signal cascades into cluster groups
input_hit_hist_lvl7 = []
for hit_hist in input_hit_hist_list:
sensory_clustered_hist = []
for key in lvl7.groups.keys():
skids = lvl7.groups[key]
indices = np.where([x in skids for x in mg.meta.index])[0]
cluster_hist = hit_hist[indices]
cluster_hist = pd.DataFrame(cluster_hist, index = indices)
sensory_clustered_hist.append(cluster_hist)
input_hit_hist_lvl7.append(sensory_clustered_hist)
# summed signal cascades per cluster group (hops remain intact)
summed_hist_lvl7 = []
for input_hit_hist in input_hit_hist_lvl7:
sensory_sum_hist = []
for i, cluster in enumerate(input_hit_hist):
sum_cluster = cluster.sum(axis = 0)/(len(cluster.index)) # normalize by number of neurons in cluster
sensory_sum_hist.append(sum_cluster)
sensory_sum_hist = pd.DataFrame(sensory_sum_hist) # column names will be hop number
sensory_sum_hist.index = cluster_lvl7.key # uses cluster name for index of each summed cluster row
summed_hist_lvl7.append(sensory_sum_hist)
# number of neurons per cluster group over threshold (hops remain intact)
threshold = 50
num_hist_lvl7 = []
for input_hit_hist in input_hit_hist_lvl7:
sensory_num_hist = []
for i, cluster in enumerate(input_hit_hist):
num_cluster = (cluster>threshold).sum(axis = 0)
sensory_num_hist.append(num_cluster)
    sensory_num_hist = pd.DataFrame(sensory_num_hist)
import biomart
import sys
import pandas as pd
import numpy as np
from biomart import BiomartServer
#from cStringIO import StringIO # python2
from io import BytesIO as cStringIO
from io import StringIO
biomart_host="http://www.ensembl.org/biomart"
def datasetsBM(host=biomart_host):
"""
Lists BioMart datasets.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
    server = BiomartServer(host)
server.show_datasets()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v)
def filtersBM(dataset,host=biomart_host):
"""
Lists BioMart filters for a specific dataset.
:param dataset: dataset to list filters of.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)
d=server.datasets[dataset]
d.show_filters()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v)
def attributesBM(dataset,host=biomart_host):
"""
Lists BioMart attributes for a specific dataset.
:param dataset: dataset to list attributes of.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)
d=server.datasets[dataset]
d.show_attributes()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v)
def queryBM(query_attributes,query_dataset,query_filter=None,query_items=None,query_dic=None,host=biomart_host):
"""
Queries BioMart.
:param query_attributes: list of attributes to recover from BioMart
:param query_dataset: dataset to query
:param query_filter: one BioMart filter associated with the items being queried
:param query_items: list of items to be queried (must assoiate with given filter)
:param query_dic: for complex queries this option should be used instead of 'filters' and 'items' and a dictionary of filters provided here eg. querydic={"filter1":["item1","item2"],"filter2":["item3","item4"]}. If using querydic, don't query more than 350 items at once.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: a Pandas dataframe of the queried attributes
"""
server = BiomartServer(host)
d=server.datasets[query_dataset]
res=[]
if not query_dic:
if query_items:
            chunks = [query_items[x:x+350] for x in range(0, len(query_items), 350)]
for c in chunks:
response=d.search({'filters':{query_filter:c},'attributes':query_attributes})
for line in response.iter_lines():
line = line.decode('utf-8')
res.append(line.split("\t"))
else:
response=d.search({'attributes':query_attributes})
for line in response.iter_lines():
line = line.decode('utf-8')
res.append(line.split("\t"))
elif query_dic:
response=d.search({'filters':query_dic,'attributes':query_attributes})
for line in response.iter_lines():
line = line.decode('utf-8')
res.append(line.split("\t"))
    res = pd.DataFrame(res)
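# Example (illustrative sketch; the dataset, filter, and attribute names follow
# typical Ensembl BioMart usage and are assumptions, as are the gene IDs):
# mapping a couple of gene identifiers to gene names in the human dataset:
#
#   res = queryBM(query_attributes=['ensembl_gene_id', 'external_gene_name'],
#                 query_dataset='hsapiens_gene_ensembl',
#                 query_filter='ensembl_gene_id',
#                 query_items=['ENSG00000139618', 'ENSG00000157764'])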
import numpy as np
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameDescribe:
def test_describe_bool_in_mixed_frame(self):
df = DataFrame(
{
"string_data": ["a", "b", "c", "d", "e"],
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
}
)
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame(
{"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=["bool"])
expected = DataFrame(
{"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
)
tm.assert_frame_equal(result, expected)
def test_describe_empty_object(self):
# GH#27183
df = pd.DataFrame({"A": [None, None]}, dtype=object)
result = df.describe()
expected = pd.DataFrame(
{"A": [0, 0, np.nan, np.nan]},
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
result = df.iloc[:0].describe()
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH#13891
df = pd.DataFrame(
{
"bool_data_1": [False, False, True, True],
"bool_data_2": [False, True, True, True],
}
)
result = df.describe()
expected = DataFrame(
{"bool_data_1": [4, 2, True, 2], "bool_data_2": [4, 2, True, 3]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{
"bool_data": [False, False, True, True, False],
"int_data": [0, 1, 2, 3, 4],
}
)
result = df.describe()
expected = DataFrame(
{"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
)
result = df.describe()
expected = DataFrame(
{"bool_data": [4, 2, True, 2], "str_data": [4, 3, "a", 2]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(
["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_empty_categorical_column(self):
# GH#26397
        # Ensure the index of an empty categorical DataFrame column
# also contains (count, unique, top, freq)
df = pd.DataFrame({"empty_col": Categorical([])})
result = df.describe()
expected = DataFrame(
{"empty_col": [0, 0, np.nan, np.nan]},
index=["count", "unique", "top", "freq"],
dtype="object",
)
tm.assert_frame_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2, 0])
assert np.isnan(result.iloc[3, 0])
def test_describe_categorical_columns(self):
# GH#11558
columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
df = DataFrame(
{
"int1": [10, 20, 30, 40, 50],
"int2": [10, 20, 30, 40, 50],
"obj": ["A", 0, None, "X", 1],
},
columns=columns,
)
result = df.describe()
exp_columns = pd.CategoricalIndex(
["int1", "int2"],
categories=["int1", "int2", "obj"],
ordered=True,
name="XXX",
)
expected = DataFrame(
{
"int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
"int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
columns=exp_columns,
)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values, expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"],
freq="MS",
tz="US/Eastern",
name="XXX",
)
df = DataFrame(
{
0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ["A", 0, None, "X", 1],
}
)
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
)
expected = DataFrame(
{
0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == "MS"
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH#6145
t1 = pd.timedelta_range("1 days", freq="D", periods=5)
t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
df = pd.DataFrame({"t1": t1, "t2": t2})
expected = DataFrame(
{
"t1": [
5,
pd.Timedelta("3 days"),
df.iloc[:, 0].std(),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.Timedelta("4 days"),
pd.Timedelta("5 days"),
],
"t2": [
5,
pd.Timedelta("3 hours"),
df.iloc[:, 1].std(),
pd.Timedelta("1 hours"),
pd.Timedelta("2 hours"),
pd.Timedelta("3 hours"),
pd.Timedelta("4 hours"),
pd.Timedelta("5 hours"),
],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (
" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00"
)
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH#21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
        df = pd.DataFrame({"s1": s1, "s2": s2})
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pytest
from pandas.compat import lrange, range
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
def test_get():
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan():
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = pd.Float64Index(range(10)).to_series()
idx = [2, 30]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert_series_equal(s.get(idx),
Series([2, np.nan], index=idx))
idx = [2, np.nan]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert_series_equal(s.get(idx),
Series([2, np.nan], index=idx))
# GH 17295 - all missing keys
idx = [20, 30]
assert(s.get(idx) is None)
idx = [np.nan, np.nan]
assert(s.get(idx) is None)
def test_delitem():
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
    expected = Series(lrange(1, 5), index=lrange(1, 5))
####
#
# The MIT License (MIT)
#
# Copyright 2017, 2018 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
'''
Scripts to run the evaluation protocoll described in the retention order
prediction section of the paper.
'''
import numpy as np
import scipy as sp
import pandas as pd
import itertools
import time
import csv
import networkx as nx
import os
import re
import copy
## my own classes, e.g. ranksvm, retention graph, etc ...
from helper_cls import Timer, join_dicts, sample_perc_from_list, get_statistic_about_concordant_and_discordant_pairs
from helper_cls import pairwise, is_sorted
from rank_svm_cls import load_data, KernelRankSVC
from svr_pairwise_cls import SVRPairwise
# load my own kernels
from rank_svm_cls import tanimoto_kernel, tanimoto_kernel_mat, minmax_kernel_mat, minmax_kernel
# load functions for the pair generation
from rank_svm_cls import get_pairs_single_system, get_pairs_multiple_systems
# load function for the model selection
from model_selection_cls import find_hparan_ranksvm, find_hparam_regression
## scikit-learn methods
from sklearn.model_selection import ShuffleSplit, KFold, GroupKFold, GroupShuffleSplit, PredefinedSplit
from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer, PolynomialFeatures
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.pipeline import Pipeline
## Data structures
from pandas import DataFrame
from collections import OrderedDict
## Allow the parallelization of the candidate graph construction
from joblib import Parallel, delayed
def evaluate_on_target_systems (
target_systems, training_systems, predictor, pair_params, kernel_params, opt_params, input_dir, estimator,
feature_type, n_jobs = 1, perc_for_training = 100):
"""
Task: Evaluate rank-correlation, accuracy, etc. by learning an order predictor using the given
set of training systems and prediction on the given set of target systems.
For the evaluation we use either a repeated random-split of the target systems' data
(if less than 75 examples are provided for test) or a cross-validation (else). The
hyper-paramters of the order predictor are optimized using a nested cross-validation.
The routines for that can be found in the file 'model_selection_cls.py'.
If desired (excl_mol_by_struct_only == True), the molecular structures from the test set
are removed from the training based on their molecular structure, e.g. by comparison of
their InChIs, _even_ if these structures have been measured with another than the
target system, i.e., another chromatographic system.
See also the paper for details on the evaluation strategy.
:param target_systems: list of strings, containing the target systems
:param training_systems: list of strings, containing the training systems
:param predictor: list of string, containing the predictors / molecular features used for the
model construction.
:param pair_params: dictionary, containing the paramters used for the creation of
the RankSVM learning pairs, e.g. minimum and maximum oder distance.
:param kernel_params: dictionary, containing the parameters for the kernels and
generally for handling the input features / predictors. See definition of the
dictionary in the __main__ of file 'evaluation_scenario_cls.py'.
    :param opt_params: dictionary, containing the parameters controlling the hyper-parameter
optimization, number of cross-validation splits, etc. See definition of the
dictionary in the __main__ of file 'evaluation_scenario_cls.py'.
:param input_dir: string, directory containing the input data, e.g., fingerprints and retention
times.
:param estimator: string, order predictor to use: either "ranksvm" or "svr".
:param feature_type: string, feature type that is used for the RankSVM. Currently
only 'difference' features are supported, i.e., \phi_j - \phi_i is used for
the decision. If the estimator is not RankSVM, but e.g. Support Vector Regression,
        then this parameter can be set to None and is ignored.
:param n_jobs: integer, number of jobs used for the hyper-parameter estimation. The maximum number
of used jobs, is the number of inner splits (cross-validation or random split)!
:param perc_for_training: scalar, percentage of the target systems data, that is
used for the training, e.g., selected by simple random sub-sampling. This value
only effects the training process, of the target system is in the set of training
systems.
:return: tuple of pandas.DataFrame
1) mapped_values: predicted order scores for each target system
- corresponds to: w^\phi_i in the RankSVM case
- corresponds to: the predicted retention time, in the SVR case
2) correlations: rank correlations of the order scores for each target system
3) accuracies: pairwise prediction accuracies for each target system
4) simple_statistics: number of training and test examples, etc.
5) grid_search_results: hyper-parameter scores for the different grid-parameters
6) grid_search_best_params: hyper-parameter scores for the best grid-parameters
NOTE: The returned results (except mapped_values and grid search results) are averages
    across the different random splits / cross-validation folds and repetitions.
"""
# Variables related to the number of random / cv splits, for inner (*_cv)
# and outer fold (*_ncv).
n_splits_shuffle = opt_params["n_splits_shuffle"]
n_splits_nshuffle = opt_params["n_splits_nshuffle"]
n_splits_cv = opt_params["n_splits_cv"]
n_splits_ncv = opt_params["n_splits_ncv"]
n_rep = opt_params["n_rep"]
# Should molecules be excluded from the training, if their structure appears
# in the test _even if_ they have been measured with another system than the
# (current) target system:
excl_mol_by_struct_only = opt_params["excl_mol_by_struct_only"]
# Currently only 'slack_type == "on_pairs"' is supported.
slack_type = opt_params["slack_type"]
if slack_type != "on_pairs":
raise ValueError ("Invalid slack type: %s" % slack_type)
# Should all possible pairs be used for the (inner) test split during the
# parameter estimation, regardless of what are the settings for 'd_upper'
# and 'd_lower'?
all_pairs_for_test = opt_params["all_pairs_for_test"]
if not estimator in ["ranksvm", "svr"]:
raise ValueError ("Invalid estimator: %s" % estimator)
# RankSVM and SVR regularization parameter
param_grid = {"C": opt_params["C"]}
if estimator == "svr":
# error-tube width of the SVR
param_grid["epsilon"] = opt_params["epsilon"]
# Molecule kernel
if kernel_params["kernel"] == "linear":
kernel = "linear"
elif kernel_params["kernel"] in ["rbf", "gaussian"]:
param_grid["gamma"] = kernel_params["gamma"]
kernel = "rbf"
elif kernel_params["kernel"] == "tanimoto":
if estimator in ["ranksvm"]:
kernel = tanimoto_kernel
elif estimator in ["svr"]:
kernel = tanimoto_kernel_mat
elif kernel_params["kernel"] == "minmax":
if estimator in ["ranksvm"]:
kernel = minmax_kernel
elif estimator in ["svr"]:
kernel = minmax_kernel_mat
else:
raise ValueError ("Invalid kernel: %s." % kernel_params["kernel"])
if isinstance (target_systems, str):
target_systems = [target_systems]
if isinstance (training_systems, str):
training_systems = [training_systems]
all_systems = list (set (target_systems).union (training_systems))
assert isinstance (target_systems, list) and isinstance (training_systems, list)
n_target_systems = len (target_systems)
n_training_systems = len (training_systems)
print ("Target systems (# = %d): %s" % (n_target_systems, ",".join (target_systems)))
print ("Training systems (# = %d): %s" % (n_training_systems, ",".join (training_systems)))
    ## Load the target and training systems into dictionaries using (molecule, system)-keys
    ## and retention times / molecular features, respectively, as values
# If we use molecular descriptors, we need to scale the data, e.g. to [0, 1].
if kernel_params["scaler"] == "noscaling":
scaler = None
elif kernel_params["scaler"] == "minmax":
scaler = MinMaxScaler()
elif kernel_params["scaler"] == "std":
scaler = StandardScaler()
elif kernel_params["scaler"] == "l2norm":
scaler = Normalizer()
else:
raise ValueError ("Invalid scaler for the molecular features: %s"
% kernel_params["scaler"])
# Handle counting MACCS fingerprints
if predictor[0] == "maccsCount_f2dcf0b3":
predictor_c = ["maccs"]
predictor_fn = "fps_maccs_count.csv"
else:
predictor_c = predictor
predictor_fn = None
d_rts, d_features, d_system_index = OrderedDict(), OrderedDict(), OrderedDict()
for k_sys, system in enumerate (all_systems):
rts, data = load_data (input_dir, system = system, predictor = predictor_c, pred_fn = predictor_fn)
        # Use (mol-id, system)-tuple as key
keys = list (zip (rts.inchi.values, [system] * rts.shape[0]))
# Values: retention time, features
rts = rts.rt.values.reshape (-1, 1)
data = data.drop ("inchi", axis = 1).values
if kernel_params["poly_feature_exp"]:
# If we use binary fingerprints, we can include some
# interactions, e.g. x_1x_2, ...
data = PolynomialFeatures (interaction_only = True, include_bias = False).fit_transform (data)
        # Make ordered dictionaries
d_rts[system], d_features[system] = OrderedDict(), OrderedDict()
for i, key in enumerate (keys):
d_rts[system][key] = rts[i, 0]
d_features[system][key] = data[i, :]
# Dictionary containing a unique numeric identifier for each system
d_system_index[system] = k_sys
if scaler is not None:
if getattr (scaler, "partial_fit", None) is not None:
# 'partial_fit' allows us to learn the parameters of the scaler
# online. (great stuff :))
scaler.partial_fit (data)
else:
# We have scaler at hand, that does not allow online fitting.
# This probably means, that this is a scaler, that performs
# the desired scaling for each example independently, e.g.
# sklearn.preprocessing.Normalizer.
pass
for system in target_systems:
print ("Target set '%s' contains %d examples." % (system, len (d_rts[system])))
# Collect all the data that is available for training.
d_rts_training = join_dicts (d_rts, training_systems)
d_features_training = join_dicts (d_features, training_systems)
# (mol-id, system)-tuples used in the training set
l_keys_training = list (d_features_training.keys())
# Data frames storing the evaluation measures
mapped_values = {target_system : DataFrame() for target_system in target_systems}
accuracies, correlations, simple_statistics= DataFrame(), DataFrame(), DataFrame()
    grid_search_results, grid_search_best_params = DataFrame(), DataFrame()
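# Example call (illustrative sketch, not part of the original script): the
# system names, input directory, parameter values, and the pair_params keys
# ('d_lower'/'d_upper') are assumptions; the opt_params / kernel_params keys
# mirror those read in the function above.
#
#   kernel_params = {"kernel": "tanimoto", "scaler": "noscaling",
#                    "poly_feature_exp": False}
#   opt_params = {"C": [0.5, 1, 2], "n_splits_shuffle": 25, "n_splits_nshuffle": 25,
#                 "n_splits_cv": 10, "n_splits_ncv": 10, "n_rep": 10,
#                 "excl_mol_by_struct_only": True, "slack_type": "on_pairs",
#                 "all_pairs_for_test": True}
#   results = evaluate_on_target_systems(
#       target_systems="FEM_long", training_systems=["FEM_long"],
#       predictor=["maccs"], pair_params={"d_lower": 0, "d_upper": 16},
#       kernel_params=kernel_params, opt_params=opt_params, input_dir="./data/",
#       estimator="ranksvm", feature_type="difference")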
# -*- coding: utf-8 -*-
r"""Module to create and manage an ensemble of models.
The methods of the :mod:`~.ensemble` submodule are designed to generate an
ensemble of models. It contains various methods to assist in generating
multiple models from existing :class:`~.MassModel`\ s, using flux data or
concentration data in :class:`pandas.DataFrame`\ s (e.g. generated from
:mod:`~mass.thermo.conc_sampling`). There are also methods to help ensure that
models are thermodynamically feasible and can reach steady states with or
without perturbations applied.
In addition to containing various methods that can be combined into
an ensemble generation workflow, the :mod:`~.ensemble` submodule contains the
:func:`generate_ensemble_of_models` function, which is optimized for
performance when generating a large number of models.
The :func:`generate_ensemble_of_models` function also ensures that the user
input is valid before generating models to reduce the likelihood of a user
error causing the model generation process to stop before completion. However,
there is time spent in the function's setup, meaning that when generating a
smaller number of models, performance gains may not be seen.
"""
import logging
import warnings
import numpy as np
import pandas as pd
from six import iteritems, string_types
from mass.core.mass_model import MassModel
from mass.exceptions import MassEnsembleError
from mass.simulation.simulation import (
STEADY_STATE_SOLVERS,
Simulation,
_get_sim_values_from_model,
)
from mass.util.util import _check_kwargs, _log_msg, _make_logger, ensure_iterable
# Set the logger
LOGGER = _make_logger(__name__)
"""logging.Logger: Logger for :mod:`~mass.thermo.ensemble` submodule."""
def create_models_from_flux_data(
reference_model, data=None, raise_error=False, **kwargs
):
"""Generate ensemble of models for a given set of flux data.
Parameters
----------
reference_model : iterable, None
A :class:`.MassModel` object to treat as the reference model.
data : pandas.DataFrame
A :class:`pandas.DataFrame` containing the flux data for generation
of the models. Each row is a different set of flux values to
generate a model for, and each column corresponds to the reaction
identifier for the flux value.
raise_error : bool
Whether to raise an error upon failing to generate a model from a
given reference. Default is ``False``.
**kwargs
verbose :
``bool`` indicating the verbosity of the function.
Default is ``False``.
suffix :
``str`` representing the suffix to append to generated models.
Default is ``'_F'``.
Returns
-------
new_models : list
A ``list`` of successfully generated :class:`.MassModel` objects.
Raises
------
MassEnsembleError
Raised if generation of a model fails and ``raise_error=True``.
"""
kwargs = _check_kwargs(
{
"verbose": False,
"suffix": "_F",
},
kwargs,
)
if not isinstance(reference_model, MassModel):
raise TypeError("`reference_model` must be a MassModel")
data, id_array = _validate_data_input(
reference_model, data, "reactions", kwargs.get("verbose")
)
new_models = []
for i, values in enumerate(data.values):
# Create new model
new_model = reference_model.copy()
new_model.id += kwargs.get("suffix") + str(i)
try:
_log_msg(
LOGGER,
logging.INFO,
kwargs.get("verbose"),
"New model '%s' created",
new_model.id,
)
# Update the model parameters
new_model.update_parameters(dict(zip(id_array, values)), verbose=False)
_log_msg(
LOGGER,
logging.INFO,
kwargs.get("verbose"),
"Updated flux values for '%s'",
new_model.id,
)
# Add model to the ensemble
new_models.append(new_model)
except Exception as e:
msg = str(
"Could not create '{0}' for the ensemble due to the "
"following error: {1!r}".format(new_model.id, str(e))
)
if raise_error:
raise MassEnsembleError(msg)
_log_msg(LOGGER, logging.ERROR, kwargs.get("verbose"), msg)
return new_models
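# Example (illustrative sketch; 'reference_model' and 'flux_df' are assumed to
# exist, e.g. flux_df built from sampled flux values with one column per
# reaction identifier and one row per model to generate):
#
#   models = create_models_from_flux_data(reference_model, data=flux_df,
#                                         verbose=True)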
def create_models_from_concentration_data(
reference_model, data=None, raise_error=False, **kwargs
):
"""Generate ensemble of models for a given set of concentration data.
Parameters
----------
reference_model : iterable, None
A :class:`.MassModel` object to treat as the reference model.
data : pandas.DataFrame
A :class:`pandas.DataFrame` containing the concentration data for
generation of the models. Each row is a different set of
concentration values to generate a model for, and each column
        corresponds to the metabolite identifier for the concentration
value.
raise_error : bool
Whether to raise an error upon failing to generate a model from a
given reference. Default is ``False``.
**kwargs
verbose :
``bool`` indicating the verbosity of the function.
Default is ``False``.
suffix :
``str`` representing the suffix to append to generated models.
Default is ``'_C'``.
Returns
-------
new_models : list
A ``list`` of successfully generated :class:`.MassModel` objects.
Raises
------
MassEnsembleError
Raised if generation of a model fails and ``raise_error=True``.
"""
kwargs = _check_kwargs(
{
"verbose": False,
"suffix": "_C",
},
kwargs,
)
if not isinstance(reference_model, MassModel):
raise TypeError("`reference_model` must be a MassModel")
data, id_array = _validate_data_input(
reference_model, data, "metabolites", kwargs.get("verbose")
)
new_models = []
for i, values in enumerate(data.values):
# Create new model
new_model = reference_model.copy()
new_model.id += kwargs.get("suffix") + str(i)
try:
_log_msg(
LOGGER,
logging.INFO,
kwargs.get("verbose"),
"New model '%s' created",
new_model.id,
)
# Update the model parameters
new_model.update_initial_conditions(
dict(zip(id_array, values)), verbose=False
)
_log_msg(
LOGGER,
logging.INFO,
kwargs.get("verbose"),
"Updated initial conditions for '%s'",
new_model.id,
)
# Add model to the ensemble
new_models.append(new_model)
except Exception as e:
msg = str(
"Could not create '{0}' for the ensemble due to the "
"following error: {1!r}".format(new_model.id, str(e))
)
if raise_error:
raise MassEnsembleError(msg)
_log_msg(LOGGER, logging.ERROR, kwargs.get("verbose"), msg)
return new_models
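# Example (illustrative sketch; 'reference_model' and 'conc_df' are assumed to
# exist, e.g. conc_df generated via mass.thermo.conc_sampling with one column
# per metabolite identifier and one row per model to generate):
#
#   models = create_models_from_concentration_data(reference_model,
#                                                   data=conc_df, verbose=True)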
def ensure_positive_percs(
models, reactions=None, raise_error=False, update_values=False, **kwargs
):
"""Seperate models based on whether all calculated PERCs are positive.
Parameters
----------
models : iterable
An iterable of :class:`.MassModel` objects to use for PERC
calculations.
reactions : iterable
An iterable of reaction identifiers to calculate the
PERCs for. If ``None``, all reactions in the model will be used.
raise_error : bool
Whether to raise an error upon failing to generate a model from a
given reference. Default is ``False``.
update_values : bool
Whether to update the PERC values for models that generate all
positive PERCs. Default is ``False``.
**kwargs
verbose :
``bool`` indicating the verbosity of the function.
Default is ``False``.
at_equilibrium_default :
``float`` value to set the pseudo-order rate constant if the
reaction is at equilibrium.
Default is ``100,000``.
Returns
-------
tuple (positive, negative)
positive : list
A ``list`` of :class:`.MassModel` objects whose calculated PERC
            values were positive.
negative : list
A ``list`` of :class:`.MassModel` objects whose calculated PERC
values were negative.
Raises
------
MassEnsembleError
Raised if PERC calculation fails and ``raise_error=True``.
"""
kwargs = _check_kwargs(
{
"verbose": False,
"at_equilibrium_default": 100000,
},
kwargs,
)
positive = []
negative = []
models = ensure_iterable(models)
if any([not isinstance(model, MassModel) for model in models]):
raise TypeError("`models` must be an iterable of MassModels.")
for model in models:
model, is_positive = _ensure_positive_percs_for_model(
model,
reactions,
kwargs.get("verbose"),
raise_error,
update_values,
kwargs.get("at_equilibrium_default"),
)
if is_positive:
positive.append(model)
else:
negative.append(model)
_log_msg(
LOGGER,
logging.INFO,
kwargs.get("verbose"),
"Finished PERC calculations, returning seperated models.",
)
return positive, negative
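# Hypothetical usage sketch (not part of the original module). Given a list of
# generated MassModel objects, models whose calculated PERCs are all positive
# can be separated from the rest; the reaction identifiers are placeholders.
#
#     positive, negative = ensure_positive_percs(
#         conc_models,
#         reactions=["HEX1", "PFK"],
#         update_values=True,
#         verbose=True,
#     )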
def ensure_steady_state(
models,
strategy="simulate",
perturbations=None,
solver_options=None,
update_values=False,
**kwargs
):
"""Seperate models based on whether a steady state can be reached.
All ``kwargs`` are passed to :meth:`~.Simulation.find_steady_state`.
Parameters
----------
models : MassModel, iterable
A :class:`.MassModel` or an iterable of :class:`.MassModel` objects to
find a steady state for.
strategy : str
The strategy for finding the steady state. Must be one of the
following:
* ``'simulate'``
* ``'nleq1'``
* ``'nleq2'``
perturbations : dict
A ``dict`` of perturbations to incorporate into the simulation.
        Models must reach a steady state with the given perturbation to be
considered as feasible.
See :mod:`~.simulation.simulation` documentation for more
information on valid perturbations.
solver_options : dict
A `dict` of options to pass to the solver utilized in determining a
steady state. Solver options should be for the
:class:`roadrunner.Integrator` if ``strategy="simulate"``, otherwise
options should correspond to the :class:`roadrunner.SteadyStateSolver`.
update_values : bool
Whether to update the model with the steady state results.
Default is ``False``. Only updates models that reached steady state.
**kwargs
verbose :
``bool`` indicating the verbosity of the method.
Default is ``False``.
steps :
``int`` indicating number of steps at which the output is
sampled where the samples are evenly spaced and
``steps = (number of time points) - 1.``
Steps and number of time points may not both be specified.
Only valid for ``strategy='simulate'``.
Default is ``None``.
tfinal :
            ``float`` indicating the final time point to use when
simulating to long times to find a steady state.
Only valid for ``strategy='simulate'``.
Default is ``1e8``.
num_attempts :
``int`` indicating the number of attempts the steady state
solver should make before determining that a steady state
cannot be found. Only valid for ``strategy='nleq1'`` or
``strategy='nleq2'``.
Default is ``2``.
decimal_precision :
``bool`` indicating whether to apply the
:attr:`~.MassBaseConfiguration.decimal_precision` attribute of
the :class:`.MassConfiguration` to the solution values.
            Default is ``True``.
Returns
-------
tuple (feasible, infeasible)
feasible : list
A ``list`` of :class:`.MassModel` objects that could successfully reach
a steady state.
infeasible : list
A ``list`` of :class:`.MassModel` objects that could not successfully
reach a steady state.
"""
kwargs = _check_kwargs(
{
"verbose": False,
"steps": None,
"tfinal": 1e8,
"num_attempts": 2,
"decimal_precision": True,
},
kwargs,
)
models = ensure_iterable(models)
if any([not isinstance(model, MassModel) for model in models]):
raise TypeError("`models` must be an iterable of MassModels.")
# Ensure strategy input is valid
if strategy not in STEADY_STATE_SOLVERS and strategy != "simulate":
raise ValueError("Invalid steady state strategy: '{0}'".format(strategy))
simulation = _initialize_simulation(
models[0], strategy, solver_options, kwargs.get("verbose")
)
if len(models) > 1:
simulation.add_models(models[1:], verbose=kwargs.get("verbose"))
conc_sol_list, flux_sol_list = simulation.find_steady_state(
models, strategy, perturbations, update_values, **kwargs
)
feasible = []
infeasible = []
for i, model in enumerate(models):
if len(models) == 1:
conc_sol, flux_sol = conc_sol_list, flux_sol_list
else:
conc_sol, flux_sol = conc_sol_list[i], flux_sol_list[i]
if conc_sol and flux_sol:
ics, params = simulation.get_model_simulation_values(model)
model.update_initial_conditions(ics)
model.update_parameters(
{
param: value
for param, value in params.items()
if param in model.reactions.list_attr("flux_symbol_str")
}
)
feasible.append(model)
else:
infeasible.append(model)
_log_msg(
LOGGER,
logging.INFO,
kwargs.get("verbose"),
"Finished finding steady states, returning seperated models.",
)
return feasible, infeasible
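# Hypothetical usage sketch (not part of the original module). Models that can
# reach a steady state under a perturbation can be screened as follows; the
# perturbation key and value are placeholders.
#
#     feasible, infeasible = ensure_steady_state(
#         positive,
#         strategy="simulate",
#         perturbations={"kf_PFK": "kf_PFK * 1.5"},
#         update_values=True,
#         verbose=True,
#     )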
def generate_ensemble_of_models(
reference_model,
flux_data=None,
conc_data=None,
ensure_positive_percs=None,
strategy=None,
perturbations=None,
**kwargs
):
"""Generate an ensemble of models for given data sets.
    This function is optimized for performance when generating a large
    ensemble of models, compared to combining the various individual methods
    of the :mod:`ensemble` submodule. However, it may not provide as much
    control over the process as using those methods directly.
Notes
-----
* Only one data set is required to generate the ensemble, meaning that
a flux data set can be given without a concentration data set, and vice
versa.
* If ``x`` flux data samples and ``y`` concentration data samples are
provided, ``x * y`` total models will be generated.
* If models deemed ``infeasible`` are to be returned, ensure the
``return_infeasible`` kwarg is set to ``True``.
Parameters
----------
reference_model : MassModel
The reference model used in generating the ensemble.
flux_data : pandas.DataFrame or None
A :class:`pandas.DataFrame` containing the flux data for generation
of the models. Each row is a different set of flux values to
generate a model for, and each column corresponds to the reaction
identifier for the flux value.
conc_data : pandas.DataFrame or None
A :class:`pandas.DataFrame` containing the concentration data for
generation of the models. Each row is a different set of
concentration values to generate a model for, and each column
        corresponds to the metabolite identifier for the concentration
value.
ensure_positive_percs :
A ``list`` of reactions to calculate PERCs for, ensure they
        are positive, and update feasible models with the new PERC values.
If ``None``, no PERCs will be checked.
strategy : str, None
The strategy for finding the steady state.
Must be one of the following:
* ``'simulate'``
* ``'nleq1'``
* ``'nleq2'``
If a ``strategy`` is given, models must reach a steady state to be
considered feasible. All feasible models are updated to steady state.
If ``None``, no attempts will be made to determine whether a generated
model can reach a steady state.
perturbations : dict
A ``dict`` of perturbations to incorporate into the simulation,
or a list of perturbation dictionaries where each ``dict`` is applied
to a simulation. Models must reach a steady state with all given
        perturbation dictionaries to be considered feasible.
See :mod:`~.simulation.simulation` documentation for more
information on valid perturbations.
Ignored if ``strategy=None``.
**kwargs
solver_options :
``dict`` of options to pass to the solver utilized in determining
a steady state. Solver options should be for the
:class:`roadrunner.Integrator` if ``strategy="simulate"``,
otherwise options should correspond to the
:class:`roadrunner.SteadyStateSolver`.
Default is ``None``.
verbose :
``bool`` indicating the verbosity of the function.
Default is ``False``.
decimal_precision :
``bool`` indicating whether to apply the
:attr:`~.MassBaseConfiguration.decimal_precision` attribute of
the :class:`.MassConfiguration` to the solution values.
Default is ``False``.
flux_suffix :
``str`` representing the suffix to append to generated models
indicating the flux data set used.
Default is ``'_F'``.
conc_suffix :
``str`` representing the suffix to append to generated models
indicating the conc data set used.
Default is ``'_C'``.
at_equilibrium_default :
``float`` value to set the pseudo-order rate constant if the
reaction is at equilibrium.
Default is ``100,000``. Ignored if ``ensure_positive_percs=None``.
return_infeasible :
``bool`` indicating whether to generate and return an
:class:`Ensemble` containing the models deemed infeasible.
Default is ``False``.
Returns
-------
feasible : list
A ``list`` containing the `MassModel` objects that are deemed
        `feasible` by successfully passing through all PERC and simulation
checks in the ensemble building processes.
infeasible : list
A ``list`` containing the `MassModel` objects that are deemed
        `infeasible` by failing to pass through one of the PERC or
simulation checks in the ensemble building processes.
"""
# Check all inputs at beginning to ensure that ensemble generation is not
# disrupted near the end due to invalid input format
kwargs = _check_kwargs(
{
"verbose": False,
"decimal_precision": False,
"flux_suffix": "_F",
"conc_suffix": "_C",
"at_equilibrium_default": 100000,
"solver_options": None,
"return_infeasible": False,
},
kwargs,
)
verbose = kwargs.pop("verbose")
_log_msg(LOGGER, logging.INFO, verbose, "Validating input")
# Validate model input
if not isinstance(reference_model, MassModel):
raise TypeError("`reference_model` must be a MassModel.")
# Validate DataFrame inputs, if any
if flux_data is not None:
# Validate flux data if provided
flux_data, flux_ids = _validate_data_input(
reference_model, flux_data, "reactions", verbose
)
else:
# Set a value to allow for iteration
flux_data = pd.DataFrame([0])
flux_ids = np.array([])
if conc_data is not None:
# Validate conc data if provided
conc_data, conc_ids = _validate_data_input(
reference_model, conc_data, "metabolites", verbose
)
else:
# Set a value to allow for iteration
        conc_data = pd.DataFrame([0])
from itertools import chain
from operator import itemgetter
from typing import Iterator
from typing import Tuple
from numpy import nansum
from pandas import DataFrame
from pandas import Series
from pandas import pivot_table
from .model import Sales
from .model import to_array
from .. import create_table
from .. import with_change
bacon_cuts = ('Derind Belly 7-9#', 'Derind Belly 9-13#', 'Derind Belly 13-17#', 'Derind Belly 17-19#')
report_columns = {
'lm_pk610': 'Negotiated',
'lm_pk620': 'Formula'
}
def fresh_bacon(sales: Sales) -> bool:
return sales.description in bacon_cuts
def format_column(column: Tuple[str, str]) -> str:
value, report = column
return f"{report_columns[report]} {value.capitalize()}"
def format_columns(table: DataFrame) -> DataFrame:
table.columns = map(format_column, table.columns)
return table
def format_table(weight: Series, value: Series) -> DataFrame:
values = pivot_table(create_table(weight, value), index=['date', 'report'], aggfunc=nansum)
price = (values.value / values.weight).rename('price')
table = create_table(values.weight, price).unstack()
return table[sorted(table.columns, key=itemgetter(1, 0))]
def bacon_index_report(negotiated: Iterator[Sales], formula: Iterator[Sales]) -> DataFrame:
records = to_array(filter(fresh_bacon, chain(negotiated, formula)))
columns = ['date', 'report', 'avg_price', 'weight']
    bacon = DataFrame.from_records(records, columns=columns)
from __future__ import print_function
import six
import unittest
from unittest import mock
import pandas as pd
from dataprofiler.profilers import column_profile_compilers as \
col_pro_compilers
from dataprofiler.profilers.profiler_options import BaseOption, StructuredOptions
class TestBaseProfileCompilerClass(unittest.TestCase):
def test_cannot_instantiate(self):
"""showing we normally can't instantiate an abstract class"""
with self.assertRaises(TypeError) as e:
col_pro_compilers.BaseCompiler()
self.assertEqual(
"Can't instantiate abstract class BaseCompiler with "
"abstract methods profile",
str(e.exception)
)
@mock.patch.multiple(
col_pro_compilers.BaseCompiler, __abstractmethods__=set(),
_profilers=[mock.Mock()], _option_class=mock.Mock(spec=BaseOption))
@mock.patch.multiple(
col_pro_compilers.ColumnStatsProfileCompiler, _profilers=[mock.Mock()])
def test_add_profilers(self):
compiler1 = col_pro_compilers.BaseCompiler(mock.Mock())
compiler2 = col_pro_compilers.BaseCompiler(mock.Mock())
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`BaseCompiler` and `int` are '
'not of the same profile compiler type.'):
compiler1 + 3
compiler3 = col_pro_compilers.ColumnStatsProfileCompiler(mock.Mock())
compiler3._profiles = [mock.Mock()]
with self.assertRaisesRegex(TypeError,
'`BaseCompiler` and '
'`ColumnStatsProfileCompiler` are '
'not of the same profile compiler type.'):
compiler1 + compiler3
# test mismatched names
compiler1.name = 'compiler1'
compiler2.name = 'compiler2'
with self.assertRaisesRegex(ValueError,
'Column profile names are unmatched: '
'compiler1 != compiler2'):
compiler1 + compiler2
# test mismatched profiles due to options
compiler2.name = 'compiler1'
compiler1._profiles = dict(test1=mock.Mock())
compiler2._profiles = dict(test2=mock.Mock())
with self.assertRaisesRegex(ValueError,
'Column profilers were not setup with the '
'same options, hence they do not calculate '
'the same profiles and cannot be added '
'together.'):
compiler1 + compiler2
# test success
compiler1._profiles = dict(test=1)
compiler2._profiles = dict(test=2)
merged_compiler = compiler1 + compiler2
self.assertEqual(3, merged_compiler._profiles['test'])
self.assertEqual('compiler1', merged_compiler.name)
def test_diff_primitive_compilers(self):
# Test different data types
data1 = pd.Series(['-2', '-1', '1', '2'])
data2 = pd.Series(["YO YO YO", "HELLO"])
compiler1 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data1)
compiler2 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data2)
expected_diff = {
'data_type_representation': {
'datetime': 'unchanged',
'int': 1.0,
'float': 1.0,
'text': 'unchanged'
},
'data_type': ['int', 'text']
}
self.assertDictEqual(expected_diff, compiler1.diff(compiler2))
# Test different data types with datetime specifically
data1 = pd.Series(['-2', '-1', '1', '2'])
data2 = pd.Series(["01/12/1967", "11/9/2024"])
compiler1 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data1)
compiler2 = col_pro_compilers.ColumnPrimitiveTypeProfileCompiler(data2)
expected_diff = {
'data_type_representation': {
'datetime': -1.0,
'int': 1.0,
'float': 1.0,
'text': 'unchanged'
},
'data_type': ['int', 'datetime']
}
self.assertDictEqual(expected_diff, compiler1.diff(compiler2))
# Test same data types
        data1 = pd.Series(['-2', '15', '1', '2'])
from pathlib import Path
import numpy as np
import pandas as pd
import geopandas as gp
import pygeos as pg
from analysis.constants import ACRES_PRECISION, M2_ACRES, INPUTS, INPUT_AREA_VALUES
from analysis.lib.pygeos_util import intersection, sjoin_geometry, explode
chat_dir = Path("data/inputs/indicators/chat")
out_dir = Path("data/results/huc12")
input_filename = "data/inputs/boundaries/input_areas.feather"
def get_chat_input_values(state):
return [
e["value"]
for e in INPUT_AREA_VALUES
if f"{state}chat" in set(e["id"].split(","))
]
def get_analysis_notes():
return """Note: areas are based on the polygon boundary of this area
compared to watershed boundaries rather than pixel-level analyses used
elsewhere in this report."""
def summarize_by_areas(df, state, rank_only=False):
"""Calculate acres by value and area-weighted value for each CHAT field in fields.
Parameters
----------
df : GeoDataFrame
area(s) of interest
state : str, one of ['ok', 'tx']
rank_only : bool (default False)
if True, will only calculate areas for CHAT Rank
Returns
-------
DataFrame
        columns for total_acres, analysis_acres, chat_acres, and avg (bare) and
_x suffixed fields for each field
"""
if not df.index.name:
df.index.name = "index"
index_name = df.index.name
df = df.reset_index()
chat_df = gp.read_feather(chat_dir / f"{state}chat.feather")
fields = ["chatrank"]
if not rank_only:
fields += [e["id"] for e in INPUTS[f"{state}chat"]["indicators"]]
print("Intersecting with CHAT...")
chat_df = intersection(df, chat_df)
chat_df["acres"] = pg.area(chat_df.geometry_right.values.data) * M2_ACRES
chat_df = chat_df.loc[chat_df.acres > 0].copy()
if not len(chat_df):
return None
# total_acres = chat_df.groupby(index_name).geometry.first()
total_acres = df.loc[df[index_name].isin(chat_df[index_name])].set_index(index_name)
total_acres["total_acres"] = pg.area(total_acres.geometry.values.data) * M2_ACRES
results = pd.DataFrame(
chat_df.groupby(index_name).acres.sum().rename("chat_acres")
).join(total_acres[["total_acres"]], how="left")
# intersect edge units with SE input areas to determine areas outside
edge_df = explode(
df.loc[
df[index_name].isin(
results.loc[(results.chat_acres < results.total_acres - 1)].index
)
].copy()[[index_name, "geometry"]]
)
print("Intersecting with input areas, this may take a while...")
input_df = gp.read_feather(input_filename).reset_index(drop=True)
# this is inverted because input_df performs better if prepared (left side)
# note: we don't do intersection() here because of topology errors
left = pd.Series(input_df.geometry.values.data, index=input_df.index)
    right = pd.Series(edge_df.geometry.values.data, index=edge_df.index)
"""test_algo_api.py module."""
# from datetime import datetime, timedelta
import pytest
# import sys
# from pathlib import Path
import numpy as np
import pandas as pd # type: ignore
import string
import math
from typing import Any, List, NamedTuple
# from typing_extensions import Final
from ibapi.tag_value import TagValue # type: ignore
from ibapi.contract import ComboLeg # type: ignore
from ibapi.contract import DeltaNeutralContract
from ibapi.contract import Contract, ContractDetails
from scottbrian_algo1.algo_api import AlgoApp, AlreadyConnected, \
DisconnectLockHeld, ConnectTimeout, RequestTimeout, DisconnectDuringRequest
from scottbrian_algo1.algo_maps import get_contract_dict, get_contract_obj
from scottbrian_algo1.algo_maps import get_contract_details_obj
# from scottbrian_utils.diag_msg import diag_msg
# from scottbrian_utils.file_catalog import FileCatalog
import logging
logger = logging.getLogger(__name__)
###############################################################################
# TestAlgoAppConnect class
###############################################################################
class TestAlgoAppConnect:
"""TestAlgoAppConnect class."""
def test_mock_connect_to_ib(self,
algo_app: "AlgoApp"
) -> None:
"""Test connecting to IB.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
"""
verify_algo_app_initialized(algo_app)
# we are testing connect_to_ib and the subsequent code that gets
# control as a result, such as getting the first requestID and then
# starting a separate thread for the run loop.
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
# verify that algo_app is connected and alive with a valid reqId
verify_algo_app_connected(algo_app)
algo_app.disconnect_from_ib()
verify_algo_app_disconnected(algo_app)
def test_mock_connect_to_ib_with_timeout(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test connecting to IB.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
# we are testing connect_to_ib with a simulated timeout
logger.debug("about to connect")
with pytest.raises(ConnectTimeout):
algo_app.connect_to_ib("127.0.0.1",
mock_ib.PORT_FOR_REQID_TIMEOUT,
client_id=0)
# verify that algo_app is not connected
verify_algo_app_disconnected(algo_app)
assert algo_app.request_id == 0
def test_connect_to_ib_already_connected(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test connecting to IB.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
# first, connect normally to mock_ib
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_PAPER_TRADING,
client_id=0)
# verify that algo_app is connected
verify_algo_app_connected(algo_app)
# try to connect again - should get error
with pytest.raises(AlreadyConnected):
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_PAPER_TRADING,
client_id=0)
# verify that algo_app is still connected and alive with a valid reqId
verify_algo_app_connected(algo_app)
algo_app.disconnect_from_ib()
verify_algo_app_disconnected(algo_app)
def test_connect_to_ib_with_lock_held(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test connecting to IB with disconnect lock held.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
# obtain the disconnect lock
logger.debug("about to obtain disconnect lock")
algo_app.disconnect_lock.acquire()
# try to connect - should get error
with pytest.raises(DisconnectLockHeld):
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
# verify that algo_app is still simply initialized
verify_algo_app_initialized(algo_app)
# def test_real_connect_to_IB(self) -> None:
# """Test connecting to IB.
#
# Args:
# algo_app: instance of AlgoApp from conftest pytest fixture
# monkeypatch: pytest fixture
#
# """
# proj_dir = Path.cwd().resolve().parents[1] # back two directories
# test_cat = \
# FileCatalog({'symbols': Path(proj_dir / 't_datasets/symbols.csv')
# })
# algo_app = AlgoApp(test_cat)
# verify_algo_app_initialized(algo_app)
#
# # we are testing connect_to_ib and the subsequent code that gets
# # control as a result, such as getting the first requestID and then
# # starting a separate thread for the run loop.
# logger.debug("about to connect")
# connect_ans = algo_app.connect_to_ib("127.0.0.1", 7496, client_id=0)
#
# # verify that algo_app is connected and alive with a valid reqId
# assert connect_ans
# assert algo_app.run_thread.is_alive()
# assert algo_app.isConnected()
# assert algo_app.request_id == 1
#
# algo_app.disconnect_from_ib()
# assert not algo_app.run_thread.is_alive()
# assert not algo_app.isConnected()
###############################################################################
# connect disconnect verification
###############################################################################
def verify_algo_app_initialized(algo_app: "AlgoApp") -> None:
"""Helper function to verify the also_app instance is initialized.
Args:
algo_app: instance of AlgoApp that is to be checked
"""
assert len(algo_app.ds_catalog) > 0
assert algo_app.request_id == 0
assert algo_app.symbols.empty
assert algo_app.stock_symbols.empty
assert algo_app.response_complete_event.is_set() is False
assert algo_app.nextValidId_event.is_set() is False
assert algo_app.__repr__() == 'AlgoApp(ds_catalog)'
# assert algo_app.run_thread is None
def verify_algo_app_connected(algo_app: "AlgoApp") -> None:
"""Helper function to verify we are connected to ib.
Args:
algo_app: instance of AlgoApp that is to be checked
"""
assert algo_app.run_thread.is_alive()
assert algo_app.isConnected()
assert algo_app.request_id == 1
def verify_algo_app_disconnected(algo_app: "AlgoApp") -> None:
"""Helper function to verify we are disconnected from ib.
Args:
algo_app: instance of AlgoApp that is to be checked
"""
assert not algo_app.run_thread.is_alive()
assert not algo_app.isConnected()
###############################################################################
###############################################################################
# matching symbols
###############################################################################
###############################################################################
class ExpCounts(NamedTuple):
"""NamedTuple for the expected counts."""
sym_non_recursive: int
sym_recursive: int
stock_sym_non_recursive: int
stock_sym_recursive: int
class SymDfs:
"""Saved sym dfs."""
def __init__(self,
mock_sym_df: Any,
sym_df: Any,
mock_stock_sym_df: Any,
stock_sym_df: Any) -> None:
"""Initialize the SymDfs.
Args:
mock_sym_df: mock sym DataFrame
sym_df: symbol DataFrame
mock_stock_sym_df: mock stock symbol DataFrame
stock_sym_df: stock symbols dataFrame
"""
self.mock_sym_df = mock_sym_df
self.sym_df = sym_df
self.mock_stock_sym_df = mock_stock_sym_df
self.stock_sym_df = stock_sym_df
class TestAlgoAppMatchingSymbols:
"""TestAlgoAppMatchingSymbols class."""
def test_request_symbols_all_combos(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test request_symbols with all patterns.
Args:
algo_app: pytest fixture instance of AlgoApp (see conftest.py)
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
verify_algo_app_connected(algo_app)
algo_app.request_throttle_secs = 0.01
try:
for idx, search_pattern in enumerate(
mock_ib.search_patterns()):
exp_counts = get_exp_number(search_pattern, mock_ib)
# verify symbol table has zero entries for the symbol
logger.info("calling verify_match_symbols req_type 1 "
"sym %s num %d", search_pattern, idx)
algo_app.symbols = pd.DataFrame()
algo_app.stock_symbols = pd.DataFrame()
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=1)
logger.info("calling verify_match_symbols req_type 2 "
"sym %s num %d", search_pattern, idx)
algo_app.symbols = pd.DataFrame()
algo_app.stock_symbols = pd.DataFrame()
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=2)
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_request_symbols_zero_result(self,
algo_app: "AlgoApp",
mock_ib: Any
) -> None:
"""Test request_symbols with pattern that finds exactly 1 symbol.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
verify_algo_app_connected(algo_app)
algo_app.request_throttle_secs = 0.01
try:
exp_counts = ExpCounts(0, 0, 0, 0)
# verify symbol table has zero entries for the symbols
for idx, search_pattern in enumerate(
mock_ib.no_find_search_patterns()):
logger.info("calling verify_match_symbols req_type 1 "
"sym %s num %d", search_pattern, idx)
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=1)
logger.info("calling verify_match_symbols req_type 2 "
"sym %s num %d", search_pattern, idx)
verify_match_symbols(algo_app,
mock_ib,
search_pattern,
exp_counts=exp_counts,
req_type=2)
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_get_symbols_timeout(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test get_symbols gets timeout.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
try:
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
mock_ib.PORT_FOR_SIMULATE_REQUEST_TIMEOUT,
client_id=0)
verify_algo_app_connected(algo_app)
with pytest.raises(RequestTimeout):
algo_app.request_symbols('A')
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_get_symbols_disconnect(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test get_symbols gets disconnected while waiting.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
try:
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
mock_ib.
PORT_FOR_SIMULATE_REQUEST_DISCONNECT,
client_id=0)
verify_algo_app_connected(algo_app)
with pytest.raises(DisconnectDuringRequest):
algo_app.request_symbols('A')
finally:
logger.debug('disconnecting')
algo_app.disconnect_from_ib()
logger.debug('verifying disconnected')
verify_algo_app_disconnected(algo_app)
logger.debug('disconnected - test case returning')
def test_get_symbols(self,
algo_app: "AlgoApp",
mock_ib: Any) -> None:
"""Test get_symbols with pattern that finds no symbols.
Args:
algo_app: instance of AlgoApp from conftest pytest fixture
mock_ib: pytest fixture of contract_descriptions
"""
verify_algo_app_initialized(algo_app)
try:
logger.debug("about to connect")
algo_app.connect_to_ib("127.0.0.1",
algo_app.PORT_FOR_LIVE_TRADING,
client_id=0)
verify_algo_app_connected(algo_app)
algo_app.request_throttle_secs = 0.01
sym_dfs = SymDfs(pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
                             pd.DataFrame())
# -*- coding: utf-8 -*-
################################################################################
# Description: Python script to analyze the results of the asset allocation exp.
# Author: <NAME>
# Email: <EMAIL>
# Date: dom 24 lug 2016 21:31:25 BST
################################################################################
#--------#
# Import #
#--------#
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import matplotlib
import errno
import ffn
# General settings
matplotlib.style.use('seaborn-colorblind')
params = {'legend.fontsize': 'x-large',
'figure.figsize': (20, 10),
'figure.facecolor': 'white',
'figure.edgecolor': 'black',
'axes.labelsize': 'x-large',
'axes.titlesize': 'x-large',
'xtick.labelsize': 'x-large',
'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
# Colors used
colors = ['black',
'dimgrey',
'steelblue',
'lightsteelblue']
#-----------------------#
# Algorithms considered #
#-----------------------#
algorithms = set(['ARAC', 'PGPE', 'NPGPE', 'RSARAC', 'RSPGPE', 'RSNPGPE'])
#-------------------#
# Utility functions #
#-------------------#
def createDirectory(dirPath):
""" Create directory at a given path (absolute).
Args:
dirPath (str): absolute path for new directory.
"""
if not os.path.exists(os.path.expanduser(dirPath)):
try:
os.makedirs(os.path.expanduser(dirPath))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
#-----------#
# Functions #
#-----------#
def analyzeConvergence(filesList, algorithmName):
""" Aggregate the convergence information of a series of independent
    experiments of a certain learning algorithm.
Args:
filesList (list of str): list of the files of convergence information
Returns:
dfReward (pd.DataFrame): dataframe containing the aggregate average reward
        dfStddev (pd.DataFrame): dataframe containing the aggregate standard dev
        dfSharpe (pd.DataFrame): dataframe containing the aggregate Sharpe ratio
"""
# Initialize output dataframes
temp = pd.read_csv(os.path.expanduser(filesList[0]), index_col=0)
dfRewardExp = pd.DataFrame(index=temp.index)
dfStddevExp = pd.DataFrame(index=temp.index)
dfSharpeExp = pd.DataFrame(index=temp.index)
# For all the files
for f in filesList:
expName = f[::-1].split('/', 1)[0][::-1][:-4]
df = pd.read_csv(os.path.expanduser(f), index_col=0)
dfRewardExp[expName] = df['average']
dfStddevExp[expName] = df['stdev']
dfSharpeExp[expName] = df['sharpe']
# Compute mean and stddev across experiments
c1 = algorithmName
c2 = algorithmName + '_delta'
dfReward = pd.DataFrame(index=temp.index, columns=[c1, c2])
dfStddev = pd.DataFrame(index=temp.index, columns=[c1, c2])
dfSharpe = pd.DataFrame(index=temp.index, columns=[c1, c2])
dfReward[c1] = dfRewardExp.mean(axis=1)
dfReward[c2] = dfRewardExp.std(axis=1)
dfStddev[c1] = dfStddevExp.mean(axis=1)
dfStddev[c2] = dfStddevExp.std(axis=1)
dfSharpe[c1] = dfSharpeExp.mean(axis=1)
dfSharpe[c2] = dfSharpeExp.std(axis=1)
# Return
return dfReward, dfStddev, dfSharpe
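# Hypothetical usage sketch (not part of the original script): aggregating the
# convergence files of one algorithm. The file paths below are placeholders;
# each CSV is expected to contain 'average', 'stdev' and 'sharpe' columns.
#
#     files = ['~/results/PGPE/run_0.csv', '~/results/PGPE/run_1.csv']
#     dfReward, dfStddev, dfSharpe = analyzeConvergence(files, 'PGPE')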
def compareAlgorithmConvergence(debugDir, imagesDir=None):
""" Compare the convergence properties of several learning algorithms. The
function produces images and csv summaries of the analysis in the given
directories.
Args:
outputDir (str): output directory.
imagesDir (str): images directory.
"""
dfReward = pd.DataFrame()
    dfStddev = pd.DataFrame()
import pytest
import numpy as np
import pandas as pd
from iguanas.rule_generation import RuleGeneratorOpt
from sklearn.metrics import precision_score, recall_score
from iguanas.metrics.classification import FScore, Precision
from itertools import product
import random
import math
@pytest.fixture
def create_data():
def return_random_num(y, fraud_min, fraud_max, nonfraud_min, nonfraud_max, rand_func):
data = [rand_func(fraud_min, fraud_max) if i == 1 else rand_func(
nonfraud_min, nonfraud_max) for i in y]
return data
random.seed(0)
np.random.seed(0)
y = pd.Series(data=[0]*980 + [1]*20, index=list(range(0, 1000)))
X = pd.DataFrame(data={
"num_distinct_txn_per_email_1day": [round(max(i, 0)) for i in return_random_num(y, 2, 1, 1, 2, np.random.normal)],
"num_distinct_txn_per_email_7day": [round(max(i, 0)) for i in return_random_num(y, 4, 2, 2, 3, np.random.normal)],
"ip_country_us": [round(min(i, 1)) for i in [max(i, 0) for i in return_random_num(y, 0.3, 0.4, 0.5, 0.5, np.random.normal)]],
"email_kb_distance": [min(i, 1) for i in [max(i, 0) for i in return_random_num(y, 0.2, 0.5, 0.6, 0.4, np.random.normal)]],
"email_alpharatio": [min(i, 1) for i in [max(i, 0) for i in return_random_num(y, 0.33, 0.1, 0.5, 0.2, np.random.normal)]],
},
index=list(range(0, 1000))
)
columns_int = [
'num_distinct_txn_per_email_1day', 'num_distinct_txn_per_email_7day', 'ip_country_us']
columns_cat = ['ip_country_us']
columns_num = ['num_distinct_txn_per_email_1day',
'num_distinct_txn_per_email_7day', 'email_kb_distance', 'email_alpharatio']
weights = y.apply(lambda x: 1000 if x == 1 else 1)
return [X, y, columns_int, columns_cat, columns_num, weights]
@pytest.fixture
def create_smaller_data():
random.seed(0)
np.random.seed(0)
y = pd.Series(data=[0]*5 + [1]*5, index=list(range(0, 10)))
X = pd.DataFrame(data={
'A': [5, 0, 5, 0, 5, 3, 4, 0, 0, 0],
'B': [0, 1, 0, 1, 0, 1, 0.6, 0.7, 0, 0],
'C_US': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1]
},
index=list(range(0, 10))
)
columns_int = ['A']
columns_cat = ['C']
columns_num = ['A', 'B']
weights = y.apply(lambda x: 1000 if x == 1 else 1)
return [X, y, columns_int, columns_cat, columns_num, weights]
@pytest.fixture
def fs_instantiated():
f = FScore(0.5)
return f.fit
@pytest.fixture
def rg_instantiated(fs_instantiated):
f0dot5 = fs_instantiated
params = {
'metric': f0dot5,
'n_total_conditions': 4,
'num_rules_keep': 50,
'n_points': 10,
'ratio_window': 2,
'remove_corr_rules': False,
'verbose': 1
}
rg = RuleGeneratorOpt(**params)
rg._today = '20200204'
return [rg, params]
@pytest.fixture
def return_dummy_rules():
def _read(weight_is_none=True):
if weight_is_none:
rule_descriptions = pd.DataFrame(
np.array([["(X['B']>=0.5)", 0.6, 0.6, 1, 0.5, 0.6],
["(X['C_US']==True)", 0.375, 0.6,
1, 0.8, 0.4054054054054054],
["(X['A']>=3)", 0.4, 0.4, 1, 0.5, 0.4000000000000001]]),
columns=['Logic', 'Precision', 'Recall',
'nConditions', 'PercDataFlagged', 'Metric'],
index=['RGO_Rule_20200204_1',
'RGO_Rule_20200204_2', 'RGO_Rule_20200204_0'],
)
rule_descriptions = rule_descriptions.astype({'Logic': object, 'Precision': float, 'Recall': float,
'nConditions': int, 'PercDataFlagged': float, 'Metric': float})
rule_descriptions.index.name = 'Rule'
else:
rule_descriptions = pd.DataFrame(
np.array([["(X['B']>=0.5)", 0.9993337774816788, 0.6, 1, 0.5,
0.8819379115710255],
["(X['C_US']==True)", 0.9983361064891847, 0.6, 1, 0.8,
0.8813160987074031],
["(X['A']>=3)", 0.9985022466300549, 0.4, 1, 0.5,
0.7685213648939442]]),
columns=['Logic', 'Precision', 'Recall',
'nConditions', 'PercDataFlagged', 'Metric'],
index=['RGO_Rule_20200204_1',
'RGO_Rule_20200204_2', 'RGO_Rule_20200204_0'],
)
rule_descriptions = rule_descriptions.astype({'Logic': object, 'Precision': float, 'Recall': float,
'nConditions': int, 'PercDataFlagged': float, 'Metric': float})
rule_descriptions.index.name = 'Rule'
X_rules = pd.DataFrame(
np.array([[0, 1, 1],
[1, 1, 0],
[0, 1, 1],
[1, 1, 0],
[0, 1, 1],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0]], dtype=np.int),
columns=['RGO_Rule_20200204_1',
'RGO_Rule_20200204_2', 'RGO_Rule_20200204_0'],
)
rule_combinations = [(('RGO_Rule_20200204_1', 'RGO_Rule_20200204_2'), ("(X['B']>=0.5)", "(X['C_US']==True)")),
(('RGO_Rule_20200204_1', 'RGO_Rule_20200204_0'),
("(X['B']>=0.5)", "(X['A']>=3)")),
(('RGO_Rule_20200204_2', 'RGO_Rule_20200204_0'), ("(X['C_US']==True)", "(X['A']>=3)"))]
return rule_descriptions, X_rules, rule_combinations
return _read
@pytest.fixture
def return_dummy_pairwise_rules():
rule_descriptions = pd.DataFrame(
{
'Rule': ['A', 'B', 'C'],
'Precision': [1, 0.5, 0]
}
)
rule_descriptions.set_index('Rule', inplace=True)
pairwise_descriptions = pd.DataFrame(
{
'Rule': ['A&B', 'B&C', 'A&C'],
'Precision': [1, 0.75, 0]
}
)
pairwise_descriptions.set_index('Rule', inplace=True)
X_rules_pairwise = pd.DataFrame({
'A&B': range(0, 1000),
'B&C': range(0, 1000),
'A&C': range(0, 1000),
})
pairwise_to_orig_lookup = {
'A&B': ['A', 'B'],
'A&C': ['A', 'C'],
'B&C': ['B', 'C'],
}
return pairwise_descriptions, X_rules_pairwise, pairwise_to_orig_lookup, rule_descriptions
@pytest.fixture
def return_iteration_results():
iteration_ranges = {
('num_distinct_txn_per_email_1day', '>='): (0, 7),
('num_distinct_txn_per_email_1day', '<='): (0, 7),
('num_distinct_txn_per_email_7day', '>='): (6.0, 12),
('num_distinct_txn_per_email_7day', '<='): (0, 6.0),
('email_kb_distance', '>='): (0.5, 1.0),
('email_kb_distance', '<='): (0.0, 0.5),
('email_alpharatio', '>='): (0.5, 1.0),
('email_alpharatio', '<='): (0.0, 0.5)
}
iteration_arrays = {('num_distinct_txn_per_email_1day', '>='): np.array([0, 1, 2, 3, 4, 5, 6, 7]),
('num_distinct_txn_per_email_1day', '<='): np.array([0, 1, 2, 3, 4, 5, 6, 7]),
('num_distinct_txn_per_email_7day',
'>='): np.array([6, 7, 8, 9, 10, 11, 12]),
('num_distinct_txn_per_email_7day', '<='): np.array([0, 1, 2, 3, 4, 5, 6]),
('email_kb_distance',
'>='): np.array([0.5, 0.56, 0.61, 0.67, 0.72, 0.78, 0.83, 0.89, 0.94, 1.]),
('email_kb_distance',
'<='): np.array([0., 0.056, 0.11, 0.17, 0.22, 0.28, 0.33, 0.39, 0.44,
0.5]),
('email_alpharatio',
'>='): np.array([0.5, 0.56, 0.61, 0.67, 0.72, 0.78, 0.83, 0.89, 0.94, 1.]),
('email_alpharatio',
'<='): np.array([0., 0.056, 0.11, 0.17, 0.22, 0.28, 0.33, 0.39, 0.44,
0.5])}
iteration_ranges_3pts = {
('num_distinct_txn_per_email_1day', '>='): np.array([0., 4., 7.]),
('num_distinct_txn_per_email_1day', '<='): np.array([0., 4., 7.]),
('num_distinct_txn_per_email_7day', '>='): np.array([6., 9., 12.]),
('num_distinct_txn_per_email_7day', '<='): np.array([0., 3., 6.]),
('email_kb_distance', '>='): np.array([0.5, 0.75, 1.]),
('email_kb_distance', '<='): np.array([0., 0.25, 0.5]),
('email_alpharatio', '>='): np.array([0.5, 0.75, 1.]),
('email_alpharatio', '<='): np.array([0., 0.25, 0.5])
}
fscore_arrays = {('num_distinct_txn_per_email_1day',
'>='): np.array([0.02487562, 0.04244482, 0.05393401, 0.01704545, 0.,
0., 0., 0.]),
('num_distinct_txn_per_email_1day',
'<='): np.array([0., 0.00608766, 0.02689873, 0.0275634, 0.02590674,
0.02520161, 0.0249004, 0.02487562]),
('num_distinct_txn_per_email_7day',
'>='): np.array([0.0304878, 0.04934211, 0., 0., 0.,
0., 0.]),
('num_distinct_txn_per_email_7day',
'<='): np.array([0., 0.00903614, 0.01322751, 0.01623377, 0.0248139,
0.02395716, 0.02275161]),
('email_kb_distance',
'>='): np.array([0.01290878, 0.01420455, 0.01588983, 0.01509662, 0.0136612,
0.01602564, 0.01798561, 0.0210084, 0.0245098, 0.0154321]),
('email_kb_distance',
'<='): np.array([0.10670732, 0.08333333, 0.06835938, 0.06410256, 0.06048387,
0.05901288, 0.05474453, 0.04573171, 0.04298942, 0.04079254]),
('email_alpharatio',
'>='): np.array([0.00498008, 0.00327225, 0., 0., 0.,
0., 0., 0., 0., 0.]),
('email_alpharatio',
'<='): np.array([0., 0., 0., 0.02232143, 0.04310345,
0.06161972, 0.06157635, 0.05662021, 0.05712366, 0.04429134])}
return iteration_ranges, iteration_arrays, iteration_ranges_3pts, fscore_arrays
@pytest.fixture
def return_pairwise_info_dict():
pairwise_info_dict = {"(X['B']>=0.5)&(X['C_US']==True)": {'RuleName1': 'RGO_Rule_20200204_1',
'RuleName2': 'RGO_Rule_20200204_2',
'PairwiseRuleName': 'RGO_Rule_20200204_0',
'PairwiseComponents': ['RGO_Rule_20200204_1', 'RGO_Rule_20200204_2']},
"(X['A']>=3)&(X['B']>=0.5)": {'RuleName1': 'RGO_Rule_20200204_1',
'RuleName2': 'RGO_Rule_20200204_0',
'PairwiseRuleName': 'RGO_Rule_20200204_1',
'PairwiseComponents': ['RGO_Rule_20200204_1', 'RGO_Rule_20200204_0']},
"(X['A']>=3)&(X['C_US']==True)": {'RuleName1': 'RGO_Rule_20200204_2',
'RuleName2': 'RGO_Rule_20200204_0',
'PairwiseRuleName': 'RGO_Rule_20200204_2',
'PairwiseComponents': ['RGO_Rule_20200204_2', 'RGO_Rule_20200204_0']}}
return pairwise_info_dict
def test_repr(rg_instantiated):
rg, _ = rg_instantiated
exp_repr = "RuleGeneratorOpt(metric=<bound method FScore.fit of FScore with beta=0.5>, n_total_conditions=4, num_rules_keep=50, n_points=10, ratio_window=2, one_cond_rule_opt_metric=<bound method FScore.fit of FScore with beta=1>, remove_corr_rules=False, target_feat_corr_types=None)"
assert rg.__repr__() == exp_repr
_ = rg.fit(pd.DataFrame({'A': [1, 0, 0]}), pd.Series([1, 0, 0]))
assert rg.__repr__() == 'RuleGeneratorOpt object with 1 rules generated'
def test_fit(create_data, rg_instantiated):
X, y, _, _, _, weights = create_data
rg, _ = rg_instantiated
exp_results = [
((1000, 86), 8474),
((1000, 59), 11281)
]
for i, w in enumerate([None, weights]):
X_rules = rg.fit(X, y, sample_weight=w)
assert X_rules.shape == exp_results[i][0]
assert X_rules.sum().sum() == exp_results[i][1]
assert rg.rule_names == X_rules.columns.tolist() == list(
rg.rule_lambdas.keys()) == list(rg.lambda_kwargs.keys()) == list(
rg.rules.rule_strings.keys())
def test_fit_target_feat_corr_types_infer(create_data, rg_instantiated, fs_instantiated):
X, y, _, _, _, weights = create_data
rg, _ = rg_instantiated
rg.target_feat_corr_types = 'Infer'
exp_results = [
((1000, 30), 1993),
((1000, 30), 4602)
]
for i, w in enumerate([None, weights]):
X_rules = rg.fit(X, y, sample_weight=w)
assert X_rules.shape == exp_results[i][0]
assert X_rules.sum().sum() == exp_results[i][1]
assert rg.rule_names == X_rules.columns.tolist() == list(
rg.rule_lambdas.keys()) == list(rg.lambda_kwargs.keys()) == list(
rg.rules.rule_strings.keys())
assert len(
[l for l in list(rg.rule_strings.values()) if "X['email_alpharatio']>" in l]) == 0
assert len(
[l for l in list(rg.rule_strings.values()) if "X['email_kb_distance']>" in l]) == 0
assert len(
[l for l in list(rg.rule_strings.values()) if "X['ip_country_us']==True" in l]) == 0
assert len([l for l in list(rg.rule_strings.values())
if "X['num_distinct_txn_per_email_1day']<" in l]) == 0
assert len([l for l in list(rg.rule_strings.values())
if "X['num_distinct_txn_per_email_7day']<" in l]) == 0
def test_fit_target_feat_corr_types_provided(create_data, rg_instantiated, fs_instantiated):
X, y, _, _, _, weights = create_data
rg, _ = rg_instantiated
rg.target_feat_corr_types = {
'PositiveCorr': [
'num_distinct_txn_per_email_1day',
'num_distinct_txn_per_email_7day'
],
'NegativeCorr': [
'ip_country_us', 'email_kb_distance', 'email_alpharatio']
}
exp_results = [
((1000, 30), 1993),
((1000, 30), 4602)
]
for i, w in enumerate([None, weights]):
X_rules = rg.fit(X, y, sample_weight=w)
assert X_rules.shape == exp_results[i][0]
assert X_rules.sum().sum() == exp_results[i][1]
assert rg.rule_names == X_rules.columns.tolist() == list(
rg.rule_lambdas.keys()) == list(rg.lambda_kwargs.keys()) == list(
rg.rules.rule_strings.keys())
assert len(
[l for l in list(rg.rule_strings.values()) if "X['email_alpharatio']>" in l]) == 0
assert len(
[l for l in list(rg.rule_strings.values()) if "X['email_kb_distance']>" in l]) == 0
assert len(
[l for l in list(rg.rule_strings.values()) if "X['ip_country_us']==True" in l]) == 0
assert len([l for l in list(rg.rule_strings.values())
if "X['num_distinct_txn_per_email_1day']<" in l]) == 0
assert len([l for l in list(rg.rule_strings.values())
if "X['num_distinct_txn_per_email_7day']<" in l]) == 0
def test_transform(create_data, rg_instantiated):
exp_result = [
(1000, 86),
(1000, 59)
]
X, y, _, _, _, weights = create_data
rg, _ = rg_instantiated
for i, w in enumerate([None, weights]):
_ = rg.fit(X, y, w)
X_rules = rg.transform(X)
assert X_rules.shape == exp_result[i]
def test_generate_numeric_one_condition_rules(create_data, rg_instantiated, fs_instantiated):
exp_rule_strings = [
{
'RGO_Rule_20200204_0': "(X['num_distinct_txn_per_email_1day']>=2)", 'RGO_Rule_20200204_1': "(X['num_distinct_txn_per_email_7day']>=7)", 'RGO_Rule_20200204_2': "(X['email_kb_distance']>=0.94)", 'RGO_Rule_20200204_3': "(X['email_alpharatio']>=0.5)",
'RGO_Rule_20200204_4': "(X['num_distinct_txn_per_email_1day']<=3)", 'RGO_Rule_20200204_5': "(X['num_distinct_txn_per_email_7day']<=4)", 'RGO_Rule_20200204_6': "(X['email_kb_distance']<=0.0)", 'RGO_Rule_20200204_7': "(X['email_alpharatio']<=0.33)"
},
{
'RGO_Rule_20200204_8': "(X['num_distinct_txn_per_email_1day']>=1)", 'RGO_Rule_20200204_9': "(X['num_distinct_txn_per_email_7day']>=7)", 'RGO_Rule_20200204_10': "(X['email_kb_distance']>=0.61)", 'RGO_Rule_20200204_11': "(X['email_alpharatio']>=0.5)",
'RGO_Rule_20200204_12': "(X['num_distinct_txn_per_email_1day']<=3)", 'RGO_Rule_20200204_13': "(X['num_distinct_txn_per_email_7day']<=5)", 'RGO_Rule_20200204_14': "(X['email_kb_distance']<=0.5)", 'RGO_Rule_20200204_15': "(X['email_alpharatio']<=0.5)"
}
]
X, y, columns_int, _, columns_num, weights = create_data
rg, _ = rg_instantiated
metric = fs_instantiated
for i, w in enumerate([None, weights]):
rule_strings, X_rules = rg._generate_numeric_one_condition_rules(
X, y, columns_num, columns_int, w
)
assert X_rules.shape == (1000, 8)
assert rule_strings == exp_rule_strings[i]
def test_generate_numeric_one_condition_rules_warning(rg_instantiated):
X = pd.DataFrame({'A': [0, 0, 0]})
y = pd.Series([0, 1, 0])
rg, _ = rg_instantiated
with pytest.warns(UserWarning, match='No numeric one condition rules could be created.'):
results = rg._generate_numeric_one_condition_rules(
X, y, ['A'], ['A'], None)
pd.testing.assert_frame_equal(results[0], pd.DataFrame())
    pd.testing.assert_frame_equal(results[1], pd.DataFrame())
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import KFold, StratifiedKFold
import pandas as pd
import numpy as np
class BaggingRegressor():
def __init__(self, regressors, seeds = [2022], n_fold=5):
self.regressors = regressors
self.n_regressors = 1 if type(self.regressors) != list else len(self.regressors)
self.fitted_regressors = []
self.seeds = seeds
self.n_seeds = len(self.seeds)
self.n_fold = n_fold
self.folds = None
def fit(self, X, y):
for idx, cur_regressor in enumerate(self.regressors):
cur_fitted_regressors = []
for seed in self.seeds:
self.folds = KFold(n_splits=self.n_fold, shuffle=True, random_state=seed)
for fold_n, (train_index, valid_index) in enumerate(self.folds.split(X, y)):
clf = cur_regressor.fit(X.loc[train_index], y.loc[train_index],
eval_set=[(X.loc[valid_index], y.loc[valid_index])],
early_stopping_rounds = 50,
verbose = 0)
cur_fitted_regressors.append(clf)
self.fitted_regressors.append(cur_fitted_regressors)
print('Training Done')
def predict(self, X, regressor_weights = []):
predict_test = pd.DataFrame()
if np.sum(regressor_weights) != 1:
regressor_weights = np.ones(self.n_regressors) / self.n_regressors
for idx, cur_fitted_regressors in enumerate(self.fitted_regressors):
for i, cur_fitted_regressor in enumerate(cur_fitted_regressors):
if i == 0:
pred = cur_fitted_regressor.predict(X) / float(self.n_fold) / float(self.n_seeds)
else:
pred += cur_fitted_regressor.predict(X) / float(self.n_fold) / float(self.n_seeds)
predict_test['model_%d_predict' % (idx)] = pred * regressor_weights[idx]
self.result = predict_test.sum(axis = 1)
print('Prediction Done')
return self.result
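# Hypothetical usage sketch (not part of the original module). The estimators
# must support the eval_set/early_stopping_rounds fit signature used above
# (e.g. LightGBM's sklearn wrapper); the data and weights are placeholders.
#
#     from lightgbm import LGBMRegressor
#     bag = BaggingRegressor(
#         regressors=[LGBMRegressor(), LGBMRegressor(max_depth=5)],
#         seeds=[2022, 2023],
#         n_fold=5,
#     )
#     bag.fit(X_train, y_train)
#     preds = bag.predict(X_test, regressor_weights=[0.6, 0.4])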
class BaggingClassifier():
def __init__(self, classifiers, seeds = [2022], n_fold=5):
self.classifiers = classifiers
self.n_classifiers = 1 if type(self.classifiers) != list else len(self.classifiers)
self.fitted_classifiers = []
self.seeds = seeds
self.n_seeds = len(self.seeds)
self.n_fold = n_fold
self.folds = None
def fit(self, X, y, custom_metric_list = []):
for idx, cur_classifier in enumerate(self.classifiers):
cur_fitted_classifiers = []
if idx < len(custom_metric_list):
cur_metric = custom_metric_list[idx]
else:
cur_metric = 'auc'
for seed in self.seeds:
self.folds = StratifiedKFold(n_splits=self.n_fold, shuffle=True, random_state=seed)
for fold_n, (train_index, valid_index) in enumerate(self.folds.split(X, y)):
clf = cur_classifier.fit(X.loc[train_index], y.loc[train_index],
eval_set=[(X.loc[valid_index], y.loc[valid_index])],
eval_metric = cur_metric,
early_stopping_rounds = 50,
verbose = 0)
cur_fitted_classifiers.append(clf)
self.fitted_classifiers.append(cur_fitted_classifiers)
print('Training Done')
def predict_proba(self, X, classifier_weights = []):
predict_proba_test = pd.DataFrame()
if np.sum(classifier_weights) != 1:
classifier_weights = np.ones(len(self.classifiers)) / len(self.classifiers)
for idx, cur_fitted_classifiers in enumerate(self.fitted_classifiers):
for i, cur_fitted_classifier in enumerate(cur_fitted_classifiers):
if i == 0:
pred = cur_fitted_classifier.predict_proba(X)[:, 1] / float(self.n_fold) / float(self.n_seeds)
else:
pred += cur_fitted_classifier.predict_proba(X)[:, 1] / float(self.n_fold) / float(self.n_seeds)
predict_proba_test['model_%d_predict_proba' % (idx)] = pred * classifier_weights[idx]
self.result = predict_proba_test.sum(axis = 1)
print('Prediction Done')
return self.result
def predict(self, X, classifier_weights = []):
        predict_proba_test = pd.DataFrame()
""" Quick n Simple Image Folder, Tarfile based DataSet
Hacked together by / Copyright 2020 <NAME>
"""
import torch.utils.data as data
import os
import logging
import math
import collections
import tqdm
import cv2
import torch
import pandas as pd
from glob import glob
from PIL import Image
from .parsers import create_parser
from torchvision import datasets as torch_datasets
from torchvision import transforms
from torchvision.utils import save_image
from torch.utils.data import Dataset
from torchvision.utils import make_grid
# create logger
_logger = logging.getLogger(__name__)
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
_logger.addHandler(ch)
_ERROR_RETRY = 50
class ImageDataset(data.Dataset):
def __init__(
self,
root,
parser=None,
class_map=None,
load_bytes=False,
transform=None,
target_transform=None,
):
if parser is None or isinstance(parser, str):
parser = create_parser(parser or '', root=root, class_map=class_map)
self.parser = parser
self.load_bytes = load_bytes
self.transform = transform
self.target_transform = target_transform
self._consecutive_errors = 0
def __getitem__(self, index):
img, target = self.parser[index]
try:
img = img.read() if self.load_bytes else Image.open(img).convert('RGB')
except Exception as e:
_logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}')
self._consecutive_errors += 1
if self._consecutive_errors < _ERROR_RETRY:
return self.__getitem__((index + 1) % len(self.parser))
else:
raise e
self._consecutive_errors = 0
if self.transform is not None:
img = self.transform(img)
if target is None:
target = -1
elif self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.parser)
def filename(self, index, basename=False, absolute=False):
return self.parser.filename(index, basename, absolute)
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
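# Hypothetical usage sketch (not part of the original module): reading an
# image-folder dataset with a torchvision transform; the folder path is a
# placeholder.
#
#     from torchvision import transforms as T
#     ds = ImageDataset(
#         root='/data/images/train',
#         transform=T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()]),
#     )
#     img, target = ds[0]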
class IterableImageDataset(data.IterableDataset):
def __init__(
self,
root,
parser=None,
split='train',
is_training=False,
batch_size=None,
repeats=0,
download=False,
transform=None,
target_transform=None,
):
assert parser is not None
if isinstance(parser, str):
self.parser = create_parser(
parser, root=root, split=split, is_training=is_training,
batch_size=batch_size, repeats=repeats, download=download)
else:
self.parser = parser
self.transform = transform
self.target_transform = target_transform
self._consecutive_errors = 0
def __iter__(self):
for img, target in self.parser:
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
yield img, target
def __len__(self):
if hasattr(self.parser, '__len__'):
return len(self.parser)
else:
return 0
def filename(self, index, basename=False, absolute=False):
assert False, 'Filename lookup by index not supported, use filenames().'
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix or other clean/augmentation mixes"""
def __init__(self, dataset, num_splits=2):
self.augmentation = None
self.normalize = None
self.dataset = dataset
if self.dataset.transform is not None:
self._set_transforms(self.dataset.transform)
self.num_splits = num_splits
def _set_transforms(self, x):
assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
self.dataset.transform = x[0]
self.augmentation = x[1]
self.normalize = x[2]
@property
def transform(self):
return self.dataset.transform
@transform.setter
def transform(self, x):
self._set_transforms(x)
def _normalize(self, x):
return x if self.normalize is None else self.normalize(x)
def __getitem__(self, i):
x, y = self.dataset[i] # all splits share the same dataset base transform
x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split)
# run the full augmentation on the remaining splits
for _ in range(self.num_splits - 1):
x_list.append(self._normalize(self.augmentation(x)))
return tuple(x_list), y
def __len__(self):
return len(self.dataset)
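# Hypothetical usage sketch (not part of the original module): wrapping a
# dataset for clean/augmented splits. The transform must be assigned as a
# 3-tuple of (base transform, augmentation, normalize); the transforms here
# are placeholders.
#
#     augmix_ds = AugMixDataset(ds, num_splits=3)
#     augmix_ds.transform = (base_tf, aug_tf, normalize_tf)
#     (clean, aug1, aug2), target = augmix_ds[0]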
class EventMNISTDataset(data.Dataset):
def __init__(self, root, split='train', transform=None, target_transform=None, number_of_frames=9,
img_prefix="img_", sample=False, sample_times=9):
self.dataset_root = root
self.train_dir = os.path.join(self.dataset_root, "train")
self.val_dir = os.path.join(self.dataset_root, "val")
self.test_dir = os.path.join(self.dataset_root, "test")
self.sample = sample
self.sample_times = sample_times
self.dir_dict = {
"train": self.train_dir,
"val": self.val_dir,
"valid": self.val_dir,
"validation": self.val_dir,
"test": self.test_dir,
}
self.transform = transform
self.target_transform = target_transform
accepted_frames = [4, 9, 16, 25, 36, 49, 64]
if number_of_frames not in accepted_frames:
raise Exception("The number of frames should be a value between {}")
self.number_of_frames = number_of_frames
self.frames_per_row = int(math.sqrt(number_of_frames))
self.frames_per_col = self.frames_per_row
self.img_dir = self.dir_dict[split]
self.labels_file = os.path.join(self.img_dir, "labels.csv")
self.csv_data = {'fname': [], 'label': []}
self.raw_data_loader = None
self.generated_img_id = 0
if not os.path.exists(self.labels_file):
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
if not os.path.exists(os.path.join(self.dataset_root, "raw")):
self.__download_raw_data(split)
else:
self.__load_raw_data(split)
self.__create_summation_training_data(img_prefix)
#self.__create_event_training_data(split, img_prefix)
#self.__create_no_event_training_data(split, img_prefix)
self.__save_annotations()
self.img_labels = pd.read_csv(self.labels_file)
def __len__(self):
return len(self.img_labels)
def __getitem__(self, idx):
img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
image = Image.open(img_path).convert('RGB')
image = image.convert('L') # Reading grayscale
# image = read_image(img_path, mode=ImageReadMode.GRAY)
label = self.img_labels.iloc[idx, 1]
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def __download_raw_data(self, split):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
if split in ["train"]:
dataset1 = torch_datasets.MNIST(f"{os.path.join(self.dataset_root, 'raw')}", train=True, download=True,
transform=transform)
elif split in ["val", "valid", "validation"]:
dataset1 = torch_datasets.MNIST(f"{os.path.join(self.dataset_root, 'raw')}", train=False, download=True,
transform=transform)
# If true the data loaded for each batch will be sampled multiple times
if self.sample:
train_kwargs = {'batch_size': self.sample_times*self.number_of_frames}
else:
train_kwargs = {'batch_size': self.number_of_frames}
self.raw_data_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
def __load_raw_data(self, split):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
if split in ["train"]:
dataset1 = torch_datasets.MNIST(f"{os.path.join(self.dataset_root, 'raw')}", train=True, download=False,
transform=transform)
elif split in ["val", "valid", "validation"]:
dataset1 = torch_datasets.MNIST(f"{os.path.join(self.dataset_root, 'raw')}", train=False, download=False,
transform=transform)
if self.sample:
train_kwargs = {'batch_size': self.sample_times*self.number_of_frames}
else:
train_kwargs = {'batch_size': self.number_of_frames}
self.raw_data_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
def __create_summation_training_data(self, img_prefix="img_"):
"""
A function to create an experimental MNist dataset. The dataset is used to test/verify if transformer can
compute additions of 9 mnist numbers patched together in an image. That will assume that the mnist ciphers
will be distributed differently in the image space. Will the transformers understand that?
"""
no_cuda = True  # no_cuda is hard-coded, so dataset generation runs on the CPU
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
for batch_idx, (data, target) in enumerate(tqdm.tqdm(self.raw_data_loader, desc="Creating the event dataset")):
data, target = data.to(device), target.to(device)
if data.shape[0] != self.number_of_frames:
continue
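# Tile the (number_of_frames, C, H, W) batch into a single (1, C, frames_per_row*H, frames_per_row*W)
# grid image. The reshape/transpose pairs first group the digits into strips of frames_per_row, then
# stack the strips; the final transpose undoes the per-digit transposition so every digit keeps its
# original orientation in the grid.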
data = data.reshape(self.frames_per_row, data.shape[1], data.shape[2] * self.frames_per_row, data.shape[3])
data = data.transpose(2, 3)
data = data.reshape(1, data.shape[1], data.shape[2] * self.frames_per_row, data.shape[3])
data = data.transpose(2, 3)
image_data = data[0]
img_id = f'{self.generated_img_id}'.zfill(9)
img_name = f'{img_prefix}{img_id}.png'
img_path = os.path.join(self.img_dir, img_name)
save_image(image_data, f'{img_path}')
self.csv_data["fname"].append(img_name)
self.csv_data["label"].append(int(target.sum().cpu().numpy()))
self.generated_img_id += 1
def __create_event_training_data(self, split, img_prefix="img_"):
no_cuda = True
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
event = 1
for batch_idx, (data, target) in enumerate(tqdm.tqdm(self.raw_data_loader, desc="Creating the event dataset")):
data, target = data.to(device), target.to(device)
if data.shape[0] != self.number_of_frames:
continue
data = data.reshape(self.frames_per_row, data.shape[1], data.shape[2] * self.frames_per_row, data.shape[3])
data = data.transpose(2, 3)
data = data.reshape(1, data.shape[1], data.shape[2] * self.frames_per_row, data.shape[3])
data = data.transpose(2, 3)
image_data = data[0]
img_id = f'{self.generated_img_id}'.zfill(9)
img_name = f'{img_prefix}{img_id}.png'
img_path = os.path.join(self.img_dir, img_name)
save_image(image_data, f'{img_path}')
self.csv_data["fname"].append(img_name)
self.csv_data["label"].append(event)
self.generated_img_id += 1
def __create_no_event_training_data(self, split, img_prefix="img_"):
no_cuda = True
use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
event = 0
label_to_tensors = {}
for batch_idx, (data, target) in enumerate(
tqdm.tqdm(self.raw_data_loader, desc="Creating the no event dataset")):
data, target = data.to(device), target.to(device)
for sample_idx in range(data.shape[0]):
if target[sample_idx].item() not in label_to_tensors:
label_to_tensors[target[sample_idx].item()] = [data[sample_idx]]
else:
label_to_tensors[target[sample_idx].item()].append(data[sample_idx])
for label, value in label_to_tensors.items():
for batch in range(0, len(value), self.number_of_frames):
data = value[batch:batch + self.number_of_frames]
if len(data) != self.number_of_frames:
continue
data = torch.stack(data)
data = data.reshape(self.frames_per_row, data.shape[1], data.shape[2] * self.frames_per_row,
data.shape[3])
data = data.transpose(2, 3)
data = data.reshape(1, data.shape[1], data.shape[2] * self.frames_per_row, data.shape[3])
data = data.transpose(2, 3)
image_data = data[0]
img_id = f'{self.generated_img_id}'.zfill(9)
img_name = f'{img_prefix}{img_id}.png'
img_path = os.path.join(self.img_dir, img_name)
save_image(image_data, f'{img_path}')
self.csv_data["fname"].append(img_name)
self.csv_data["label"].append(event)
self.generated_img_id += 1
def __save_annotations(self):
pd.DataFrame(self.csv_data).to_csv(self.labels_file, index=False)
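# A minimal, hedged usage sketch for EventMNISTDataset. The helper name, the root path and the
# transform are illustrative assumptions; on first use the class downloads MNIST under <root>/raw,
# tiles the digits into grid images under <root>/<split>, and writes <root>/<split>/labels.csv.
def _example_event_mnist_usage():
    from torchvision import transforms
    dataset = EventMNISTDataset(root="./data/event_mnist",   # hypothetical output directory
                                split="train",
                                transform=transforms.ToTensor(),
                                number_of_frames=9)
    image, label = dataset[0]   # 1x84x84 tensor for 9 frames; label is the digit sum
    return image, label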
class VideoEventDataset(Dataset):
def __init__(self, root, input_data,
split='train',
transform=None,
target_transform=None,
number_of_frames=9,
crop=None,
update=False, # If the dataset creation should be re-run
img_prefix="img_",
video_ext="mp4"):
"""
The class is used to create the Dataset for detecting events in a sequence of images that are taken in
with a sampling rate that can variate from 1 frame per 15 seconds to 1 frame per 5 minutes
This class is being created to be able to model relationships between patches of the same image.
The inspiration is based on the transformer architecture.
Since transformers try to create attention the image on a global scale, by creating queries about the different
patches. The idea is that by patching different images in a new one, the temporal dimension will be converted
into a spatial dimension. Thus reducing the need to perform 4D convolutional operation.
Another inspiration is to understand if transformers/attention will be able to learn features about the
"""
_logger.info("Creating the VideoEventDataset for {split}")
self.dataset_root = root
# Videos are expected in the following directory structure:
# <input_data>/<split>/<event>/video.<ext>, where <event> is an integer labelling the event in that video
# (e.g. ./videos/train/1/clip_0001.mp4)
self.video_ext = video_ext
# Fix automatic validation and training dataset creation
self.video_list = glob(f"{input_data}/{split}/*/*.{self.video_ext}")
_logger.info(f"The following {split} videos were found in the input directory {self.video_list }")
self.train_dir = os.path.join(self.dataset_root, "train")
self.val_dir = os.path.join(self.dataset_root, "val")
self.test_dir = os.path.join(self.dataset_root, "test")
self.dir_dict = {
"train": self.train_dir,
"val": self.val_dir,
"valid": self.val_dir,
"validation": self.val_dir,
"test": self.test_dir,
}
self.update = update
self.transform = transform
self.crop = crop # (y, h, x, w)
self.target_transform = target_transform
accepted_frames = [4, 9, 16, 25, 36, 49, 64]
if number_of_frames not in accepted_frames:
raise ValueError(f"The number of frames should be one of {accepted_frames}, got {number_of_frames}")
self.number_of_frames = number_of_frames
self.frames_per_row = int(math.sqrt(number_of_frames))
self.frames_per_col = self.frames_per_row
self.img_dir = self.dir_dict[split]
self.labels_file = os.path.join(self.img_dir, "labels.csv")
self.csv_data = {'fname': [], 'label': []}
self.raw_data_loader = None
self.generated_img_id = 0
if not os.path.exists(self.labels_file) or self.update:
_logger.info(f"The labels file does not exist. Creating dataset from scratch for the following videos {self.video_list}")
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.__create_event_data_split(img_prefix)
self.__save_annotations()
self.img_labels = pd.read_csv(self.labels_file)
def __len__(self):
return len(self.img_labels)
def __getitem__(self, idx):
img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
image = Image.open(img_path).convert('RGB')
label = self.img_labels.iloc[idx, 1]
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def __create_event_data_split(self, img_prefix):
"""
@param img_prefix: the prefix to add to the ImageId when saving.
"""
transform = transforms.ToTensor()
for idx, video in enumerate(tqdm.tqdm(self.video_list)):
event = video.split("/")[1]
vidcap = cv2.VideoCapture(video)
video_length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
success, image = vidcap.read()
if not success:
raise RuntimeError(f"Video file could not be read: {video}")
image_buffer = collections.deque(maxlen=self.number_of_frames)
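# Sliding window over the video: each saved grid is built from the most recent number_of_frames
# frames, and the window advances one frame per iteration, so consecutive grids overlap heavily.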
with tqdm.tqdm(total=video_length, desc=f"Processing video file {video}") as pbar:
while success:
success, image = vidcap.read()
if success:
if self.crop is not None:
image = image[self.crop[0]:self.crop[0] + self.crop[1],
self.crop[2]:self.crop[2] + self.crop[3]]
# Resize each frame so the assembled grid roughly matches the original image resolution
w, h = int(image.shape[1] / self.frames_per_row), int(image.shape[0] / self.frames_per_col)
image = cv2.resize(image, (w, h))
image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
tensor = transform(image)
image_buffer.append(tensor)
pbar.update(1)
if len(image_buffer) != image_buffer.maxlen:
continue
image_data = make_grid(list(image_buffer), nrow=self.frames_per_row)
img_id = f'{self.generated_img_id}'.zfill(9)
img_name = f'{img_prefix}{img_id}.png'
img_path = os.path.join(self.img_dir, img_name)
save_image(image_data, f'{img_path}')
self.csv_data["fname"].append(img_name)
self.csv_data["label"].append(event)
self.generated_img_id += 1
else:
break
def __save_annotations(self):
pd.DataFrame(self.csv_data).to_csv(self.labels_file, index=False)
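# A minimal, hedged usage sketch for VideoEventDataset. The helper name, directory paths, crop box
# and transform are illustrative assumptions; the class walks <input_data>/<split>/<event>/*.mp4,
# tiles sliding windows of frames into grid images under <root>/<split>, and labels each grid with
# the <event> directory name.
def _example_video_event_usage():
    from torchvision import transforms
    dataset = VideoEventDataset(root="./data/video_events",   # hypothetical output directory
                                input_data="./videos",        # hypothetical video source directory
                                split="train",
                                transform=transforms.ToTensor(),
                                number_of_frames=9,
                                crop=(0, 720, 0, 1280))       # (y, h, x, w), as documented above
    image, label = dataset[0]
    return image, label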